mptcp.lists.linux.dev archive mirror
* [MPTCP][PATCH v5 mptcp-next 0/5] fullmesh path manager support
@ 2021-07-27  7:58 Geliang Tang
  2021-07-27  7:58 ` [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh Geliang Tang
  0 siblings, 1 reply; 15+ messages in thread
From: Geliang Tang @ 2021-07-27  7:58 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang

From: Geliang Tang <geliangtang@xiaomi.com>

v5:
 - patch 1, add a new helper lookup_address_in_vec.
 - patch 2, update pm.subflows in the non-fullmesh case.
 - patch 4, add more tests.
 - tag: export/20210727T054640

v4:
 - add new helpers, fill_local/remote_addresses_vec
 - add max_subflows checks
 - add 'local' into the local addresses array only when no fullmesh
   entry found.
 - add signal,fullmesh check

v3:
 - the in-kernel fullmesh path manager has been dropped from this
   patchset; only the fullmesh flag support code is kept.

v2:
 - Implement the fullmesh mode as an extension to the netlink PM rather
   than as a standalone PM, as Paolo suggested.
 - drop duplicate code.
 - add a new per endpoint flag MPTCP_PM_ADDR_FLAG_FULLMESH.

Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/193

Geliang Tang (5):
  mptcp: remote addresses fullmesh
  mptcp: local addresses fullmesh
  selftests: mptcp: set and print the fullmesh flag
  selftests: mptcp: add fullmesh testcases
  selftests: mptcp: delete uncontinuous removing ids

 include/uapi/linux/mptcp.h                    |   1 +
 net/mptcp/pm_netlink.c                        | 140 ++++++++++++++++--
 .../testing/selftests/net/mptcp/mptcp_join.sh |  67 ++++++++-
 tools/testing/selftests/net/mptcp/pm_nl_ctl.c |  16 +-
 4 files changed, 206 insertions(+), 18 deletions(-)

-- 
2.31.1



* [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh
  2021-07-27  7:58 [MPTCP][PATCH v5 mptcp-next 0/5] fullmesh path manager support Geliang Tang
@ 2021-07-27  7:58 ` Geliang Tang
  2021-07-27  7:58   ` [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local " Geliang Tang
                     ` (3 more replies)
  0 siblings, 4 replies; 15+ messages in thread
From: Geliang Tang @ 2021-07-27  7:58 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang, Paolo Abeni

From: Geliang Tang <geliangtang@xiaomi.com>

This patch adds and manages a new per-endpoint flag, named
MPTCP_PM_ADDR_FLAG_FULLMESH.

In mptcp_pm_create_subflow_or_signal_addr(), if such flag is set, instead
of:

        remote_address((struct sock_common *)sk, &remote);

fill a temporarily allocated array with all the known remote addresses.
After releasing the PM lock, loop over such array and create a subflow
for each remote address from the given local one.

Note that we still use an array even for a non-'fullmesh' endpoint,
with a single entry corresponding to the primary MPC subflow remote
address.

Suggested-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
---
 include/uapi/linux/mptcp.h |  1 +
 net/mptcp/pm_netlink.c     | 80 +++++++++++++++++++++++++++++++++++---
 2 files changed, 76 insertions(+), 5 deletions(-)

diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index 7b05f7102321..f66038b9551f 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -73,6 +73,7 @@ enum {
 #define MPTCP_PM_ADDR_FLAG_SIGNAL			(1 << 0)
 #define MPTCP_PM_ADDR_FLAG_SUBFLOW			(1 << 1)
 #define MPTCP_PM_ADDR_FLAG_BACKUP			(1 << 2)
+#define MPTCP_PM_ADDR_FLAG_FULLMESH			(1 << 3)
 
 enum {
 	MPTCP_PM_CMD_UNSPEC,
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index ba0e1d71504d..2259c424485f 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -158,6 +158,27 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
 	return false;
 }
 
+static bool lookup_subflow_by_addrs(const struct list_head *list,
+				    struct mptcp_addr_info *saddr,
+				    struct mptcp_addr_info *daddr)
+{
+	struct mptcp_subflow_context *subflow;
+	struct mptcp_addr_info local, remote;
+	struct sock_common *skc;
+
+	list_for_each_entry(subflow, list, node) {
+		skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
+
+		local_address(skc, &local);
+		remote_address(skc, &remote);
+		if (addresses_equal(&local, saddr, saddr->port) &&
+		    addresses_equal(&remote, daddr, daddr->port))
+			return true;
+	}
+
+	return false;
+}
+
 static struct mptcp_pm_addr_entry *
 select_local_address(const struct pm_nl_pernet *pernet,
 		     struct mptcp_sock *msk)
@@ -410,6 +431,53 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
 	}
 }
 
+static bool lookup_address_in_vec(struct mptcp_addr_info *addrs, unsigned int nr,
+				  struct mptcp_addr_info *addr)
+{
+	int i;
+
+	for (i = 0; i < nr; i++) {
+		if (addresses_equal(&addrs[i], addr, addr->port))
+			return true;
+	}
+
+	return false;
+}
+
+static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
+					      struct mptcp_pm_addr_entry *local,
+					      struct mptcp_addr_info *addrs)
+{
+	struct sock *sk = (struct sock *)msk, *ssk;
+	struct mptcp_subflow_context *subflow;
+	struct mptcp_addr_info remote = { 0 };
+	struct pm_nl_pernet *pernet;
+	unsigned int subflows_max;
+	int i = 0;
+
+	pernet = net_generic(sock_net(sk), pm_nl_pernet_id);
+	subflows_max = mptcp_pm_get_subflows_max(msk);
+
+	if (!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)) {
+		remote_address((struct sock_common *)sk, &remote);
+		msk->pm.subflows++;
+		addrs[i++] = remote;
+	} else {
+		mptcp_for_each_subflow(msk, subflow) {
+			ssk = mptcp_subflow_tcp_sock(subflow);
+			remote_address((struct sock_common *)ssk, &remote);
+			if (!lookup_subflow_by_addrs(&msk->conn_list, &local->addr, &remote) &&
+			    !lookup_address_in_vec(addrs, i, &remote) &&
+			    msk->pm.subflows < subflows_max) {
+				msk->pm.subflows++;
+				addrs[i++] = remote;
+			}
+		}
+	}
+
+	return i;
+}
+
 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
 {
 	struct sock *sk = (struct sock *)msk;
@@ -455,15 +523,17 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
 	    !READ_ONCE(msk->pm.remote_deny_join_id0)) {
 		local = select_local_address(pernet, msk);
 		if (local) {
-			struct mptcp_addr_info remote = { 0 };
+			struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
+			int i, nr;
 
 			msk->pm.local_addr_used++;
-			msk->pm.subflows++;
 			check_work_pending(msk);
-			remote_address((struct sock_common *)sk, &remote);
+			nr = fill_remote_addresses_vec(msk, local, addrs);
 			spin_unlock_bh(&msk->pm.lock);
-			__mptcp_subflow_connect(sk, &local->addr, &remote,
-						local->flags, local->ifindex);
+			for (i = 0; i < nr; i++) {
+				__mptcp_subflow_connect(sk, &local->addr, &addrs[i],
+							local->flags, local->ifindex);
+			}
 			spin_lock_bh(&msk->pm.lock);
 			return;
 		}
-- 
2.31.1



* [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local addresses fullmesh
  2021-07-27  7:58 ` [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh Geliang Tang
@ 2021-07-27  7:58   ` Geliang Tang
  2021-07-27  7:58     ` [MPTCP][PATCH v5 mptcp-next 3/5] selftests: mptcp: set and print the fullmesh flag Geliang Tang
  2021-07-27  9:52     ` [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local addresses fullmesh Paolo Abeni
  2021-07-27  9:38   ` [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote " Paolo Abeni
                     ` (2 subsequent siblings)
  3 siblings, 2 replies; 15+ messages in thread
From: Geliang Tang @ 2021-07-27  7:58 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang, Paolo Abeni

From: Geliang Tang <geliangtang@xiaomi.com>

In mptcp_pm_nl_add_addr_received(), fill a temporarily allocated array
with all the local addresses corresponding to fullmesh endpoints. If such
array is empty, keep the current behavior.

Otherwise, loop over such array and create a subflow for each local
address towards the given remote address.

Suggested-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
---
 net/mptcp/pm_netlink.c | 60 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 55 insertions(+), 5 deletions(-)

diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 2259c424485f..a85bac950f3b 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -554,13 +554,62 @@ static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
 	mptcp_pm_create_subflow_or_signal_addr(msk);
 }
 
+static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+					     struct mptcp_addr_info *remote,
+					     struct mptcp_pm_addr_entry *entries)
+{
+	struct mptcp_pm_addr_entry local, *entry;
+	struct sock *sk = (struct sock *)msk;
+	struct pm_nl_pernet *pernet;
+	unsigned int subflows_max;
+	int i = 0;
+
+	pernet = net_generic(sock_net(sk), pm_nl_pernet_id);
+	subflows_max = mptcp_pm_get_subflows_max(msk);
+
+	rcu_read_lock();
+	__mptcp_flush_join_list(msk);
+	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
+			continue;
+
+		if (entry->addr.family != sk->sk_family) {
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+			if ((entry->addr.family == AF_INET &&
+			     !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) ||
+			    (sk->sk_family == AF_INET &&
+			     !ipv6_addr_v4mapped(&entry->addr.addr6)))
+#endif
+				continue;
+		}
+
+		if (!lookup_subflow_by_addrs(&msk->conn_list, &entry->addr, remote) &&
+		    msk->pm.subflows < subflows_max) {
+			msk->pm.subflows++;
+			entries[i++] = *entry;
+		}
+	}
+	rcu_read_unlock();
+
+	if (!i) {
+		memset(&local, 0, sizeof(local));
+		local.addr.family = remote->family;
+
+		msk->pm.subflows++;
+		entries[i++] = local;
+	}
+
+	return i;
+}
+
 static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
 {
+	struct mptcp_pm_addr_entry entries[MPTCP_PM_ADDR_MAX];
 	struct sock *sk = (struct sock *)msk;
 	unsigned int add_addr_accept_max;
 	struct mptcp_addr_info remote;
-	struct mptcp_addr_info local;
 	unsigned int subflows_max;
+	int i, nr;
 
 	add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
 	subflows_max = mptcp_pm_get_subflows_max(msk);
@@ -584,11 +633,12 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
 	remote = msk->pm.remote;
 	if (!remote.port)
 		remote.port = sk->sk_dport;
-	memset(&local, 0, sizeof(local));
-	local.family = remote.family;
-
+	nr = fill_local_addresses_vec(msk, &remote, entries);
 	spin_unlock_bh(&msk->pm.lock);
-	__mptcp_subflow_connect(sk, &local, &remote, 0, 0);
+	for (i = 0; i < nr; i++) {
+		__mptcp_subflow_connect(sk, &entries[i].addr, &remote,
+					entries[i].flags, entries[i].ifindex);
+	}
 	spin_lock_bh(&msk->pm.lock);
 
 add_addr_echo:
-- 
2.31.1



* [MPTCP][PATCH v5 mptcp-next 3/5] selftests: mptcp: set and print the fullmesh flag
  2021-07-27  7:58   ` [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local " Geliang Tang
@ 2021-07-27  7:58     ` Geliang Tang
  2021-07-27  7:58       ` [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases Geliang Tang
  2021-07-27  9:52     ` [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local addresses fullmesh Paolo Abeni
  1 sibling, 1 reply; 15+ messages in thread
From: Geliang Tang @ 2021-07-27  7:58 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang

From: Geliang Tang <geliangtang@xiaomi.com>

This patch handles the MPTCP_PM_ADDR_FLAG_FULLMESH flag in add_addr()
and print_addr(), so that the fullmesh flag can be set and printed out.
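
For example, with this change applied, a fullmesh endpoint can be added
and dumped (illustrative invocation from the selftests directory, the
address is arbitrary):

	./pm_nl_ctl add 10.0.2.2 flags subflow,fullmesh
	./pm_nl_ctl dump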

Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
---
 tools/testing/selftests/net/mptcp/pm_nl_ctl.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index 115decfdc1ef..354784512748 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -25,7 +25,7 @@
 static void syntax(char *argv[])
 {
 	fprintf(stderr, "%s add|get|set|del|flush|dump|accept [<args>]\n", argv[0]);
-	fprintf(stderr, "\tadd [flags signal|subflow|backup] [id <nr>] [dev <name>] <ip>\n");
+	fprintf(stderr, "\tadd [flags signal|subflow|backup|fullmesh] [id <nr>] [dev <name>] <ip>\n");
 	fprintf(stderr, "\tdel <id> [<ip>]\n");
 	fprintf(stderr, "\tget <id>\n");
 	fprintf(stderr, "\tset <ip> [flags backup|nobackup]\n");
@@ -236,11 +236,18 @@ int add_addr(int fd, int pm_family, int argc, char *argv[])
 					flags |= MPTCP_PM_ADDR_FLAG_SIGNAL;
 				else if (!strcmp(tok, "backup"))
 					flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
+				else if (!strcmp(tok, "fullmesh"))
+					flags |= MPTCP_PM_ADDR_FLAG_FULLMESH;
 				else
 					error(1, errno,
 					      "unknown flag %s", argv[arg]);
 			}
 
+			if (flags & MPTCP_PM_ADDR_FLAG_SIGNAL &&
+			    flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
+				error(1, errno, "error flag fullmesh");
+			}
+
 			rta = (void *)(data + off);
 			rta->rta_type = MPTCP_PM_ADDR_ATTR_FLAGS;
 			rta->rta_len = RTA_LENGTH(4);
@@ -422,6 +429,13 @@ static void print_addr(struct rtattr *attrs, int len)
 					printf(",");
 			}
 
+			if (flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
+				printf("fullmesh");
+				flags &= ~MPTCP_PM_ADDR_FLAG_FULLMESH;
+				if (flags)
+					printf(",");
+			}
+
 			/* bump unknown flags, if any */
 			if (flags)
 				printf("0x%x", flags);
-- 
2.31.1



* [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases
  2021-07-27  7:58     ` [MPTCP][PATCH v5 mptcp-next 3/5] selftests: mptcp: set and print the fullmesh flag Geliang Tang
@ 2021-07-27  7:58       ` Geliang Tang
  2021-07-27  7:58         ` [MPTCP][PATCH v5 mptcp-next 5/5] selftests: mptcp: delete uncontinuous removing ids Geliang Tang
  2021-07-27 10:03         ` [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases Paolo Abeni
  0 siblings, 2 replies; 15+ messages in thread
From: Geliang Tang @ 2021-07-27  7:58 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang

From: Geliang Tang <geliangtang@xiaomi.com>

This patch adds testcases for the fullmesh address flag of the path
manager.

Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
---
 .../testing/selftests/net/mptcp/mptcp_join.sh | 57 ++++++++++++++++++-
 1 file changed, 54 insertions(+), 3 deletions(-)

diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 937e861e9490..ca19762b9c6e 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -367,7 +367,13 @@ do_transfer()
 	fi
 
 	if [ $addr_nr_ns2 -gt 0 ]; then
-		let add_nr_ns2=addr_nr_ns2
+		if [ $addr_nr_ns2 -gt 10 ]; then
+			let add_nr_ns2=addr_nr_ns2-10
+			flags=subflow,fullmesh
+		else
+			let add_nr_ns2=addr_nr_ns2
+			flags=subflow
+		fi
 		counter=3
 		sleep 1
 		while [ $add_nr_ns2 -gt 0 ]; do
@@ -377,7 +383,7 @@ do_transfer()
 			else
 				addr="10.0.$counter.2"
 			fi
-			ip netns exec $ns2 ./pm_nl_ctl add $addr flags subflow
+			ip netns exec $ns2 ./pm_nl_ctl add $addr flags $flags
 			let counter+=1
 			let add_nr_ns2-=1
 		done
@@ -1697,6 +1703,46 @@ deny_join_id0_tests()
 	chk_join_nr "subflow and address allow join id0 2" 1 1 1
 }
 
+fullmesh_tests()
+{
+	# fullmesh 1
+	reset
+	ip netns exec $ns1 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns2 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow,fullmesh
+	run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
+	chk_join_nr "fullmesh test 1" 2 2 2
+	chk_add_nr 1 1
+
+	# fullmesh 2
+	reset
+	ip netns exec $ns1 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns2 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow,fullmesh
+	ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow,fullmesh
+	run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
+	chk_join_nr "fullmesh test 2" 4 4 4
+	chk_add_nr 1 1
+
+	# fullmesh 3
+	reset
+	ip netns exec $ns1 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns2 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+	run_tests $ns1 $ns2 10.0.1.1 0 0 11 slow
+	chk_join_nr "fullmesh test 3" 3 3 3
+	chk_add_nr 1 1
+
+	# fullmesh 4
+	reset
+	ip netns exec $ns1 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns2 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+	run_tests $ns1 $ns2 10.0.1.1 0 0 12 slow
+	chk_join_nr "fullmesh test 4" 5 5 5
+	chk_add_nr 1 1
+}
+
 all_tests()
 {
 	subflows_tests
@@ -1712,6 +1758,7 @@ all_tests()
 	syncookies_tests
 	checksum_tests
 	deny_join_id0_tests
+	fullmesh_tests
 }
 
 usage()
@@ -1730,6 +1777,7 @@ usage()
 	echo "  -k syncookies_tests"
 	echo "  -S checksum_tests"
 	echo "  -d deny_join_id0_tests"
+	echo "  -m fullmesh_tests"
 	echo "  -c capture pcap files"
 	echo "  -C enable data checksum"
 	echo "  -h help"
@@ -1765,7 +1813,7 @@ if [ $do_all_tests -eq 1 ]; then
 	exit $ret
 fi
 
-while getopts 'fsltra64bpkdchCS' opt; do
+while getopts 'fsltra64bpkdmchCS' opt; do
 	case $opt in
 		f)
 			subflows_tests
@@ -1806,6 +1854,9 @@ while getopts 'fsltra64bpkdchCS' opt; do
 		d)
 			deny_join_id0_tests
 			;;
+		m)
+			fullmesh_tests
+			;;
 		c)
 			;;
 		C)
-- 
2.31.1



* [MPTCP][PATCH v5 mptcp-next 5/5] selftests: mptcp: delete uncontinuous removing ids
  2021-07-27  7:58       ` [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases Geliang Tang
@ 2021-07-27  7:58         ` Geliang Tang
  2021-07-27 10:03         ` [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases Paolo Abeni
  1 sibling, 0 replies; 15+ messages in thread
From: Geliang Tang @ 2021-07-27  7:58 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang

From: Geliang Tang <geliangtang@xiaomi.com>

The address removal testcases can currently only deal with contiguous
address ids. This patch adds support for removing non-contiguous ids.
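
The ids are taken from the 'pm_nl_ctl dump' output. Assuming each dumped
endpoint splits into 5 whitespace-separated words, e.g. (illustrative):

	id 1 flags subflow 10.0.1.2
	id 3 flags subflow 10.0.2.2

the ids sit at word offsets 1, 6, 11, ... of the word-split dump array,
hence the 'pos += 5' stride used below.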

Fixes: f87744ad42446 ("selftests: mptcp: set addr id for removing testcases")
Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
---
 tools/testing/selftests/net/mptcp/mptcp_join.sh | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index ca19762b9c6e..3d906792e963 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -344,17 +344,18 @@ do_transfer()
 		let rm_nr_ns1=-addr_nr_ns1
 		if [ $rm_nr_ns1 -lt 8 ]; then
 			counter=1
+			pos=1
 			dump=(`ip netns exec ${listener_ns} ./pm_nl_ctl dump`)
 			if [ ${#dump[@]} -gt 0 ]; then
-				id=${dump[1]}
 				sleep 1
 
 				while [ $counter -le $rm_nr_ns1 ]
 				do
+					id=${dump[$pos]}
 					ip netns exec ${listener_ns} ./pm_nl_ctl del $id
 					sleep 1
 					let counter+=1
-					let id+=1
+					let pos+=5
 				done
 			fi
 		elif [ $rm_nr_ns1 -eq 8 ]; then
@@ -392,17 +393,18 @@ do_transfer()
 		let rm_nr_ns2=-addr_nr_ns2
 		if [ $rm_nr_ns2 -lt 8 ]; then
 			counter=1
+			pos=1
 			dump=(`ip netns exec ${connector_ns} ./pm_nl_ctl dump`)
 			if [ ${#dump[@]} -gt 0 ]; then
-				id=${dump[1]}
 				sleep 1
 
 				while [ $counter -le $rm_nr_ns2 ]
 				do
+					id=${dump[$pos]}
 					ip netns exec ${connector_ns} ./pm_nl_ctl del $id
 					sleep 1
 					let counter+=1
-					let id+=1
+					let pos+=5
 				done
 			fi
 		elif [ $rm_nr_ns2 -eq 8 ]; then
-- 
2.31.1



* Re: [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh
  2021-07-27  7:58 ` [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh Geliang Tang
  2021-07-27  7:58   ` [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local " Geliang Tang
@ 2021-07-27  9:38   ` Paolo Abeni
  2021-07-27 12:36     ` Geliang Tang
  2021-07-27  9:40   ` Paolo Abeni
  2021-07-27  9:51   ` Paolo Abeni
  3 siblings, 1 reply; 15+ messages in thread
From: Paolo Abeni @ 2021-07-27  9:38 UTC (permalink / raw)
  To: Geliang Tang, mptcp; +Cc: Geliang Tang

Hello,

On Tue, 2021-07-27 at 15:58 +0800, Geliang Tang wrote:
> From: Geliang Tang <geliangtang@xiaomi.com>
> 
> This patch added and managed a new per endpoint flag, named
> MPTCP_PM_ADDR_FLAG_FULLMESH.
> 
> In mptcp_pm_create_subflow_or_signal_addr(), if such flag is set, instead
> of:
> 
>         remote_address((struct sock_common *)sk, &remote);
> 
> fill a temporary allocated array of all known remote address. After
> releaseing the pm lock loop on such array and create a subflow for each
> remote address from the given local.
> 
> Note that the we could still use an array even for non 'fullmesh'
> endpoint: with a single entry corresponding to the primary MPC subflow
> remote address.
> 
> Suggested-by: Paolo Abeni <pabeni@redhat.com>
> Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
> ---
>  include/uapi/linux/mptcp.h |  1 +
>  net/mptcp/pm_netlink.c     | 80 +++++++++++++++++++++++++++++++++++---
>  2 files changed, 76 insertions(+), 5 deletions(-)
> 
> diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
> index 7b05f7102321..f66038b9551f 100644
> --- a/include/uapi/linux/mptcp.h
> +++ b/include/uapi/linux/mptcp.h
> @@ -73,6 +73,7 @@ enum {
>  #define MPTCP_PM_ADDR_FLAG_SIGNAL			(1 << 0)
>  #define MPTCP_PM_ADDR_FLAG_SUBFLOW			(1 << 1)
>  #define MPTCP_PM_ADDR_FLAG_BACKUP			(1 << 2)
> +#define MPTCP_PM_ADDR_FLAG_FULLMESH			(1 << 3)
>  
>  enum {
>  	MPTCP_PM_CMD_UNSPEC,
> diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
> index ba0e1d71504d..2259c424485f 100644
> --- a/net/mptcp/pm_netlink.c
> +++ b/net/mptcp/pm_netlink.c
> @@ -158,6 +158,27 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
>  	return false;
>  }
>  
> +static bool lookup_subflow_by_addrs(const struct list_head *list,
> +				    struct mptcp_addr_info *saddr,
> +				    struct mptcp_addr_info *daddr)
> +{
> +	struct mptcp_subflow_context *subflow;
> +	struct mptcp_addr_info local, remote;
> +	struct sock_common *skc;
> +
> +	list_for_each_entry(subflow, list, node) {
> +		skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
> +
> +		local_address(skc, &local);
> +		remote_address(skc, &remote);
> +		if (addresses_equal(&local, saddr, saddr->port) &&
> +		    addresses_equal(&remote, daddr, daddr->port))
> +			return true;
> +	}
> +
> +	return false;
> +}

I'm sorry for not noticing this earlier, do we need this function and
the related check in fill_remote_addresses_vec()?

'saddr' is the return value of select_local_address(), so no existing
subflow is bound to such address.

> +
>  static struct mptcp_pm_addr_entry *
>  select_local_address(const struct pm_nl_pernet *pernet,
>  		     struct mptcp_sock *msk)
> @@ -410,6 +431,53 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
>  	}
>  }
>  
> +static bool lookup_address_in_vec(struct mptcp_addr_info *addrs, unsigned int nr,
> +				  struct mptcp_addr_info *addr)
> +{
> +	int i;
> +
> +	for (i = 0; i < nr; i++) {
> +		if (addresses_equal(&addrs[i], addr, addr->port))
> +			return true;
> +	}
> +
> +	return false;
> +}
> +
> +static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
> +					      struct mptcp_pm_addr_entry *local,
> +					      struct mptcp_addr_info *addrs)
> +{
> +	struct sock *sk = (struct sock *)msk, *ssk;
> +	struct mptcp_subflow_context *subflow;
> +	struct mptcp_addr_info remote = { 0 };

Minor nit: no need to initialize the 'remote' variable

Cheers,

Paolo



* Re: [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh
  2021-07-27  7:58 ` [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh Geliang Tang
  2021-07-27  7:58   ` [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local " Geliang Tang
  2021-07-27  9:38   ` [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote " Paolo Abeni
@ 2021-07-27  9:40   ` Paolo Abeni
  2021-07-27  9:51   ` Paolo Abeni
  3 siblings, 0 replies; 15+ messages in thread
From: Paolo Abeni @ 2021-07-27  9:40 UTC (permalink / raw)
  To: Geliang Tang, mptcp; +Cc: Geliang Tang

On Tue, 2021-07-27 at 15:58 +0800, Geliang Tang wrote:
> +static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
> +					      struct mptcp_pm_addr_entry *local,
> +					      struct mptcp_addr_info *addrs)
> +{

I almost forgot another minor nit: some comments before the function
and/or in the main loop describing the function goal/behavior will make
the code more maintainable in the long term, thanks!
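
Something minimal along these lines would already help (just a sketch,
exact wording up to you):

	/* Fill the array addrs[] with the remote addresses the given
	 * local endpoint should connect to, and return the number of
	 * entries: only the primary MPC subflow remote address for a
	 * plain endpoint, or every known remote address - within the
	 * subflows limit - for a fullmesh one.
	 */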

Paolo



* Re: [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh
  2021-07-27  7:58 ` [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh Geliang Tang
                     ` (2 preceding siblings ...)
  2021-07-27  9:40   ` Paolo Abeni
@ 2021-07-27  9:51   ` Paolo Abeni
  2021-07-27 12:40     ` Geliang Tang
  3 siblings, 1 reply; 15+ messages in thread
From: Paolo Abeni @ 2021-07-27  9:51 UTC (permalink / raw)
  To: Geliang Tang, mptcp; +Cc: Geliang Tang

Hello,


I'm sorry for the partial feedback so far, I noticed more things
looking at the following patches. Again, I'm sorry!

On Tue, 2021-07-27 at 15:58 +0800, Geliang Tang wrote:
> +static bool lookup_subflow_by_addrs(const struct list_head *list,
> +				    struct mptcp_addr_info *saddr,
> +				    struct mptcp_addr_info *daddr)
> +{

both 'saddr' and 'daddr' could be 'const', I think.

> +	struct mptcp_subflow_context *subflow;
> +	struct mptcp_addr_info local, remote;
> +	struct sock_common *skc;
> +
> +	list_for_each_entry(subflow, list, node) {
> +		skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
> +
> +		local_address(skc, &local);
> +		remote_address(skc, &remote);
> +		if (addresses_equal(&local, saddr, saddr->port) &&
> +		    addresses_equal(&remote, daddr, daddr->port))
> +			return true;
> +	}
> +
> +	return false;
> +}
> +
>  static struct mptcp_pm_addr_entry *
>  select_local_address(const struct pm_nl_pernet *pernet,
>  		     struct mptcp_sock *msk)
> @@ -410,6 +431,53 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
>  	}
>  }
>  
> +static bool lookup_address_in_vec(struct mptcp_addr_info *addrs, unsigned int nr,
> +				  struct mptcp_addr_info *addr)

'addr' could be 'const', I think.

> +{
> +	int i;
> +
> +	for (i = 0; i < nr; i++) {
> +		if (addresses_equal(&addrs[i], addr, addr->port))
> +			return true;
> +	}
> +
> +	return false;
> +}
> +
> +static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
> +					      struct mptcp_pm_addr_entry *local,
> +					      struct mptcp_addr_info *addrs)
> +{

'local' could be 'const', I think.

hopefully no more comments on this patch ;)

Cheers,

Paolo



* Re: [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local addresses fullmesh
  2021-07-27  7:58   ` [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local " Geliang Tang
  2021-07-27  7:58     ` [MPTCP][PATCH v5 mptcp-next 3/5] selftests: mptcp: set and print the fullmesh flag Geliang Tang
@ 2021-07-27  9:52     ` Paolo Abeni
  2021-07-27 12:51       ` Geliang Tang
  1 sibling, 1 reply; 15+ messages in thread
From: Paolo Abeni @ 2021-07-27  9:52 UTC (permalink / raw)
  To: Geliang Tang, mptcp; +Cc: Geliang Tang

On Tue, 2021-07-27 at 15:58 +0800, Geliang Tang wrote:
> From: Geliang Tang <geliangtang@xiaomi.com>
> 
> In mptcp_pm_nl_add_addr_received(), fill a temporary allocate array of
> all local address corresponding to the fullmesh endpoint. If such array
> is empty, keep the current behavior.
> 
> Elsewhere loop on such array and create a subflow for each local address
> towards the given remote address
> 
> Suggested-by: Paolo Abeni <pabeni@redhat.com>
> Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
> ---
>  net/mptcp/pm_netlink.c | 60 ++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 55 insertions(+), 5 deletions(-)
> 
> diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
> index 2259c424485f..a85bac950f3b 100644
> --- a/net/mptcp/pm_netlink.c
> +++ b/net/mptcp/pm_netlink.c
> @@ -554,13 +554,62 @@ static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
>  	mptcp_pm_create_subflow_or_signal_addr(msk);
>  }
>  
> +static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
> +					     struct mptcp_addr_info *remote,
> +					     struct mptcp_pm_addr_entry *entries)

Minor nit: some comments here before the function describing it would
be helpful, thanks!

Also 'remote' could be 'const', I think.

> +{
> +	struct mptcp_pm_addr_entry local, *entry;
> +	struct sock *sk = (struct sock *)msk;
> +	struct pm_nl_pernet *pernet;
> +	unsigned int subflows_max;
> +	int i = 0;
> +
> +	pernet = net_generic(sock_net(sk), pm_nl_pernet_id);
> +	subflows_max = mptcp_pm_get_subflows_max(msk);
> +
> +	rcu_read_lock();
> +	__mptcp_flush_join_list(msk);
> +	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
> +		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
> +			continue;
> +
> +		if (entry->addr.family != sk->sk_family) {
> +#if IS_ENABLED(CONFIG_MPTCP_IPV6)
> +			if ((entry->addr.family == AF_INET &&
> +			     !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) ||
> +			    (sk->sk_family == AF_INET &&
> +			     !ipv6_addr_v4mapped(&entry->addr.addr6)))
> +#endif
> +				continue;
> +		}
> +
> +		if (!lookup_subflow_by_addrs(&msk->conn_list, &entry->addr, remote) &&
> +		    msk->pm.subflows < subflows_max) {
> +			msk->pm.subflows++;
> +			entries[i++] = *entry;
> +		}
> +	}
> +	rcu_read_unlock();
> +
> +	if (!i) {
> +		memset(&local, 0, sizeof(local));
> +		local.addr.family = remote->family;
> +
> +		msk->pm.subflows++;
> +		entries[i++] = local;
> +	}
> +
> +	return i;
> +}
> +
>  static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
>  {
> +	struct mptcp_pm_addr_entry entries[MPTCP_PM_ADDR_MAX];

mptcp_pm_addr_entry is quite a bit larger than mptcp_addr_info (should be
64 bytes vs 24). 64 * 8 == 512 bytes could be a bit too much storage for
the stack. What about using instead:

struct mptcp_addr_info addresses[MPTCP_PM_ADDR_MAX];
u8 flags[MPTCP_PM_ADDR_MAX];

?

And then pass the 2 arguments to fill_local_addresses_vec(), instead of
the single 'entries' arg.
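
Roughly something like this at the call site (only a sketch; the ifindex
handling would still need a home):

	struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
	u8 flags[MPTCP_PM_ADDR_MAX];
	...
	nr = fill_local_addresses_vec(msk, &remote, addrs, flags);
	spin_unlock_bh(&msk->pm.lock);
	for (i = 0; i < nr; i++)
		__mptcp_subflow_connect(sk, &addrs[i], &remote, flags[i], 0);
	spin_lock_bh(&msk->pm.lock);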

Cheers,

Paolo



* Re: [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases
  2021-07-27  7:58       ` [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases Geliang Tang
  2021-07-27  7:58         ` [MPTCP][PATCH v5 mptcp-next 5/5] selftests: mptcp: delete uncontinuous removing ids Geliang Tang
@ 2021-07-27 10:03         ` Paolo Abeni
  2021-07-27 12:49           ` Geliang Tang
  1 sibling, 1 reply; 15+ messages in thread
From: Paolo Abeni @ 2021-07-27 10:03 UTC (permalink / raw)
  To: Geliang Tang, mptcp; +Cc: Geliang Tang

On Tue, 2021-07-27 at 15:58 +0800, Geliang Tang wrote:
> From: Geliang Tang <geliangtang@xiaomi.com>
> 
> This patch added the testcases for the fullmesh address flag of the path
> manager.
> 
> Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
> ---
>  .../testing/selftests/net/mptcp/mptcp_join.sh | 57 ++++++++++++++++++-
>  1 file changed, 54 insertions(+), 3 deletions(-)
> 
> diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
> index 937e861e9490..ca19762b9c6e 100755
> --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
> +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
> @@ -367,7 +367,13 @@ do_transfer()
>  	fi
>  
>  	if [ $addr_nr_ns2 -gt 0 ]; then
> -		let add_nr_ns2=addr_nr_ns2
> +		if [ $addr_nr_ns2 -gt 10 ]; then
> +			let add_nr_ns2=addr_nr_ns2-10
> +			flags=subflow,fullmesh
> +		else
> +			let add_nr_ns2=addr_nr_ns2
> +			flags=subflow
> +		fi
>  		counter=3
>  		sleep 1
>  		while [ $add_nr_ns2 -gt 0 ]; do
> @@ -377,7 +383,7 @@ do_transfer()
>  			else
>  				addr="10.0.$counter.2"
>  			fi
> -			ip netns exec $ns2 ./pm_nl_ctl add $addr flags subflow
> +			ip netns exec $ns2 ./pm_nl_ctl add $addr flags $flags
>  			let counter+=1
>  			let add_nr_ns2-=1
>  		done
> @@ -1697,6 +1703,46 @@ deny_join_id0_tests()
>  	chk_join_nr "subflow and address allow join id0 2" 1 1 1
>  }
>  
> +fullmesh_tests()
> +{
> +	# fullmesh 1
> +	reset
> +	ip netns exec $ns1 ./pm_nl_ctl limits 8 8
> +	ip netns exec $ns2 ./pm_nl_ctl limits 8 8
> +	ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow,fullmesh
> +	run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
> +	chk_join_nr "fullmesh test 1" 2 2 2
> +	chk_add_nr 1 1
> +
> +	# fullmesh 2
> +	reset
> +	ip netns exec $ns1 ./pm_nl_ctl limits 8 8
> +	ip netns exec $ns2 ./pm_nl_ctl limits 8 8
> +	ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow,fullmesh
> +	ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow,fullmesh
> +	run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
> +	chk_join_nr "fullmesh test 2" 4 4 4
> +	chk_add_nr 1 1

These 2 tests are a bit redundant, I think. With a single known remote
address, the final topology will be the same as with non-fullmesh
subflows. Perhaps we can keep just one of these cases?

> +
> +	# fullmesh 3
> +	reset
> +	ip netns exec $ns1 ./pm_nl_ctl limits 8 8
> +	ip netns exec $ns2 ./pm_nl_ctl limits 8 8
> +	ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal

Here I think we need at least one 'fullmesh' subflow in ns2? Otherwise
this will be a non-fullmesh topology.

> +	run_tests $ns1 $ns2 10.0.1.1 0 0 11 slow
> +	chk_join_nr "fullmesh test 3" 3 3 3
> +	chk_add_nr 1 1
> +
> +	# fullmesh 4
> +	reset
> +	ip netns exec $ns1 ./pm_nl_ctl limits 8 8
> +	ip netns exec $ns2 ./pm_nl_ctl limits 8 8
> +	ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal

And perhaps 2 'fullmesh' subflows here? So we explicitly test the most
expected configuration:

Client		Server
C1 ----------->	S1
    \       /->
     \     /
    - \ --/
   /   \
  /     \
 /       ----->
C2 ----------->	S2

(not sure how much the above "graph" will be readable :)

Additionally, it would be nice to explicitly test a fullmesh topology
where we hit the max_subflows limit, e.g. 2 local addresses, 3 remote
ones and max_subflows=4.
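
Something along these lines, I mean (a rough sketch re-using the helpers
above; the expected join counters would need to be double-checked):

	# fullmesh, hitting the subflows limit
	reset
	ip netns exec $ns1 ./pm_nl_ctl limits 1 4
	ip netns exec $ns2 ./pm_nl_ctl limits 2 4
	ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
	ip netns exec $ns1 ./pm_nl_ctl add 10.0.3.1 flags signal
	run_tests $ns1 $ns2 10.0.1.1 0 0 12 slow
	chk_join_nr "fullmesh, limited" 4 4 4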



> +	run_tests $ns1 $ns2 10.0.1.1 0 0 12 slow
> +	chk_join_nr "fullmesh test 4" 5 5 5
> +	chk_add_nr 1 1
> +}
> +
>  all_tests()
>  {
>  	subflows_tests
> @@ -1712,6 +1758,7 @@ all_tests()
>  	syncookies_tests
>  	checksum_tests
>  	deny_join_id0_tests
> +	fullmesh_tests
>  }
>  
>  usage()
> @@ -1730,6 +1777,7 @@ usage()
>  	echo "  -k syncookies_tests"
>  	echo "  -S checksum_tests"
>  	echo "  -d deny_join_id0_tests"
> +	echo "  -m fullmesh_tests"
>  	echo "  -c capture pcap files"
>  	echo "  -C enable data checksum"
>  	echo "  -h help"
> @@ -1765,7 +1813,7 @@ if [ $do_all_tests -eq 1 ]; then
>  	exit $ret
>  fi
>  
> -while getopts 'fsltra64bpkdchCS' opt; do
> +while getopts 'fsltra64bpkdmchCS' opt; do
>  	case $opt in
>  		f)
>  			subflows_tests
> @@ -1806,6 +1854,9 @@ while getopts 'fsltra64bpkdchCS' opt; do
>  		d)
>  			deny_join_id0_tests
>  			;;
> +		m)
> +			fullmesh_tests
> +			;;
>  		c)
>  			;;
>  		C)



* Re: [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh
  2021-07-27  9:38   ` [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote " Paolo Abeni
@ 2021-07-27 12:36     ` Geliang Tang
  0 siblings, 0 replies; 15+ messages in thread
From: Geliang Tang @ 2021-07-27 12:36 UTC (permalink / raw)
  To: Paolo Abeni; +Cc: mptcp, Geliang Tang

Paolo Abeni <pabeni@redhat.com> wrote on Tue, Jul 27, 2021 at 5:38 PM:
>
> Hello,
>
> On Tue, 2021-07-27 at 15:58 +0800, Geliang Tang wrote:
> > From: Geliang Tang <geliangtang@xiaomi.com>
> >
> > This patch added and managed a new per endpoint flag, named
> > MPTCP_PM_ADDR_FLAG_FULLMESH.
> >
> > In mptcp_pm_create_subflow_or_signal_addr(), if such flag is set, instead
> > of:
> >
> >         remote_address((struct sock_common *)sk, &remote);
> >
> > fill a temporary allocated array of all known remote address. After
> > releaseing the pm lock loop on such array and create a subflow for each
> > remote address from the given local.
> >
> > Note that the we could still use an array even for non 'fullmesh'
> > endpoint: with a single entry corresponding to the primary MPC subflow
> > remote address.
> >
> > Suggested-by: Paolo Abeni <pabeni@redhat.com>
> > Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
> > ---
> >  include/uapi/linux/mptcp.h |  1 +
> >  net/mptcp/pm_netlink.c     | 80 +++++++++++++++++++++++++++++++++++---
> >  2 files changed, 76 insertions(+), 5 deletions(-)
> >
> > diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
> > index 7b05f7102321..f66038b9551f 100644
> > --- a/include/uapi/linux/mptcp.h
> > +++ b/include/uapi/linux/mptcp.h
> > @@ -73,6 +73,7 @@ enum {
> >  #define MPTCP_PM_ADDR_FLAG_SIGNAL                    (1 << 0)
> >  #define MPTCP_PM_ADDR_FLAG_SUBFLOW                   (1 << 1)
> >  #define MPTCP_PM_ADDR_FLAG_BACKUP                    (1 << 2)
> > +#define MPTCP_PM_ADDR_FLAG_FULLMESH                  (1 << 3)
> >
> >  enum {
> >       MPTCP_PM_CMD_UNSPEC,
> > diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
> > index ba0e1d71504d..2259c424485f 100644
> > --- a/net/mptcp/pm_netlink.c
> > +++ b/net/mptcp/pm_netlink.c
> > @@ -158,6 +158,27 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
> >       return false;
> >  }
> >
> > +static bool lookup_subflow_by_addrs(const struct list_head *list,
> > +                                 struct mptcp_addr_info *saddr,
> > +                                 struct mptcp_addr_info *daddr)
> > +{
> > +     struct mptcp_subflow_context *subflow;
> > +     struct mptcp_addr_info local, remote;
> > +     struct sock_common *skc;
> > +
> > +     list_for_each_entry(subflow, list, node) {
> > +             skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
> > +
> > +             local_address(skc, &local);
> > +             remote_address(skc, &remote);
> > +             if (addresses_equal(&local, saddr, saddr->port) &&
> > +                 addresses_equal(&remote, daddr, daddr->port))
> > +                     return true;
> > +     }
> > +
> > +     return false;
> > +}
>
> I'm sorry for not noticing this earlier, do we need this function and
> the related check in fill_remote_addresses_vec()?
>
> 'saddr' is the return value of select_local_address(), so existing
> subflows is bound to such address.

I'll drop lookup_subflow_by_addrs() in v6.

>
> > +
> >  static struct mptcp_pm_addr_entry *
> >  select_local_address(const struct pm_nl_pernet *pernet,
> >                    struct mptcp_sock *msk)
> > @@ -410,6 +431,53 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
> >       }
> >  }
> >
> > +static bool lookup_address_in_vec(struct mptcp_addr_info *addrs, unsigned int nr,
> > +                               struct mptcp_addr_info *addr)
> > +{
> > +     int i;
> > +
> > +     for (i = 0; i < nr; i++) {
> > +             if (addresses_equal(&addrs[i], addr, addr->port))
> > +                     return true;
> > +     }
> > +
> > +     return false;
> > +}
> > +
> > +static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
> > +                                           struct mptcp_pm_addr_entry *local,
> > +                                           struct mptcp_addr_info *addrs)
> > +{
> > +     struct sock *sk = (struct sock *)msk, *ssk;
> > +     struct mptcp_subflow_context *subflow;
> > +     struct mptcp_addr_info remote = { 0 };
>
> Minor nit: no need to initialize the 'remote' variable

The 'id' field of 'remote' needs to be initialized, otherwise it will be
a random number.

>
> Cheers,
>
> Paolo
>


* Re: [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh
  2021-07-27  9:51   ` Paolo Abeni
@ 2021-07-27 12:40     ` Geliang Tang
  0 siblings, 0 replies; 15+ messages in thread
From: Geliang Tang @ 2021-07-27 12:40 UTC (permalink / raw)
  To: Paolo Abeni; +Cc: mptcp, Geliang Tang

Paolo Abeni <pabeni@redhat.com> wrote on Tue, Jul 27, 2021 at 5:51 PM:
>
> Hello,
>
>
> I'm sorry for the partial feedback so far, I noticed more things
> looking at the following patches. Again, I'm sorry!
>
> On Tue, 2021-07-27 at 15:58 +0800, Geliang Tang wrote:
> > +static bool lookup_subflow_by_addrs(const struct list_head *list,
> > +                                 struct mptcp_addr_info *saddr,
> > +                                 struct mptcp_addr_info *daddr)
> > +{
>
> both 'saddr' and 'daddr' could be 'const', I think.
>
> > +     struct mptcp_subflow_context *subflow;
> > +     struct mptcp_addr_info local, remote;
> > +     struct sock_common *skc;
> > +
> > +     list_for_each_entry(subflow, list, node) {
> > +             skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
> > +
> > +             local_address(skc, &local);
> > +             remote_address(skc, &remote);
> > +             if (addresses_equal(&local, saddr, saddr->port) &&
> > +                 addresses_equal(&remote, daddr, daddr->port))
> > +                     return true;
> > +     }
> > +
> > +     return false;
> > +}
> > +
> >  static struct mptcp_pm_addr_entry *
> >  select_local_address(const struct pm_nl_pernet *pernet,
> >                    struct mptcp_sock *msk)
> > @@ -410,6 +431,53 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
> >       }
> >  }
> >
> > +static bool lookup_address_in_vec(struct mptcp_addr_info *addrs, unsigned int nr,
> > +                               struct mptcp_addr_info *addr)
>
> 'addr' could be 'const', I think.

It can't be 'const' here, since the 2nd argument of addresses_equal() is
non-const.

The same applies to all the other 'const's in this comment.

>
> > +{
> > +     int i;
> > +
> > +     for (i = 0; i < nr; i++) {
> > +             if (addresses_equal(&addrs[i], addr, addr->port))
> > +                     return true;
> > +     }
> > +
> > +     return false;
> > +}
> > +
> > +static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
> > +                                           struct mptcp_pm_addr_entry *local,
> > +                                           struct mptcp_addr_info *addrs)
> > +{
>
> 'local' could be 'const', I think.
>
> hopefully no more comments on this patch ;)
>
> Cheers,
>
> Paolo
>


* Re: [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases
  2021-07-27 10:03         ` [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases Paolo Abeni
@ 2021-07-27 12:49           ` Geliang Tang
  0 siblings, 0 replies; 15+ messages in thread
From: Geliang Tang @ 2021-07-27 12:49 UTC (permalink / raw)
  To: Paolo Abeni; +Cc: mptcp, Geliang Tang

Paolo Abeni <pabeni@redhat.com> wrote on Tue, Jul 27, 2021 at 6:03 PM:
>
> On Tue, 2021-07-27 at 15:58 +0800, Geliang Tang wrote:
> > From: Geliang Tang <geliangtang@xiaomi.com>
> >
> > This patch added the testcases for the fullmesh address flag of the path
> > manager.
> >
> > Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
> > ---
> >  .../testing/selftests/net/mptcp/mptcp_join.sh | 57 ++++++++++++++++++-
> >  1 file changed, 54 insertions(+), 3 deletions(-)
> >
> > diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
> > index 937e861e9490..ca19762b9c6e 100755
> > --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
> > +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
> > @@ -367,7 +367,13 @@ do_transfer()
> >       fi
> >
> >       if [ $addr_nr_ns2 -gt 0 ]; then
> > -             let add_nr_ns2=addr_nr_ns2
> > +             if [ $addr_nr_ns2 -gt 10 ]; then
> > +                     let add_nr_ns2=addr_nr_ns2-10
> > +                     flags=subflow,fullmesh
> > +             else
> > +                     let add_nr_ns2=addr_nr_ns2
> > +                     flags=subflow
> > +             fi
> >               counter=3
> >               sleep 1
> >               while [ $add_nr_ns2 -gt 0 ]; do
> > @@ -377,7 +383,7 @@ do_transfer()
> >                       else
> >                               addr="10.0.$counter.2"
> >                       fi
> > -                     ip netns exec $ns2 ./pm_nl_ctl add $addr flags subflow
> > +                     ip netns exec $ns2 ./pm_nl_ctl add $addr flags $flags
> >                       let counter+=1
> >                       let add_nr_ns2-=1
> >               done
> > @@ -1697,6 +1703,46 @@ deny_join_id0_tests()
> >       chk_join_nr "subflow and address allow join id0 2" 1 1 1
> >  }
> >
> > +fullmesh_tests()
> > +{
> > +     # fullmesh 1
> > +     reset
> > +     ip netns exec $ns1 ./pm_nl_ctl limits 8 8
> > +     ip netns exec $ns2 ./pm_nl_ctl limits 8 8
> > +     ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow,fullmesh
> > +     run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
> > +     chk_join_nr "fullmesh test 1" 2 2 2
> > +     chk_add_nr 1 1
> > +
> > +     # fullmesh 2
> > +     reset
> > +     ip netns exec $ns1 ./pm_nl_ctl limits 8 8
> > +     ip netns exec $ns2 ./pm_nl_ctl limits 8 8
> > +     ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow,fullmesh
> > +     ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow,fullmesh
> > +     run_tests $ns1 $ns2 10.0.1.1 0 1 0 slow
> > +     chk_join_nr "fullmesh test 2" 4 4 4
> > +     chk_add_nr 1 1
>
> This 2 tests are a bit redundand, I think. With a single remote known
> address, the final topology will be the same as with non-fullmesh
> subflows. Perhaps we can keep just one of this cases?

Drop test 1 in v6.

>
> > +
> > +     # fullmesh 3
> > +     reset
> > +     ip netns exec $ns1 ./pm_nl_ctl limits 8 8
> > +     ip netns exec $ns2 ./pm_nl_ctl limits 8 8
> > +     ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
>
> Here I think we need at least one 'fullmesh' subflow in ns2? Otherwise
> this will be a non-fullmesh topology.
>
> > +     run_tests $ns1 $ns2 10.0.1.1 0 0 11 slow

This 11 means one fullmesh subflow.

> > +     chk_join_nr "fullmesh test 3" 3 3 3
> > +     chk_add_nr 1 1
> > +
> > +     # fullmesh 4
> > +     reset
> > +     ip netns exec $ns1 ./pm_nl_ctl limits 8 8
> > +     ip netns exec $ns2 ./pm_nl_ctl limits 8 8
> > +     ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
>
> And perhaps here 2 'fullmesh' subflows? So we test explicitly the most
> expected conf:

And 12 means two fullmesh subflows. I should add some comments here.
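
Maybe something like this in do_transfer(), just a draft:

	# addr_nr_ns2 values above 10 request fullmesh subflows: the number
	# of 'subflow,fullmesh' endpoints to add is addr_nr_ns2 - 10, so
	# 11 adds one fullmesh endpoint and 12 adds two of them.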

>
> Client          Server
> C1 -----------> S1
>     \       /->
>      \     /
>     - \ --/
>    /   \
>   /     \
>  /       ----->
> C2 -----------> S2
>
> (not sure how much the above "graph" will be readable :)
>
> Additionally it would be nice to explicitly test a fullmesh topology
> where we hit the max_subflows limit, e.g. 2 local address 3 remote ones
> and max_subflows=4

I'll add a limit test in v6.

>
>
>
> > +     run_tests $ns1 $ns2 10.0.1.1 0 0 12 slow
> > +     chk_join_nr "fullmesh test 4" 5 5 5
> > +     chk_add_nr 1 1
> > +}
> > +
> >  all_tests()
> >  {
> >       subflows_tests
> > @@ -1712,6 +1758,7 @@ all_tests()
> >       syncookies_tests
> >       checksum_tests
> >       deny_join_id0_tests
> > +     fullmesh_tests
> >  }
> >
> >  usage()
> > @@ -1730,6 +1777,7 @@ usage()
> >       echo "  -k syncookies_tests"
> >       echo "  -S checksum_tests"
> >       echo "  -d deny_join_id0_tests"
> > +     echo "  -m fullmesh_tests"
> >       echo "  -c capture pcap files"
> >       echo "  -C enable data checksum"
> >       echo "  -h help"
> > @@ -1765,7 +1813,7 @@ if [ $do_all_tests -eq 1 ]; then
> >       exit $ret
> >  fi
> >
> > -while getopts 'fsltra64bpkdchCS' opt; do
> > +while getopts 'fsltra64bpkdmchCS' opt; do
> >       case $opt in
> >               f)
> >                       subflows_tests
> > @@ -1806,6 +1854,9 @@ while getopts 'fsltra64bpkdchCS' opt; do
> >               d)
> >                       deny_join_id0_tests
> >                       ;;
> > +             m)
> > +                     fullmesh_tests
> > +                     ;;
> >               c)
> >                       ;;
> >               C)
>


* Re: [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local addresses fullmesh
  2021-07-27  9:52     ` [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local addresses fullmesh Paolo Abeni
@ 2021-07-27 12:51       ` Geliang Tang
  0 siblings, 0 replies; 15+ messages in thread
From: Geliang Tang @ 2021-07-27 12:51 UTC (permalink / raw)
  To: Paolo Abeni; +Cc: mptcp, Geliang Tang

Paolo Abeni <pabeni@redhat.com> wrote on Tue, Jul 27, 2021 at 5:52 PM:
>
> On Tue, 2021-07-27 at 15:58 +0800, Geliang Tang wrote:
> > From: Geliang Tang <geliangtang@xiaomi.com>
> >
> > In mptcp_pm_nl_add_addr_received(), fill a temporary allocate array of
> > all local address corresponding to the fullmesh endpoint. If such array
> > is empty, keep the current behavior.
> >
> > Elsewhere loop on such array and create a subflow for each local address
> > towards the given remote address
> >
> > Suggested-by: Paolo Abeni <pabeni@redhat.com>
> > Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
> > ---
> >  net/mptcp/pm_netlink.c | 60 ++++++++++++++++++++++++++++++++++++++----
> >  1 file changed, 55 insertions(+), 5 deletions(-)
> >
> > diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
> > index 2259c424485f..a85bac950f3b 100644
> > --- a/net/mptcp/pm_netlink.c
> > +++ b/net/mptcp/pm_netlink.c
> > @@ -554,13 +554,62 @@ static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
> >       mptcp_pm_create_subflow_or_signal_addr(msk);
> >  }
> >
> > +static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
> > +                                          struct mptcp_addr_info *remote,
> > +                                          struct mptcp_pm_addr_entry *entries)
>
> Minor nit: some comments here before the function describing it would
> be helpful, thanks!

Sure, added in v6.

>
> Also 'remote' could be 'const', I think.
>
> > +{
> > +     struct mptcp_pm_addr_entry local, *entry;
> > +     struct sock *sk = (struct sock *)msk;
> > +     struct pm_nl_pernet *pernet;
> > +     unsigned int subflows_max;
> > +     int i = 0;
> > +
> > +     pernet = net_generic(sock_net(sk), pm_nl_pernet_id);
> > +     subflows_max = mptcp_pm_get_subflows_max(msk);
> > +
> > +     rcu_read_lock();
> > +     __mptcp_flush_join_list(msk);
> > +     list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
> > +             if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
> > +                     continue;
> > +
> > +             if (entry->addr.family != sk->sk_family) {
> > +#if IS_ENABLED(CONFIG_MPTCP_IPV6)
> > +                     if ((entry->addr.family == AF_INET &&
> > +                          !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) ||
> > +                         (sk->sk_family == AF_INET &&
> > +                          !ipv6_addr_v4mapped(&entry->addr.addr6)))
> > +#endif
> > +                             continue;
> > +             }
> > +
> > +             if (!lookup_subflow_by_addrs(&msk->conn_list, &entry->addr, remote) &&
> > +                 msk->pm.subflows < subflows_max) {
> > +                     msk->pm.subflows++;
> > +                     entries[i++] = *entry;
> > +             }
> > +     }
> > +     rcu_read_unlock();
> > +
> > +     if (!i) {
> > +             memset(&local, 0, sizeof(local));
> > +             local.addr.family = remote->family;
> > +
> > +             msk->pm.subflows++;
> > +             entries[i++] = local;
> > +     }
> > +
> > +     return i;
> > +}
> > +
> >  static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
> >  {
> > +     struct mptcp_pm_addr_entry entries[MPTCP_PM_ADDR_MAX];
>
> mptcp_pm_addr_entry is quite larger than mptcp_addr_info (should be 64
> bytes vs 24). 64 * 8 == 512 bytes could be a bit too much storage for
> the stack. What about using instead:
>
> struct mptcp_pm_addr_info addresses[MPTCP_PM_ADDR_MAX];
> u8 flags[MPTCP_PM_ADDR_MAX];
>
> ?
>
> And than pass the 2 arguments to fill_local_addresses_vec(), instead of
> the single 'entries' arg.

I'll pass 3 arguments, 'addr', 'flags' and 'ifindex', instead of 'entries' in v6.

Thanks,
-Geliang

>
> Cheers,
>
> Paolo
>


Thread overview: 15+ messages
2021-07-27  7:58 [MPTCP][PATCH v5 mptcp-next 0/5] fullmesh path manager support Geliang Tang
2021-07-27  7:58 ` [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote addresses fullmesh Geliang Tang
2021-07-27  7:58   ` [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local " Geliang Tang
2021-07-27  7:58     ` [MPTCP][PATCH v5 mptcp-next 3/5] selftests: mptcp: set and print the fullmesh flag Geliang Tang
2021-07-27  7:58       ` [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases Geliang Tang
2021-07-27  7:58         ` [MPTCP][PATCH v5 mptcp-next 5/5] selftests: mptcp: delete uncontinuous removing ids Geliang Tang
2021-07-27 10:03         ` [MPTCP][PATCH v5 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases Paolo Abeni
2021-07-27 12:49           ` Geliang Tang
2021-07-27  9:52     ` [MPTCP][PATCH v5 mptcp-next 2/5] mptcp: local addresses fullmesh Paolo Abeni
2021-07-27 12:51       ` Geliang Tang
2021-07-27  9:38   ` [MPTCP][PATCH v5 mptcp-next 1/5] mptcp: remote " Paolo Abeni
2021-07-27 12:36     ` Geliang Tang
2021-07-27  9:40   ` Paolo Abeni
2021-07-27  9:51   ` Paolo Abeni
2021-07-27 12:40     ` Geliang Tang
