* [MPTCP][PATCH v4 mptcp-next 0/5] fullmesh path manager support
@ 2021-07-26 11:08 Geliang Tang
  2021-07-26 11:08 ` [MPTCP][PATCH v4 mptcp-next 1/5] mptcp: remote addresses fullmesh Geliang Tang
From: Geliang Tang @ 2021-07-26 11:08 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang

From: Geliang Tang <geliangtang@xiaomi.com>

v4:
 - add new helpers, fill_local/remote_addresses_vec
 - add max_subflows checks
 - add 'local' into the local addresses array only when no fullmesh
   entry is found.
 - add signal,fullmesh check

v3:
 - the in-kernel fullmesh path manager has been dropped from this
   patchset; only the fullmesh flag support code is kept.

v2:
 - Implement the fullmesh mode as an extension to the netlink PM rather
   than a standalone PM, as Paolo suggested.
 - drop duplicate code.
 - add a new per endpoint flag MPTCP_PM_ADDR_FLAG_FULLMESH.

Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/193

Geliang Tang (5):
  mptcp: remote addresses fullmesh
  mptcp: local addresses fullmesh
  selftests: mptcp: set and print the fullmesh flag
  selftests: mptcp: add fullmesh testcases
  selftests: mptcp: delete uncontinuous removing ids

 include/uapi/linux/mptcp.h                    |   1 +
 net/mptcp/pm_netlink.c                        | 125 ++++++++++++++++--
 .../testing/selftests/net/mptcp/mptcp_join.sh |  59 +++++++--
 tools/testing/selftests/net/mptcp/pm_nl_ctl.c |  16 ++-
 4 files changed, 181 insertions(+), 20 deletions(-)

-- 
2.31.1



* [MPTCP][PATCH v4 mptcp-next 1/5] mptcp: remote addresses fullmesh
  2021-07-26 11:08 [MPTCP][PATCH v4 mptcp-next 0/5] fullmesh path manager support Geliang Tang
@ 2021-07-26 11:08 ` Geliang Tang
  2021-07-26 11:08   ` [MPTCP][PATCH v4 mptcp-next 2/5] mptcp: local " Geliang Tang
From: Geliang Tang @ 2021-07-26 11:08 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang, Paolo Abeni

From: Geliang Tang <geliangtang@xiaomi.com>

This patch adds and manages a new per-endpoint flag, named
MPTCP_PM_ADDR_FLAG_FULLMESH.

In mptcp_pm_create_subflow_or_signal_addr(), if such flag is set, instead
of:

        remote_address((struct sock_common *)sk, &remote);

fill a temporarily allocated array with all the known remote addresses.
After releasing the PM lock, loop on such array and create a subflow for
each remote address from the given local address.

Note that we could still use an array even for non-'fullmesh' endpoints:
with a single entry corresponding to the primary MPC subflow remote
address.
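
A minimal usage sketch, borrowed from the selftests added later in this
series (patch 4/5) and assuming the pm_nl_ctl fullmesh support from
patch 3/5; the $ns1/$ns2 namespace names are the selftest ones:

        # server announces an extra address
        ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
        # client endpoint marked as fullmesh: subflows are created from
        # 10.0.3.2 towards each remote address known on the connection
        ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow,fullmesh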

Suggested-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
---
 include/uapi/linux/mptcp.h |  1 +
 net/mptcp/pm_netlink.c     | 66 +++++++++++++++++++++++++++++++++++---
 2 files changed, 62 insertions(+), 5 deletions(-)

diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index 7b05f7102321..f66038b9551f 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -73,6 +73,7 @@ enum {
 #define MPTCP_PM_ADDR_FLAG_SIGNAL			(1 << 0)
 #define MPTCP_PM_ADDR_FLAG_SUBFLOW			(1 << 1)
 #define MPTCP_PM_ADDR_FLAG_BACKUP			(1 << 2)
+#define MPTCP_PM_ADDR_FLAG_FULLMESH			(1 << 3)
 
 enum {
 	MPTCP_PM_CMD_UNSPEC,
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index ba0e1d71504d..2a09d28d98be 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -158,6 +158,27 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
 	return false;
 }
 
+static bool lookup_subflow_by_addrs(const struct list_head *list,
+				    struct mptcp_addr_info *saddr,
+				    struct mptcp_addr_info *daddr)
+{
+	struct mptcp_subflow_context *subflow;
+	struct mptcp_addr_info local, remote;
+	struct sock_common *skc;
+
+	list_for_each_entry(subflow, list, node) {
+		skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
+
+		local_address(skc, &local);
+		remote_address(skc, &remote);
+		if (addresses_equal(&local, saddr, saddr->port) &&
+		    addresses_equal(&remote, daddr, daddr->port))
+			return true;
+	}
+
+	return false;
+}
+
 static struct mptcp_pm_addr_entry *
 select_local_address(const struct pm_nl_pernet *pernet,
 		     struct mptcp_sock *msk)
@@ -410,6 +431,39 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
 	}
 }
 
+static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
+					      struct mptcp_pm_addr_entry *local,
+					      struct mptcp_addr_info *addrs)
+{
+	struct sock *sk = (struct sock *)msk, *ssk;
+	struct mptcp_subflow_context *subflow;
+	struct mptcp_addr_info remote = { 0 };
+	struct pm_nl_pernet *pernet;
+	unsigned int subflows_max;
+	int i = 0;
+
+	pernet = net_generic(sock_net(sk), pm_nl_pernet_id);
+	subflows_max = mptcp_pm_get_subflows_max(msk);
+
+	if (!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)) {
+		remote_address((struct sock_common *)sk, &remote);
+		msk->pm.subflows++;
+		addrs[i++] = remote;
+	} else {
+		mptcp_for_each_subflow(msk, subflow) {
+			ssk = mptcp_subflow_tcp_sock(subflow);
+			remote_address((struct sock_common *)ssk, &remote);
+			if (!lookup_subflow_by_addrs(&msk->conn_list, &local->addr, &remote) &&
+			    msk->pm.subflows < subflows_max) {
+				msk->pm.subflows++;
+				addrs[i++] = remote;
+			}
+		}
+	}
+
+	return i;
+}
+
 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
 {
 	struct sock *sk = (struct sock *)msk;
@@ -455,15 +509,17 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
 	    !READ_ONCE(msk->pm.remote_deny_join_id0)) {
 		local = select_local_address(pernet, msk);
 		if (local) {
-			struct mptcp_addr_info remote = { 0 };
+			struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
+			int i, nr;
 
 			msk->pm.local_addr_used++;
-			msk->pm.subflows++;
 			check_work_pending(msk);
-			remote_address((struct sock_common *)sk, &remote);
+			nr = fill_remote_addresses_vec(msk, local, addrs);
 			spin_unlock_bh(&msk->pm.lock);
-			__mptcp_subflow_connect(sk, &local->addr, &remote,
-						local->flags, local->ifindex);
+			for (i = 0; i < nr; i++) {
+				__mptcp_subflow_connect(sk, &local->addr, &addrs[i],
+							local->flags, local->ifindex);
+			}
 			spin_lock_bh(&msk->pm.lock);
 			return;
 		}
-- 
2.31.1



* [MPTCP][PATCH v4 mptcp-next 2/5] mptcp: local addresses fullmesh
  2021-07-26 11:08 ` [MPTCP][PATCH v4 mptcp-next 1/5] mptcp: remote addresses fullmesh Geliang Tang
@ 2021-07-26 11:08   ` Geliang Tang
  2021-07-26 11:08     ` [MPTCP][PATCH v4 mptcp-next 3/5] selftests: mptcp: set and print the fullmesh flag Geliang Tang
From: Geliang Tang @ 2021-07-26 11:08 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang, Paolo Abeni

From: Geliang Tang <geliangtang@xiaomi.com>

In mptcp_pm_nl_add_addr_received(), fill a temporarily allocated array
with all the local addresses corresponding to fullmesh endpoints. If
such array is empty, keep the current behavior.

Otherwise, loop on such array and create a subflow for each local
address towards the given remote address.
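
A sketch of the expected interaction, assuming a setup similar to the
selftests in patch 4/5 (the addresses below are only illustrative):

        # client: two local endpoints flagged as fullmesh
        ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow,fullmesh
        ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow,fullmesh
        # when the server announces e.g. 10.0.2.1 via ADD_ADDR, one
        # subflow is created from each local address above towards it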

Suggested-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
---
 net/mptcp/pm_netlink.c | 59 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 54 insertions(+), 5 deletions(-)

diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 2a09d28d98be..10f09a96952b 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -540,13 +540,61 @@ static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
 	mptcp_pm_create_subflow_or_signal_addr(msk);
 }
 
+static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+					     struct mptcp_addr_info *remote,
+					     struct mptcp_pm_addr_entry *entries)
+{
+	struct mptcp_pm_addr_entry local, *entry;
+	struct sock *sk = (struct sock *)msk;
+	struct pm_nl_pernet *pernet;
+	unsigned int subflows_max;
+	int i = 0;
+
+	pernet = net_generic(sock_net(sk), pm_nl_pernet_id);
+	subflows_max = mptcp_pm_get_subflows_max(msk);
+
+	rcu_read_lock();
+	__mptcp_flush_join_list(msk);
+	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
+			continue;
+
+		if (entry->addr.family != sk->sk_family) {
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+			if ((entry->addr.family == AF_INET &&
+			     !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) ||
+			    (sk->sk_family == AF_INET &&
+			     !ipv6_addr_v4mapped(&entry->addr.addr6)))
+#endif
+				continue;
+		}
+
+		if (!lookup_subflow_by_addrs(&msk->conn_list, &entry->addr, remote) &&
+		    msk->pm.subflows < subflows_max) {
+			msk->pm.subflows++;
+			entries[i++] = *entry;
+		}
+	}
+	rcu_read_unlock();
+
+	if (!i) {
+		memset(&local, 0, sizeof(local));
+		local.addr.family = remote->family;
+
+		entries[i++] = local;
+	}
+
+	return i;
+}
+
 static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
 {
+	struct mptcp_pm_addr_entry entries[MPTCP_PM_ADDR_MAX];
 	struct sock *sk = (struct sock *)msk;
 	unsigned int add_addr_accept_max;
 	struct mptcp_addr_info remote;
-	struct mptcp_addr_info local;
 	unsigned int subflows_max;
+	int i, nr;
 
 	add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
 	subflows_max = mptcp_pm_get_subflows_max(msk);
@@ -570,11 +618,12 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
 	remote = msk->pm.remote;
 	if (!remote.port)
 		remote.port = sk->sk_dport;
-	memset(&local, 0, sizeof(local));
-	local.family = remote.family;
-
+	nr = fill_local_addresses_vec(msk, &remote, entries);
 	spin_unlock_bh(&msk->pm.lock);
-	__mptcp_subflow_connect(sk, &local, &remote, 0, 0);
+	for (i = 0; i < nr; i++) {
+		__mptcp_subflow_connect(sk, &entries[i].addr, &remote,
+					entries[i].flags, entries[i].ifindex);
+	}
 	spin_lock_bh(&msk->pm.lock);
 
 add_addr_echo:
-- 
2.31.1



* [MPTCP][PATCH v4 mptcp-next 3/5] selftests: mptcp: set and print the fullmesh flag
  2021-07-26 11:08   ` [MPTCP][PATCH v4 mptcp-next 2/5] mptcp: local " Geliang Tang
@ 2021-07-26 11:08     ` Geliang Tang
  2021-07-26 11:08       ` [MPTCP][PATCH v4 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases Geliang Tang
From: Geliang Tang @ 2021-07-26 11:08 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang

From: Geliang Tang <geliangtang@xiaomi.com>

This patch handles the MPTCP_PM_ADDR_FLAG_FULLMESH flag in add_addr()
and print_addr(), to set and print out the fullmesh flag respectively.
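
A usage sketch; the exact dump layout is the tool's pre-existing one and
is only approximated in the comment:

        ./pm_nl_ctl add 10.0.2.1 flags subflow,fullmesh
        ./pm_nl_ctl dump
        # the dumped entry should now list "subflow,fullmesh" among its
        # flags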

Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
---
 tools/testing/selftests/net/mptcp/pm_nl_ctl.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index 115decfdc1ef..354784512748 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -25,7 +25,7 @@
 static void syntax(char *argv[])
 {
 	fprintf(stderr, "%s add|get|set|del|flush|dump|accept [<args>]\n", argv[0]);
-	fprintf(stderr, "\tadd [flags signal|subflow|backup] [id <nr>] [dev <name>] <ip>\n");
+	fprintf(stderr, "\tadd [flags signal|subflow|backup|fullmesh] [id <nr>] [dev <name>] <ip>\n");
 	fprintf(stderr, "\tdel <id> [<ip>]\n");
 	fprintf(stderr, "\tget <id>\n");
 	fprintf(stderr, "\tset <ip> [flags backup|nobackup]\n");
@@ -236,11 +236,18 @@ int add_addr(int fd, int pm_family, int argc, char *argv[])
 					flags |= MPTCP_PM_ADDR_FLAG_SIGNAL;
 				else if (!strcmp(tok, "backup"))
 					flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
+				else if (!strcmp(tok, "fullmesh"))
+					flags |= MPTCP_PM_ADDR_FLAG_FULLMESH;
 				else
 					error(1, errno,
 					      "unknown flag %s", argv[arg]);
 			}
 
+			if (flags & MPTCP_PM_ADDR_FLAG_SIGNAL &&
+			    flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
+				error(1, errno, "error flag fullmesh");
+			}
+
 			rta = (void *)(data + off);
 			rta->rta_type = MPTCP_PM_ADDR_ATTR_FLAGS;
 			rta->rta_len = RTA_LENGTH(4);
@@ -422,6 +429,13 @@ static void print_addr(struct rtattr *attrs, int len)
 					printf(",");
 			}
 
+			if (flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
+				printf("fullmesh");
+				flags &= ~MPTCP_PM_ADDR_FLAG_FULLMESH;
+				if (flags)
+					printf(",");
+			}
+
 			/* bump unknown flags, if any */
 			if (flags)
 				printf("0x%x", flags);
-- 
2.31.1



* [MPTCP][PATCH v4 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases
  2021-07-26 11:08     ` [MPTCP][PATCH v4 mptcp-next 3/5] selftests: mptcp: set and print the fullmesh flag Geliang Tang
@ 2021-07-26 11:08       ` Geliang Tang
  2021-07-26 11:08         ` [MPTCP][PATCH v4 mptcp-next 5/5] selftests: mptcp: delete uncontinuous removing ids Geliang Tang
From: Geliang Tang @ 2021-07-26 11:08 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang

From: Geliang Tang <geliangtang@xiaomi.com>

This patch adds testcases for the fullmesh address flag of the path
manager.
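
The new cases reuse the existing addr_nr_ns1/addr_nr_ns2 parameters of
do_transfer(): a value above 10 means "add (value - 10) endpoints at run
time with the fullmesh flag" (signal,fullmesh on ns1, subflow,fullmesh
on ns2) instead of plain signal/subflow endpoints. For instance, the
second case below runs:

        # addr_nr_ns2 == 11: add one subflow,fullmesh endpoint on ns2 at
        # run time, on top of the signal address announced by ns1
        run_tests $ns1 $ns2 10.0.1.1 0 0 11 slow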

Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
---
 .../testing/selftests/net/mptcp/mptcp_join.sh | 49 +++++++++++++++++--
 1 file changed, 44 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 937e861e9490..9ceb6052b513 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -325,7 +325,13 @@ do_transfer()
 	cpid=$!
 
 	if [ $addr_nr_ns1 -gt 0 ]; then
-		let add_nr_ns1=addr_nr_ns1
+		if [ $addr_nr_ns1 -gt 10 ]; then
+			let add_nr_ns1=addr_nr_ns1-10
+			flags=signal,fullmesh
+		else
+			let add_nr_ns1=addr_nr_ns1
+			flags=signal
+		fi
 		counter=2
 		sleep 1
 		while [ $add_nr_ns1 -gt 0 ]; do
@@ -335,7 +341,7 @@ do_transfer()
 			else
 				addr="10.0.$counter.1"
 			fi
-			ip netns exec $ns1 ./pm_nl_ctl add $addr flags signal
+			ip netns exec $ns1 ./pm_nl_ctl add $addr flags $flags
 			let counter+=1
 			let add_nr_ns1-=1
 		done
@@ -367,7 +373,13 @@ do_transfer()
 	fi
 
 	if [ $addr_nr_ns2 -gt 0 ]; then
-		let add_nr_ns2=addr_nr_ns2
+		if [ $addr_nr_ns2 -gt 10 ]; then
+			let add_nr_ns2=addr_nr_ns2-10
+			flags=subflow,fullmesh
+		else
+			let add_nr_ns2=addr_nr_ns2
+			flags=subflow
+		fi
 		counter=3
 		sleep 1
 		while [ $add_nr_ns2 -gt 0 ]; do
@@ -377,7 +389,7 @@ do_transfer()
 			else
 				addr="10.0.$counter.2"
 			fi
-			ip netns exec $ns2 ./pm_nl_ctl add $addr flags subflow
+			ip netns exec $ns2 ./pm_nl_ctl add $addr flags $flags
 			let counter+=1
 			let add_nr_ns2-=1
 		done
@@ -1697,6 +1709,28 @@ deny_join_id0_tests()
 	chk_join_nr "subflow and address allow join id0 2" 1 1 1
 }
 
+fullmesh_tests()
+{
+	# fullmesh 1
+	reset
+	ip netns exec $ns1 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns2 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+	ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow,fullmesh
+	run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+	chk_join_nr "signal address fullmesh 1" 2 2 2
+	chk_add_nr 1 1
+
+	# fullmesh 2
+	reset
+	ip netns exec $ns1 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns2 ./pm_nl_ctl limits 8 8
+	ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+	run_tests $ns1 $ns2 10.0.1.1 0 0 11 slow
+	chk_join_nr "signal address fullmesh 2" 3 3 3
+	chk_add_nr 1 1
+}
+
 all_tests()
 {
 	subflows_tests
@@ -1712,6 +1746,7 @@ all_tests()
 	syncookies_tests
 	checksum_tests
 	deny_join_id0_tests
+	fullmesh_tests
 }
 
 usage()
@@ -1730,6 +1765,7 @@ usage()
 	echo "  -k syncookies_tests"
 	echo "  -S checksum_tests"
 	echo "  -d deny_join_id0_tests"
+	echo "  -m fullmesh_tests"
 	echo "  -c capture pcap files"
 	echo "  -C enable data checksum"
 	echo "  -h help"
@@ -1765,7 +1801,7 @@ if [ $do_all_tests -eq 1 ]; then
 	exit $ret
 fi
 
-while getopts 'fsltra64bpkdchCS' opt; do
+while getopts 'fsltra64bpkdmchCS' opt; do
 	case $opt in
 		f)
 			subflows_tests
@@ -1806,6 +1842,9 @@ while getopts 'fsltra64bpkdchCS' opt; do
 		d)
 			deny_join_id0_tests
 			;;
+		m)
+			fullmesh_tests
+			;;
 		c)
 			;;
 		C)
-- 
2.31.1



* [MPTCP][PATCH v4 mptcp-next 5/5] selftests: mptcp: delete uncontinuous removing ids
  2021-07-26 11:08       ` [MPTCP][PATCH v4 mptcp-next 4/5] selftests: mptcp: add fullmesh testcases Geliang Tang
@ 2021-07-26 11:08         ` Geliang Tang
From: Geliang Tang @ 2021-07-26 11:08 UTC (permalink / raw)
  To: mptcp, geliangtang; +Cc: Geliang Tang

From: Geliang Tang <geliangtang@xiaomi.com>

The address-removing testcases can only deal with contiguous address IDs.
This patch adds support for removing non-contiguous IDs.
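
The id is now read directly from the 'pm_nl_ctl dump' output instead of
being blindly incremented: the dump is word-split into a bash array and,
under the layout assumed by the script, each endpoint spans five fields
with its id in the second one, hence the index stepping by 5:

        dump=(`ip netns exec ${listener_ns} ./pm_nl_ctl dump`)
        # dump[1] -> id of the 1st entry, dump[6] -> id of the 2nd, ...
        id=${dump[$pos]}   # pos starts at 1 and is bumped by 5 per entry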

Fixes: f87744ad42446 ("selftests: mptcp: set addr id for removing testcases")
Signed-off-by: Geliang Tang <geliangtang@xiaomi.com>
---
 tools/testing/selftests/net/mptcp/mptcp_join.sh | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 9ceb6052b513..5c2fb282bf14 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -350,17 +350,18 @@ do_transfer()
 		let rm_nr_ns1=-addr_nr_ns1
 		if [ $rm_nr_ns1 -lt 8 ]; then
 			counter=1
+			pos=1
 			dump=(`ip netns exec ${listener_ns} ./pm_nl_ctl dump`)
 			if [ ${#dump[@]} -gt 0 ]; then
-				id=${dump[1]}
 				sleep 1
 
 				while [ $counter -le $rm_nr_ns1 ]
 				do
+					id=${dump[$pos]}
 					ip netns exec ${listener_ns} ./pm_nl_ctl del $id
 					sleep 1
 					let counter+=1
-					let id+=1
+					let pos+=5
 				done
 			fi
 		elif [ $rm_nr_ns1 -eq 8 ]; then
@@ -398,17 +399,18 @@ do_transfer()
 		let rm_nr_ns2=-addr_nr_ns2
 		if [ $rm_nr_ns2 -lt 8 ]; then
 			counter=1
+			pos=1
 			dump=(`ip netns exec ${connector_ns} ./pm_nl_ctl dump`)
 			if [ ${#dump[@]} -gt 0 ]; then
-				id=${dump[1]}
 				sleep 1
 
 				while [ $counter -le $rm_nr_ns2 ]
 				do
+					id=${dump[$pos]}
 					ip netns exec ${connector_ns} ./pm_nl_ctl del $id
 					sleep 1
 					let counter+=1
-					let id+=1
+					let pos+=5
 				done
 			fi
 		elif [ $rm_nr_ns2 -eq 8 ]; then
-- 
2.31.1

