From: Geliang Tang <geliangtang@gmail.com>
To: mptcp@lists.linux.dev
Cc: Geliang Tang <geliangtang@gmail.com>
Subject: [MPTCP][PATCH mptcp-next 3/9] mptcp: add fullmesh worker
Date: Fri, 9 Jul 2021 19:04:30 +0800 [thread overview]
Message-ID: <8b000137623aa94ab4f7c6b883c2003d91f3d147.1625825505.git.geliangtang@gmail.com> (raw)
In-Reply-To: <b699c90050c80f3c79b5fa42d1d9c18c46a21014.1625825505.git.geliangtang@gmail.com>
This patch implements the fullmesh worker named mptcp_pm_fm_work. In
it, handle the PM established status and invoke the function
mptcp_pm_fm_create_subflow to create the subflow.
Signed-off-by: Geliang Tang <geliangtang@gmail.com>
---
net/mptcp/pm_fullmesh.c | 95 +++++++++++++++++++++++++++++++++++++++++
net/mptcp/pm_netlink.c | 8 ++--
net/mptcp/protocol.c | 2 +
net/mptcp/protocol.h | 5 +++
4 files changed, 106 insertions(+), 4 deletions(-)
diff --git a/net/mptcp/pm_fullmesh.c b/net/mptcp/pm_fullmesh.c
index 4cdcb572b125..b27f13a031e3 100644
--- a/net/mptcp/pm_fullmesh.c
+++ b/net/mptcp/pm_fullmesh.c
@@ -26,6 +26,101 @@ struct pm_fm_pernet {
unsigned int next_id;
};
+static struct mptcp_fm_addr_entry *
+select_local_address(const struct pm_fm_pernet *pernet,
+ struct mptcp_sock *msk)
+{
+ struct mptcp_fm_addr_entry *entry, *ret = NULL;
+ struct sock *sk = (struct sock *)msk;
+
+ msk_owned_by_me(msk);
+
+ rcu_read_lock();
+ __mptcp_flush_join_list(msk);
+ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ if (entry->addr.family != sk->sk_family)
+ continue;
+
+ if (!lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) {
+ ret = entry;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static void check_work_pending(struct mptcp_sock *msk)
+{
+ if (msk->pm.local_addr_used == MPTCP_PM_FM_MAX_ADDR ||
+ msk->pm.subflows == MPTCP_PM_FM_MAX_ADDR)
+ WRITE_ONCE(msk->pm.work_pending, false);
+}
+
+static void mptcp_pm_fm_create_subflow(struct mptcp_sock *msk)
+{
+ unsigned int local_addr_max = MPTCP_PM_FM_MAX_ADDR;
+ unsigned int subflows_max = MPTCP_PM_FM_MAX_ADDR;
+ struct sock *sk = (struct sock *)msk;
+ struct mptcp_fm_addr_entry *local;
+ struct pm_fm_pernet *pernet;
+
+ pernet = net_generic(sock_net(sk), pm_fm_pernet_id);
+
+ if (msk->pm.local_addr_used < local_addr_max &&
+ msk->pm.subflows < subflows_max &&
+ !READ_ONCE(msk->pm.remote_deny_join_id0)) {
+ local = select_local_address(pernet, msk);
+ if (local) {
+ struct mptcp_addr_info remote = { 0 };
+
+ msk->pm.local_addr_used++;
+ msk->pm.subflows++;
+ check_work_pending(msk);
+ remote_address((struct sock_common *)sk, &remote);
+ spin_unlock_bh(&msk->pm.lock);
+ __mptcp_subflow_connect(sk, &local->addr, &remote, 0, 0);
+ spin_lock_bh(&msk->pm.lock);
+ return;
+ }
+
+ /* lookup failed, avoid further attempts later */
+ msk->pm.local_addr_used = local_addr_max;
+ check_work_pending(msk);
+ }
+}
+
+static void mptcp_pm_fm_fully_established(struct mptcp_sock *msk)
+{
+ mptcp_pm_fm_create_subflow(msk);
+}
+
+static void mptcp_pm_fm_subflow_established(struct mptcp_sock *msk)
+{
+ mptcp_pm_fm_create_subflow(msk);
+}
+
+void mptcp_pm_fm_work(struct mptcp_sock *msk)
+{
+ struct mptcp_pm_data *pm = &msk->pm;
+
+ msk_owned_by_me(msk);
+
+ spin_lock_bh(&msk->pm.lock);
+
+ pr_debug("msk=%p status=%x", msk, pm->status);
+ if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
+ pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
+ mptcp_pm_fm_fully_established(msk);
+ }
+ if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
+ pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
+ mptcp_pm_fm_subflow_established(msk);
+ }
+
+ spin_unlock_bh(&msk->pm.lock);
+}
+
void mptcp_pm_fm_data_init(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 56263c2c4014..d050dbd89e24 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -108,8 +108,8 @@ static void local_address(const struct sock_common *skc,
#endif
}
-static void remote_address(const struct sock_common *skc,
- struct mptcp_addr_info *addr)
+void remote_address(const struct sock_common *skc,
+ struct mptcp_addr_info *addr)
{
addr->family = skc->skc_family;
addr->port = skc->skc_dport;
@@ -121,8 +121,8 @@ static void remote_address(const struct sock_common *skc,
#endif
}
-static bool lookup_subflow_by_saddr(const struct list_head *list,
- struct mptcp_addr_info *saddr)
+bool lookup_subflow_by_saddr(const struct list_head *list,
+ struct mptcp_addr_info *saddr)
{
struct mptcp_subflow_context *subflow;
struct mptcp_addr_info cur;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 938690f87b8f..2af54eb5fe44 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2343,6 +2343,8 @@ static void mptcp_worker(struct work_struct *work)
pm = mptcp_get_path_manager(net);
if (!strcmp(pm, "netlink"))
mptcp_pm_nl_work(msk);
+ else if (!strcmp(pm, "fullmesh"))
+ mptcp_pm_fm_work(msk);
}
if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 3de2a298e8d9..99c23a1887d8 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -717,6 +717,10 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
struct mptcp_pm_add_entry *
mptcp_lookup_anno_list_by_saddr(struct mptcp_sock *msk,
struct mptcp_addr_info *addr);
+bool lookup_subflow_by_saddr(const struct list_head *list,
+ struct mptcp_addr_info *saddr);
+void remote_address(const struct sock_common *skc,
+ struct mptcp_addr_info *addr);
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
@@ -794,6 +798,7 @@ unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);
void __init mptcp_pm_fm_init(void);
+void mptcp_pm_fm_work(struct mptcp_sock *msk);
void mptcp_pm_fm_data_init(struct mptcp_sock *msk);
void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
--
2.31.1
next prev parent reply other threads:[~2021-07-09 11:04 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-07-09 11:04 [MPTCP][PATCH mptcp-next 0/9] fullmesh path manager support Geliang Tang
2021-07-09 11:04 ` [MPTCP][PATCH mptcp-next 1/9] mptcp: add a new sysctl path_manager Geliang Tang
2021-07-09 11:04 ` [MPTCP][PATCH mptcp-next 2/9] mptcp: add fullmesh path manager Geliang Tang
2021-07-09 11:04 ` Geliang Tang [this message]
2021-07-09 11:04 ` [MPTCP][PATCH mptcp-next 4/9] mptcp: register ipv4 addr notifier Geliang Tang
2021-07-09 11:04 ` [MPTCP][PATCH mptcp-next 5/9] mptcp: register ipv6 " Geliang Tang
2021-07-09 11:04 ` [MPTCP][PATCH mptcp-next 6/9] mptcp: add netdev up event handler Geliang Tang
2021-07-09 11:04 ` [MPTCP][PATCH mptcp-next 7/9] mptcp: add netdev down " Geliang Tang
2021-07-09 11:04 ` [MPTCP][PATCH mptcp-next 8/9] mptcp: add proc file mptcp_fullmesh Geliang Tang
2021-07-09 11:04 ` [MPTCP][PATCH mptcp-next 9/9] selftests: mptcp: add fullmesh testcases Geliang Tang
2021-07-13 0:04 ` [MPTCP][PATCH mptcp-next 0/9] fullmesh path manager support Mat Martineau
2021-07-16 17:28 ` Paolo Abeni
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=8b000137623aa94ab4f7c6b883c2003d91f3d147.1625825505.git.geliangtang@gmail.com \
--to=geliangtang@gmail.com \
--cc=mptcp@lists.linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).