Similar to previous patch: needs to be mirrored to all subflows.

Device bind is simpler: it is only done on the initial (listener) sk.

Signed-off-by: Florian Westphal <fw@strlen.de>
---
 net/mptcp/sockopt.c | 50 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 33ca67e99f8f..9a87c50e21a4 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -79,6 +79,37 @@ static int mptcp_so_priority(struct mptcp_sock *msk, int val)
 	return 0;
 }
 
+static int mptcp_so_sndrcvbuf(struct mptcp_sock *msk, int optname, int val)
+{
+	sockptr_t optval = KERNEL_SOCKPTR(&val);
+	struct mptcp_subflow_context *subflow;
+	struct sock *sk = (struct sock *)msk;
+	int ret;
+
+	ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname,
+			      optval, sizeof(val));
+	if (ret)
+		return ret;
+
+	lock_sock(sk);
+	mptcp_for_each_subflow(msk, subflow) {
+		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+		bool slow = lock_sock_fast(ssk);
+		unsigned int ulock;
+
+		ulock = sk->sk_userlocks;
+		ulock &= SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK;
+
+		ssk->sk_userlocks |= ulock;
+		WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
+		WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
+		unlock_sock_fast(ssk, slow);
+	}
+
+	release_sock(sk);
+	return 0;
+}
+
 static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
 					   sockptr_t optval, unsigned int optlen)
 {
@@ -94,6 +125,11 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
 		return 0;
 	case SO_PRIORITY:
 		return mptcp_so_priority(msk, val);
+	case SO_SNDBUF:
+	case SO_SNDBUFFORCE:
+	case SO_RCVBUF:
+	case SO_RCVBUFFORCE:
+		return mptcp_so_sndrcvbuf(msk, optname, val);
 	}
 
 	return -ENOPROTOOPT;
@@ -109,6 +145,8 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
 	switch (optname) {
 	case SO_REUSEPORT:
 	case SO_REUSEADDR:
+	case SO_BINDTODEVICE:
+	case SO_BINDTOIFINDEX:
 		lock_sock(sk);
 		ssock = __mptcp_nmpc_socket(msk);
 		if (!ssock) {
@@ -122,11 +160,19 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
 			sk->sk_reuseport = ssock->sk->sk_reuseport;
 		else if (optname == SO_REUSEADDR)
 			sk->sk_reuse = ssock->sk->sk_reuse;
+		else if (optname == SO_BINDTODEVICE)
+			sk->sk_bound_dev_if = ssock->sk->sk_bound_dev_if;
+		else if (optname == SO_BINDTOIFINDEX)
+			sk->sk_bound_dev_if = ssock->sk->sk_bound_dev_if;
 		}
 		release_sock(sk);
 		return ret;
 	case SO_KEEPALIVE:
 	case SO_PRIORITY:
+	case SO_SNDBUF:
+	case SO_SNDBUFFORCE:
+	case SO_RCVBUF:
+	case SO_RCVBUFFORCE:
 		return mptcp_setsockopt_sol_socket_int(msk, optname, optval,
 						       optlen);
 	}
@@ -441,6 +487,10 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
 	}
 
 	ssk->sk_priority = sk->sk_priority;
+	ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
+
+	WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
+	WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
 
 	unlock_sock_fast(ssk, slow);
 }
-- 
2.26.2