All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets.
@ 2023-02-09 18:48 Paolo Abeni
  2023-02-09 18:48 ` [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown Paolo Abeni
  2023-02-15 17:53 ` [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets Matthieu Baerts
  0 siblings, 2 replies; 17+ messages in thread
From: Paolo Abeni @ 2023-02-09 18:48 UTC (permalink / raw)
  To: mptcp; +Cc: Christoph Paasch

Christoph reported a UaF at token lookup time:

BUG: KASAN: use-after-free in __token_bucket_busy+0x253/0x260
Read of size 4 at addr ffff88810698d5b0 by task syz-executor653/3198

CPU: 1 PID: 3198 Comm: syz-executor653 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
Call Trace:
 <TASK>
 dump_stack_lvl+0x6e/0x91
 print_report+0x16a/0x46f
 kasan_report+0xad/0x130
 __token_bucket_busy+0x253/0x260
 mptcp_token_new_connect+0x13d/0x490
 mptcp_connect+0x4ed/0x860
 __inet_stream_connect+0x80e/0xd90
 tcp_sendmsg_fastopen+0x3ce/0x710
 mptcp_sendmsg+0xff1/0x1a20
 inet_sendmsg+0x11d/0x140
 __sys_sendto+0x405/0x490
 __x64_sys_sendto+0xdc/0x1b0
 do_syscall_64+0x3b/0x90
 entry_SYSCALL_64_after_hwframe+0x72/0xdc

We need to properly clean-up all the paired MPTCP-level
resources and be sure to release the msk last, even when
the unaccepted subflow is destroyed by the TCP internals
via inet_child_forget().

We can re-use the existing MPTCP_WORK_CLOSE_SUBFLOW infra,
explicitly checking that for the critical scenario: the
closed subflow is the MPC one, the msk is not accepted and
eventually going through full cleanup.

With such change, __mptcp_destroy_sock() is always called
on msk sockets, even on accepted ones. We don't need anymore
to transiently drop one sk reference at msk clone time.

Reported-and-tested-by: Christoph Paasch <cpaasch@apple.com>
Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/347
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
v1 -> v2:
 - fixed refcount issue
 - more verbose commit message
---
 net/mptcp/protocol.c | 26 +++++++++++++++++---------
 net/mptcp/protocol.h |  3 ++-
 net/mptcp/subflow.c  | 11 +++++++++--
 3 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 546df81c162f..d7588535bf6d 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2444,9 +2444,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
 	return 0;
 }
 
-static void __mptcp_close_subflow(struct mptcp_sock *msk)
+static void __mptcp_close_subflow(struct sock *sk)
 {
 	struct mptcp_subflow_context *subflow, *tmp;
+	struct mptcp_sock *msk = mptcp_sk(sk);
 
 	might_sleep();
 
@@ -2460,7 +2461,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
 		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
 			continue;
 
-		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
+		mptcp_close_ssk(sk, ssk, subflow);
+	}
+
+	/* if the MPC subflow has been closed before the msk is accepted,
+	 * msk will never be accept-ed, close it now
+	 */
+	if (!msk->first && msk->in_accept_queue) {
+		sock_set_flag(sk, SOCK_DEAD);
+		sk->sk_state = TCP_CLOSE;
 	}
 }
 
@@ -2684,6 +2693,9 @@ static void mptcp_worker(struct work_struct *work)
 	__mptcp_check_send_data_fin(sk);
 	mptcp_check_data_fin(sk);
 
+	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+		__mptcp_close_subflow(sk);
+
 	/* There is no point in keeping around an orphaned sk timedout or
 	 * closed, but we need the msk around to reply to incoming DATA_FIN,
 	 * even if it is orphaned and in FIN_WAIT2 state
@@ -2699,9 +2711,6 @@ static void mptcp_worker(struct work_struct *work)
 		}
 	}
 
-	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
-		__mptcp_close_subflow(msk);
-
 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
 		__mptcp_retrans(sk);
 
@@ -3149,6 +3158,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
 	msk->local_key = subflow_req->local_key;
 	msk->token = subflow_req->token;
 	msk->subflow = NULL;
+	msk->in_accept_queue = 1;
 	WRITE_ONCE(msk->fully_established, false);
 	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
 		WRITE_ONCE(msk->csum_enabled, true);
@@ -3169,8 +3179,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
 	security_inet_csk_clone(nsk, req);
 	bh_unlock_sock(nsk);
 
-	/* keep a single reference */
-	__sock_put(nsk);
+	/* note: the newly allocated socket refcount is 2 now */
 	return nsk;
 }
 
@@ -3226,8 +3235,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
 			goto out;
 		}
 
-		/* acquire the 2nd reference for the owning socket */
-		sock_hold(new_mptcp_sock);
 		newsk = new_mptcp_sock;
 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
 	} else {
@@ -3781,6 +3788,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 		struct mptcp_subflow_context *subflow;
 
 		set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
+		msk->in_accept_queue = 0;
 
 		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
 		 * This is needed so NOSPACE flag can be set from tcp stack.
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 3a055438c65e..252f050c96c6 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -294,7 +294,8 @@ struct mptcp_sock {
 	u8		recvmsg_inq:1,
 			cork:1,
 			nodelay:1,
-			fastopening:1;
+			fastopening:1,
+			in_accept_queue:1;
 	int		connect_flags;
 	struct work_struct work;
 	struct sk_buff  *ooo_last_skb;
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index f075607adad4..7cc967985011 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -693,9 +693,10 @@ static bool subflow_hmac_valid(const struct request_sock *req,
 
 static void mptcp_force_close(struct sock *sk)
 {
-	/* the msk is not yet exposed to user-space */
+	/* the msk is not yet exposed to user-space, and refcount is 2 */
 	inet_sk_state_store(sk, TCP_CLOSE);
 	sk_common_release(sk);
+	sock_put(sk);
 }
 
 static void subflow_ulp_fallback(struct sock *sk,
@@ -1854,7 +1855,6 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s
 		struct sock *sk = (struct sock *)msk;
 		bool do_cancel_work;
 
-		sock_hold(sk);
 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 		next = msk->dl_next;
 		msk->dl_next = NULL;
@@ -1949,6 +1949,13 @@ static void subflow_ulp_release(struct sock *ssk)
 		 * the ctx alive, will be freed by __mptcp_close_ssk()
 		 */
 		release = ctx->disposable;
+
+		/* inet_child_forget() does not call sk_state_change(),
+		 * explicitly trigger the socket close machinery
+		 */
+		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
+						  &mptcp_sk(sk)->flags))
+			mptcp_schedule_work(sk);
 		sock_put(sk);
 	}
 
-- 
2.39.1


^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown
  2023-02-09 18:48 [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets Paolo Abeni
@ 2023-02-09 18:48 ` Paolo Abeni
  2023-02-10 12:18   ` mptcp: fix UaF in listener shutdown: Tests Results MPTCP CI
  2023-02-17 17:58   ` [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown Matthieu Baerts
  2023-02-15 17:53 ` [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets Matthieu Baerts
  1 sibling, 2 replies; 17+ messages in thread
From: Paolo Abeni @ 2023-02-09 18:48 UTC (permalink / raw)
  To: mptcp; +Cc: Christoph Paasch

As reported by Christoph, the mptcp listener shutdown path is prone
to a UaF issue.

BUG: KASAN: use-after-free in _raw_spin_lock_bh+0x73/0xe0
Write of size 4 at addr ffff88810cb23098 by task syz-executor731/1266

CPU: 1 PID: 1266 Comm: syz-executor731 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
Call Trace:
 <TASK>
 dump_stack_lvl+0x6e/0x91
 print_report+0x16a/0x46f
 kasan_report+0xad/0x130
 kasan_check_range+0x14a/0x1a0
 _raw_spin_lock_bh+0x73/0xe0
 subflow_error_report+0x6d/0x110
 sk_error_report+0x3b/0x190
 tcp_disconnect+0x138c/0x1aa0
 inet_child_forget+0x6f/0x2e0
 inet_csk_listen_stop+0x209/0x1060
 __mptcp_close_ssk+0x52d/0x610
 mptcp_destroy_common+0x165/0x640
 mptcp_destroy+0x13/0x80
 __mptcp_destroy_sock+0xe7/0x270
 __mptcp_close+0x70e/0x9b0
 mptcp_close+0x2b/0x150
 inet_release+0xe9/0x1f0
 __sock_release+0xd2/0x280
 sock_close+0x15/0x20
 __fput+0x252/0xa20
 task_work_run+0x169/0x250
 exit_to_user_mode_prepare+0x113/0x120
 syscall_exit_to_user_mode+0x1d/0x40
 do_syscall_64+0x48/0x90
 entry_SYSCALL_64_after_hwframe+0x72/0xdc

The msk grace period can legitimately expire in between the last
reference count dropped in mptcp_subflow_queue_clean() and
the later eventual access in inet_csk_listen_stop().

After the previous patch we don't need anymore special-casing
msk listener socket cleanup: the mptcp worker will process each
of the unaccepted msk sockets.

Just drop the now unnecessary code.

Reported-and-tested-by: Christoph Paasch <cpaasch@apple.com>
Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/346
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/mptcp/protocol.c |  1 -
 net/mptcp/protocol.h |  1 -
 net/mptcp/subflow.c  | 80 --------------------------------------------
 3 files changed, 82 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index d7588535bf6d..ae2d715f82b5 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2400,7 +2400,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 		/* otherwise tcp will dispose of the ssk and subflow ctx */
 		if (ssk->sk_state == TCP_LISTEN) {
 			tcp_set_state(ssk, TCP_CLOSE);
-			mptcp_subflow_queue_clean(sk, ssk);
 			inet_csk_listen_stop(ssk);
 			mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
 		}
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 252f050c96c6..ba4d22ffd228 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -631,7 +631,6 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 		     struct mptcp_subflow_context *subflow);
 void __mptcp_subflow_send_ack(struct sock *ssk);
 void mptcp_subflow_reset(struct sock *ssk);
-void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
 void mptcp_sock_graft(struct sock *sk, struct socket *parent);
 struct socket *__mptcp_nmpc_socket(struct mptcp_sock *msk);
 bool __mptcp_close(struct sock *sk, long timeout);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 7cc967985011..8b46311b8d5e 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1814,86 +1814,6 @@ static void subflow_state_change(struct sock *sk)
 	}
 }
 
-void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
-{
-	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
-	struct mptcp_sock *msk, *next, *head = NULL;
-	struct request_sock *req;
-
-	/* build a list of all unaccepted mptcp sockets */
-	spin_lock_bh(&queue->rskq_lock);
-	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
-		struct mptcp_subflow_context *subflow;
-		struct sock *ssk = req->sk;
-		struct mptcp_sock *msk;
-
-		if (!sk_is_mptcp(ssk))
-			continue;
-
-		subflow = mptcp_subflow_ctx(ssk);
-		if (!subflow || !subflow->conn)
-			continue;
-
-		/* skip if already in list */
-		msk = mptcp_sk(subflow->conn);
-		if (msk->dl_next || msk == head)
-			continue;
-
-		msk->dl_next = head;
-		head = msk;
-	}
-	spin_unlock_bh(&queue->rskq_lock);
-	if (!head)
-		return;
-
-	/* can't acquire the msk socket lock under the subflow one,
-	 * or will cause ABBA deadlock
-	 */
-	release_sock(listener_ssk);
-
-	for (msk = head; msk; msk = next) {
-		struct sock *sk = (struct sock *)msk;
-		bool do_cancel_work;
-
-		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-		next = msk->dl_next;
-		msk->dl_next = NULL;
-
-		/* The upcoming mptcp_close is going to drop all the references
-		 * to the first subflow, ignoring that one of such reference is
-		 * owned by the request socket still in the accept queue and that
-		 * later inet_csk_listen_stop will drop it.
-		 * Acquire an extra reference here to avoid an UaF at that point.
-		 */
-		if (msk->first)
-			sock_hold(msk->first);
-
-		do_cancel_work = __mptcp_close(sk, -1);
-		release_sock(sk);
-		if (do_cancel_work) {
-			/* lockdep will report a false positive ABBA deadlock
-			 * between cancel_work_sync and the listener socket.
-			 * The involved locks belong to different sockets WRT
-			 * the existing AB chain.
-			 * Using a per socket key is problematic as key
-			 * deregistration requires process context and must be
-			 * performed at socket disposal time, in atomic
-			 * context.
-			 * Just tell lockdep to consider the listener socket
-			 * released here.
-			 */
-			mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
-			mptcp_cancel_work(sk);
-			mutex_acquire(&listener_sk->sk_lock.dep_map,
-				      SINGLE_DEPTH_NESTING, 0, _RET_IP_);
-		}
-		sock_put(sk);
-	}
-
-	/* we are still under the listener msk socket lock */
-	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
-}
-
 static int subflow_ulp_init(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-- 
2.39.1


^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: mptcp: fix UaF in listener shutdown: Tests Results
  2023-02-09 18:48 ` [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown Paolo Abeni
@ 2023-02-10 12:18   ` MPTCP CI
  2023-02-17 17:58   ` [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown Matthieu Baerts
  1 sibling, 0 replies; 17+ messages in thread
From: MPTCP CI @ 2023-02-10 12:18 UTC (permalink / raw)
  To: Paolo Abeni; +Cc: mptcp

Hi Paolo,

Thank you for your modifications, that's great!

Our CI did some validations and here is its report:

- KVM Validation: normal (except selftest_mptcp_join):
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/5943503453159424
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5943503453159424/summary/summary.txt

- KVM Validation: debug (only selftest_mptcp_join):
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/4676866057961472
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/4676866057961472/summary/summary.txt

- KVM Validation: normal (only selftest_mptcp_join):
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/5380553499738112
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5380553499738112/summary/summary.txt

- KVM Validation: debug (except selftest_mptcp_join):
  - Unstable: 1 failed test(s): packetdrill_add_addr 🔴:
  - Task: https://cirrus-ci.com/task/6506453406580736
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/6506453406580736/summary/summary.txt

Initiator: Matthieu Baerts
Commits: https://github.com/multipath-tcp/mptcp_net-next/commits/017df44e3b70


If there are some issues, you can reproduce them using the same environment as
the one used by the CI thanks to a docker image, e.g.:

    $ cd [kernel source code]
    $ docker run -v "${PWD}:${PWD}:rw" -w "${PWD}" --privileged --rm -it \
        --pull always mptcp/mptcp-upstream-virtme-docker:latest \
        auto-debug

For more details:

    https://github.com/multipath-tcp/mptcp-upstream-virtme-docker


Please note that despite all the efforts that have been already done to have a
stable tests suite when executed on a public CI like here, it is possible some
reported issues are not due to your modifications. Still, do not hesitate to
help us improve that ;-)

Cheers,
MPTCP GH Action bot
Bot operated by Matthieu Baerts (Tessares)

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets.
  2023-02-09 18:48 [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets Paolo Abeni
  2023-02-09 18:48 ` [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown Paolo Abeni
@ 2023-02-15 17:53 ` Matthieu Baerts
  2023-02-16 14:58   ` Paolo Abeni
  1 sibling, 1 reply; 17+ messages in thread
From: Matthieu Baerts @ 2023-02-15 17:53 UTC (permalink / raw)
  To: Paolo Abeni, mptcp; +Cc: Christoph Paasch

Hi Paolo,

On 09/02/2023 19:48, Paolo Abeni wrote:
> Christoph reported a UaF at token lookup time:
> 
> BUG: KASAN: use-after-free in __token_bucket_busy+0x253/0x260
> Read of size 4 at addr ffff88810698d5b0 by task syz-executor653/3198
> 
> CPU: 1 PID: 3198 Comm: syz-executor653 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
> Call Trace:
>  <TASK>
>  dump_stack_lvl+0x6e/0x91
>  print_report+0x16a/0x46f
>  kasan_report+0xad/0x130
>  __token_bucket_busy+0x253/0x260
>  mptcp_token_new_connect+0x13d/0x490
>  mptcp_connect+0x4ed/0x860
>  __inet_stream_connect+0x80e/0xd90
>  tcp_sendmsg_fastopen+0x3ce/0x710
>  mptcp_sendmsg+0xff1/0x1a20
>  inet_sendmsg+0x11d/0x140
>  __sys_sendto+0x405/0x490
>  __x64_sys_sendto+0xdc/0x1b0
>  do_syscall_64+0x3b/0x90
>  entry_SYSCALL_64_after_hwframe+0x72/0xdc
> 
> We need to properly clean-up all the paired MPTCP-level
> resources and be sure to release the msk last, even when
> the unaccepted subflow is destroyed by the TCP internals
> via inet_child_forget().
> 
> We can re-use the existing MPTCP_WORK_CLOSE_SUBFLOW infra,
> explicitly checking that for the critical scenario: the
> closed subflow is the MPC one, the msk is not accepted and
> eventually going through full cleanup.
> 
> With such change, __mptcp_destroy_sock() is always called
> on msk sockets, even on accepted ones. We don't need anymore
> to transiently drop one sk reference at msk clone time.

Thank you for the patch, I just have one question below if you don't mind :)

(...)

> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index 546df81c162f..d7588535bf6d 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c
> @@ -2444,9 +2444,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
>  	return 0;
>  }
>  
> -static void __mptcp_close_subflow(struct mptcp_sock *msk)
> +static void __mptcp_close_subflow(struct sock *sk)
>  {
>  	struct mptcp_subflow_context *subflow, *tmp;
> +	struct mptcp_sock *msk = mptcp_sk(sk);
>  
>  	might_sleep();
>  
> @@ -2460,7 +2461,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
>  		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
>  			continue;
>  
> -		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
> +		mptcp_close_ssk(sk, ssk, subflow);
> +	}
> +
> +	/* if the MPC subflow has been closed before the msk is accepted,
> +	 * msk will never be accept-ed, close it now
> +	 */
> +	if (!msk->first && msk->in_accept_queue) {
> +		sock_set_flag(sk, SOCK_DEAD);
> +		sk->sk_state = TCP_CLOSE;

Would it make sense to call inet_sk_state_store() here, mainly for the
tracing?

Cheers,
Matt
-- 
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets.
  2023-02-15 17:53 ` [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets Matthieu Baerts
@ 2023-02-16 14:58   ` Paolo Abeni
  2023-02-16 15:07     ` Matthieu Baerts
  0 siblings, 1 reply; 17+ messages in thread
From: Paolo Abeni @ 2023-02-16 14:58 UTC (permalink / raw)
  To: Matthieu Baerts, mptcp; +Cc: Christoph Paasch

On Wed, 2023-02-15 at 18:53 +0100, Matthieu Baerts wrote:
> Hi Paolo,
> 
> On 09/02/2023 19:48, Paolo Abeni wrote:
> > Christoph reported a UaF at token lookup time:
> > 
> > BUG: KASAN: use-after-free in __token_bucket_busy+0x253/0x260
> > Read of size 4 at addr ffff88810698d5b0 by task syz-executor653/3198
> > 
> > CPU: 1 PID: 3198 Comm: syz-executor653 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
> > Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
> > Call Trace:
> >  <TASK>
> >  dump_stack_lvl+0x6e/0x91
> >  print_report+0x16a/0x46f
> >  kasan_report+0xad/0x130
> >  __token_bucket_busy+0x253/0x260
> >  mptcp_token_new_connect+0x13d/0x490
> >  mptcp_connect+0x4ed/0x860
> >  __inet_stream_connect+0x80e/0xd90
> >  tcp_sendmsg_fastopen+0x3ce/0x710
> >  mptcp_sendmsg+0xff1/0x1a20
> >  inet_sendmsg+0x11d/0x140
> >  __sys_sendto+0x405/0x490
> >  __x64_sys_sendto+0xdc/0x1b0
> >  do_syscall_64+0x3b/0x90
> >  entry_SYSCALL_64_after_hwframe+0x72/0xdc
> > 
> > We need to properly clean-up all the paired MPTCP-level
> > resources and be sure to release the msk last, even when
> > the unaccepted subflow is destroyed by the TCP internals
> > via inet_child_forget().
> > 
> > We can re-use the existing MPTCP_WORK_CLOSE_SUBFLOW infra,
> > explicitly checking that for the critical scenario: the
> > closed subflow is the MPC one, the msk is not accepted and
> > eventually going through full cleanup.
> > 
> > With such change, __mptcp_destroy_sock() is always called
> > on msk sockets, even on accepted ones. We don't need anymore
> > to transiently drop one sk reference at msk clone time.
> 
> Thank you for the patch, I just have one question below if you don't mind :)
> 
> (...)
> 
> > diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> > index 546df81c162f..d7588535bf6d 100644
> > --- a/net/mptcp/protocol.c
> > +++ b/net/mptcp/protocol.c
> > @@ -2444,9 +2444,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
> >  	return 0;
> >  }
> >  
> > -static void __mptcp_close_subflow(struct mptcp_sock *msk)
> > +static void __mptcp_close_subflow(struct sock *sk)
> >  {
> >  	struct mptcp_subflow_context *subflow, *tmp;
> > +	struct mptcp_sock *msk = mptcp_sk(sk);
> >  
> >  	might_sleep();
> >  
> > @@ -2460,7 +2461,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
> >  		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
> >  			continue;
> >  
> > -		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
> > +		mptcp_close_ssk(sk, ssk, subflow);
> > +	}
> > +
> > +	/* if the MPC subflow has been closed before the msk is accepted,
> > +	 * msk will never be accept-ed, close it now
> > +	 */
> > +	if (!msk->first && msk->in_accept_queue) {
> > +		sock_set_flag(sk, SOCK_DEAD);
> > +		sk->sk_state = TCP_CLOSE;
> 
> Would it make sense to call inet_sk_state_store() here, mainly for the
> tracing?

Right you are, this is the msk; its state is read locklessly by the
subflows.

Do you prefer a v3 or editing that in while applying the patch?

Thanks,

Paolo


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets.
  2023-02-16 14:58   ` Paolo Abeni
@ 2023-02-16 15:07     ` Matthieu Baerts
  2023-02-16 15:15       ` Paolo Abeni
  0 siblings, 1 reply; 17+ messages in thread
From: Matthieu Baerts @ 2023-02-16 15:07 UTC (permalink / raw)
  To: Paolo Abeni, mptcp; +Cc: Christoph Paasch

Hi Paolo,

On 16/02/2023 15:58, Paolo Abeni wrote:
> On Wed, 2023-02-15 at 18:53 +0100, Matthieu Baerts wrote:
>> Hi Paolo,
>>
>> On 09/02/2023 19:48, Paolo Abeni wrote:
>>> Christoph reported a UaF at token lookup time:
>>>
>>> BUG: KASAN: use-after-free in __token_bucket_busy+0x253/0x260
>>> Read of size 4 at addr ffff88810698d5b0 by task syz-executor653/3198
>>>
>>> CPU: 1 PID: 3198 Comm: syz-executor653 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
>>> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
>>> Call Trace:
>>>  <TASK>
>>>  dump_stack_lvl+0x6e/0x91
>>>  print_report+0x16a/0x46f
>>>  kasan_report+0xad/0x130
>>>  __token_bucket_busy+0x253/0x260
>>>  mptcp_token_new_connect+0x13d/0x490
>>>  mptcp_connect+0x4ed/0x860
>>>  __inet_stream_connect+0x80e/0xd90
>>>  tcp_sendmsg_fastopen+0x3ce/0x710
>>>  mptcp_sendmsg+0xff1/0x1a20
>>>  inet_sendmsg+0x11d/0x140
>>>  __sys_sendto+0x405/0x490
>>>  __x64_sys_sendto+0xdc/0x1b0
>>>  do_syscall_64+0x3b/0x90
>>>  entry_SYSCALL_64_after_hwframe+0x72/0xdc
>>>
>>> We need to properly clean-up all the paired MPTCP-level
>>> resources and be sure to release the msk last, even when
>>> the unaccepted subflow is destroyed by the TCP internals
>>> via inet_child_forget().
>>>
>>> We can re-use the existing MPTCP_WORK_CLOSE_SUBFLOW infra,
>>> explicitly checking that for the critical scenario: the
>>> closed subflow is the MPC one, the msk is not accepted and
>>> eventually going through full cleanup.
>>>
>>> With such change, __mptcp_destroy_sock() is always called
>>> on msk sockets, even on accepted ones. We don't need anymore
>>> to transiently drop one sk reference at msk clone time.
>>
>> Thank you for the patch, I just have one question below if you don't mind :)
>>
>> (...)
>>
>>> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
>>> index 546df81c162f..d7588535bf6d 100644
>>> --- a/net/mptcp/protocol.c
>>> +++ b/net/mptcp/protocol.c
>>> @@ -2444,9 +2444,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
>>>  	return 0;
>>>  }
>>>  
>>> -static void __mptcp_close_subflow(struct mptcp_sock *msk)
>>> +static void __mptcp_close_subflow(struct sock *sk)
>>>  {
>>>  	struct mptcp_subflow_context *subflow, *tmp;
>>> +	struct mptcp_sock *msk = mptcp_sk(sk);
>>>  
>>>  	might_sleep();
>>>  
>>> @@ -2460,7 +2461,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
>>>  		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
>>>  			continue;
>>>  
>>> -		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
>>> +		mptcp_close_ssk(sk, ssk, subflow);
>>> +	}
>>> +
>>> +	/* if the MPC subflow has been closed before the msk is accepted,
>>> +	 * msk will never be accept-ed, close it now
>>> +	 */
>>> +	if (!msk->first && msk->in_accept_queue) {
>>> +		sock_set_flag(sk, SOCK_DEAD);
>>> +		sk->sk_state = TCP_CLOSE;
>>
>> Would it make sense to call inet_sk_state_store() here, mainly for the
>> tracing?
> 
> Right you are, this is the msk; its state is read locklessly by the
> subflows.

Sorry, I'm not sure I understood your reply. Just to be sure, we should
then replace this single line:

  sk->sk_state = TCP_CLOSE;

by:

  inet_sk_state_store(sk, TCP_CLOSE);

and nothing else.

(and we should not use tcp_set_state() here because that's the msk)

> Do you prefer a v3 or editing that in while applying the patch?

If the suggested modification is correct, I can do the modification (+
the Fixes tags) when applying the patches.

Cheers,
Matt
-- 
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets.
  2023-02-16 15:07     ` Matthieu Baerts
@ 2023-02-16 15:15       ` Paolo Abeni
  2023-02-16 15:24         ` Matthieu Baerts
  0 siblings, 1 reply; 17+ messages in thread
From: Paolo Abeni @ 2023-02-16 15:15 UTC (permalink / raw)
  To: Matthieu Baerts, mptcp; +Cc: Christoph Paasch

On Thu, 2023-02-16 at 16:07 +0100, Matthieu Baerts wrote:
> Hi Paolo,
> 
> On 16/02/2023 15:58, Paolo Abeni wrote:
> > On Wed, 2023-02-15 at 18:53 +0100, Matthieu Baerts wrote:
> > > Hi Paolo,
> > > 
> > > On 09/02/2023 19:48, Paolo Abeni wrote:
> > > > Christoph reported a UaF at token lookup time:
> > > > 
> > > > BUG: KASAN: use-after-free in __token_bucket_busy+0x253/0x260
> > > > Read of size 4 at addr ffff88810698d5b0 by task syz-executor653/3198
> > > > 
> > > > CPU: 1 PID: 3198 Comm: syz-executor653 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
> > > > Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
> > > > Call Trace:
> > > >  <TASK>
> > > >  dump_stack_lvl+0x6e/0x91
> > > >  print_report+0x16a/0x46f
> > > >  kasan_report+0xad/0x130
> > > >  __token_bucket_busy+0x253/0x260
> > > >  mptcp_token_new_connect+0x13d/0x490
> > > >  mptcp_connect+0x4ed/0x860
> > > >  __inet_stream_connect+0x80e/0xd90
> > > >  tcp_sendmsg_fastopen+0x3ce/0x710
> > > >  mptcp_sendmsg+0xff1/0x1a20
> > > >  inet_sendmsg+0x11d/0x140
> > > >  __sys_sendto+0x405/0x490
> > > >  __x64_sys_sendto+0xdc/0x1b0
> > > >  do_syscall_64+0x3b/0x90
> > > >  entry_SYSCALL_64_after_hwframe+0x72/0xdc
> > > > 
> > > > We need to properly clean-up all the paired MPTCP-level
> > > > resources and be sure to release the msk last, even when
> > > > the unaccepted subflow is destroyed by the TCP internals
> > > > via inet_child_forget().
> > > > 
> > > > We can re-use the existing MPTCP_WORK_CLOSE_SUBFLOW infra,
> > > > explicitly checking that for the critical scenario: the
> > > > closed subflow is the MPC one, the msk is not accepted and
> > > > eventually going through full cleanup.
> > > > 
> > > > With such change, __mptcp_destroy_sock() is always called
> > > > on msk sockets, even on accepted ones. We don't need anymore
> > > > to transiently drop one sk reference at msk clone time.
> > > 
> > > Thank you for the patch, I just have one question below if you don't mind :)
> > > 
> > > (...)
> > > 
> > > > diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> > > > index 546df81c162f..d7588535bf6d 100644
> > > > --- a/net/mptcp/protocol.c
> > > > +++ b/net/mptcp/protocol.c
> > > > @@ -2444,9 +2444,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
> > > >  	return 0;
> > > >  }
> > > >  
> > > > -static void __mptcp_close_subflow(struct mptcp_sock *msk)
> > > > +static void __mptcp_close_subflow(struct sock *sk)
> > > >  {
> > > >  	struct mptcp_subflow_context *subflow, *tmp;
> > > > +	struct mptcp_sock *msk = mptcp_sk(sk);
> > > >  
> > > >  	might_sleep();
> > > >  
> > > > @@ -2460,7 +2461,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
> > > >  		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
> > > >  			continue;
> > > >  
> > > > -		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
> > > > +		mptcp_close_ssk(sk, ssk, subflow);
> > > > +	}
> > > > +
> > > > +	/* if the MPC subflow has been closed before the msk is accepted,
> > > > +	 * msk will never be accept-ed, close it now
> > > > +	 */
> > > > +	if (!msk->first && msk->in_accept_queue) {
> > > > +		sock_set_flag(sk, SOCK_DEAD);
> > > > +		sk->sk_state = TCP_CLOSE;
> > > 
> > > Would it make sense to call inet_sk_state_store() here, mainly for the
> > > tracing?
> > 
> > Right you are, this si the msk, it's state is read lockless by the
> > subflows.
> 
> Sorry, I'm not sure I understood your reply. Just to be sure, we should
> then replace this single line:
> 
>   sk->sk_state = TCP_CLOSE;
> 
> by:
> 
>   inet_sk_state_store(sk, TCP_CLOSE);
> 
> and nothing else.

Exactly.

What I was trying to say above is that we need such a change because the
msk (== sk above) status is accessed (read) by the subflows without
holding the msk socket lock (using inet_sk_state_load()). Hence we
must use inet_sk_state_store() to write it.

> 
> (and we should not use tcp_set_state() here because that's the msk)
> 
> > Do you prefer a v3 or editing that in while applying the patch?
> 
> If the suggested modification is correct, I can do the modification (+
> the Fixes tags) when applying the patches.

Thanks!

Paolo


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets.
  2023-02-16 15:15       ` Paolo Abeni
@ 2023-02-16 15:24         ` Matthieu Baerts
  0 siblings, 0 replies; 17+ messages in thread
From: Matthieu Baerts @ 2023-02-16 15:24 UTC (permalink / raw)
  To: Paolo Abeni, mptcp; +Cc: Christoph Paasch

On 16/02/2023 16:15, Paolo Abeni wrote:
> On Thu, 2023-02-16 at 16:07 +0100, Matthieu Baerts wrote:
>> Hi Paolo,
>>
>> On 16/02/2023 15:58, Paolo Abeni wrote:
>>> On Wed, 2023-02-15 at 18:53 +0100, Matthieu Baerts wrote:
>>>> Hi Paolo,
>>>>
>>>> On 09/02/2023 19:48, Paolo Abeni wrote:
>>>>> Christoph reported a UaF at token lookup time:
>>>>>
>>>>> BUG: KASAN: use-after-free in __token_bucket_busy+0x253/0x260
>>>>> Read of size 4 at addr ffff88810698d5b0 by task syz-executor653/3198
>>>>>
>>>>> CPU: 1 PID: 3198 Comm: syz-executor653 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
>>>>> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
>>>>> Call Trace:
>>>>>  <TASK>
>>>>>  dump_stack_lvl+0x6e/0x91
>>>>>  print_report+0x16a/0x46f
>>>>>  kasan_report+0xad/0x130
>>>>>  __token_bucket_busy+0x253/0x260
>>>>>  mptcp_token_new_connect+0x13d/0x490
>>>>>  mptcp_connect+0x4ed/0x860
>>>>>  __inet_stream_connect+0x80e/0xd90
>>>>>  tcp_sendmsg_fastopen+0x3ce/0x710
>>>>>  mptcp_sendmsg+0xff1/0x1a20
>>>>>  inet_sendmsg+0x11d/0x140
>>>>>  __sys_sendto+0x405/0x490
>>>>>  __x64_sys_sendto+0xdc/0x1b0
>>>>>  do_syscall_64+0x3b/0x90
>>>>>  entry_SYSCALL_64_after_hwframe+0x72/0xdc
>>>>>
>>>>> We need to properly clean-up all the paired MPTCP-level
>>>>> resources and be sure to release the msk last, even when
>>>>> the unaccepted subflow is destroyed by the TCP internals
>>>>> via inet_child_forget().
>>>>>
>>>>> We can re-use the existing MPTCP_WORK_CLOSE_SUBFLOW infra,
>>>>> explicitly checking that for the critical scenario: the
>>>>> closed subflow is the MPC one, the msk is not accepted and
>>>>> eventually going through full cleanup.
>>>>>
>>>>> With such change, __mptcp_destroy_sock() is always called
>>>>> on msk sockets, even on accepted ones. We don't need anymore
>>>>> to transiently drop one sk reference at msk clone time.
>>>>
>>>> Thank you for the patch, I just have one question below if you don't mind :)
>>>>
>>>> (...)
>>>>
>>>>> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
>>>>> index 546df81c162f..d7588535bf6d 100644
>>>>> --- a/net/mptcp/protocol.c
>>>>> +++ b/net/mptcp/protocol.c
>>>>> @@ -2444,9 +2444,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
>>>>>  	return 0;
>>>>>  }
>>>>>  
>>>>> -static void __mptcp_close_subflow(struct mptcp_sock *msk)
>>>>> +static void __mptcp_close_subflow(struct sock *sk)
>>>>>  {
>>>>>  	struct mptcp_subflow_context *subflow, *tmp;
>>>>> +	struct mptcp_sock *msk = mptcp_sk(sk);
>>>>>  
>>>>>  	might_sleep();
>>>>>  
>>>>> @@ -2460,7 +2461,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
>>>>>  		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
>>>>>  			continue;
>>>>>  
>>>>> -		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
>>>>> +		mptcp_close_ssk(sk, ssk, subflow);
>>>>> +	}
>>>>> +
>>>>> +	/* if the MPC subflow has been closed before the msk is accepted,
>>>>> +	 * msk will never be accept-ed, close it now
>>>>> +	 */
>>>>> +	if (!msk->first && msk->in_accept_queue) {
>>>>> +		sock_set_flag(sk, SOCK_DEAD);
>>>>> +		sk->sk_state = TCP_CLOSE;
>>>>
>>>> Would it make sense to call inet_sk_state_store() here, mainly for the
>>>> tracing?
>>>
>>> Right you are, this is the msk, its state is read locklessly by the
>>> subflows.
>>
>> Sorry, I'm not sure I understood your reply. Just to be sure, we should
>> then replace this single line:
>>
>>   sk->sk_state = TCP_CLOSE;
>>
>> by:
>>
>>   inet_sk_state_store(sk, TCP_CLOSE);
>>
>> and nothing else.
> 
> Exactly.
> 
> What I was trying to say above is that we need such change because the
> msk (== sk above) status is accessed (read) by the subflows without
> holding the msk socket lock (using inet_sk_state_load()). Hence  we
> must use inet_sk_state_store() to write it.

Ah yes indeed, sorry now I see what you meant :-)

I can apply them in a bit.

Cheers,
Matt
-- 
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown
  2023-02-09 18:48 ` [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown Paolo Abeni
  2023-02-10 12:18   ` mptcp: fix UaF in listener shutdown: Tests Results MPTCP CI
@ 2023-02-17 17:58   ` Matthieu Baerts
  2023-02-20 11:14     ` Matthieu Baerts
  1 sibling, 1 reply; 17+ messages in thread
From: Matthieu Baerts @ 2023-02-17 17:58 UTC (permalink / raw)
  To: Paolo Abeni, mptcp; +Cc: Christoph Paasch

Hi Paolo,

On 09/02/2023 19:48, Paolo Abeni wrote:
> As reported by Christoph, the mptcp listener shutdown path is prone
> to an UaF issue.

Again, thank you for the two patches!

Now in our tree (fixes for -net) with my RvB tag.

I had a few conflicts when applying them on -net (and later when
propagating them to our tree, see below), I hope that's OK but don't
hesitate to double check if you have the opportunity :)

New patches for t/upstream-net and t/upstream:
- 4f70189ea062: mptcp: use the workqueue to destroy unaccepted sockets
- 477601b8695d: mptcp: fix UaF in listener shutdown
- Results: 2a5c259387e2..de8ae10003bd (export-net)

- 1c2f1e5c434b: conflict in t/mptcp-refactor-passive-socket-initialization
- 1fed9c65e6d5: conflict in t/mptcp-drop-legacy-code
- a5a7fffe5413: conflict in
t/mptcp-fastclose-msk-when-cleaning-unaccepted-sockets
- Results: c6f15583a946..2275e6265ebe (export)

Tests are now in progress:

https://cirrus-ci.com/github/multipath-tcp/mptcp_net-next/export-net/20230217T145032
https://cirrus-ci.com/github/multipath-tcp/mptcp_net-next/export/20230217T145032

Cheers,
Matt
-- 
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown
  2023-02-17 17:58   ` [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown Matthieu Baerts
@ 2023-02-20 11:14     ` Matthieu Baerts
  2023-02-20 11:51       ` Paolo Abeni
  0 siblings, 1 reply; 17+ messages in thread
From: Matthieu Baerts @ 2023-02-20 11:14 UTC (permalink / raw)
  To: Paolo Abeni, mptcp; +Cc: Christoph Paasch

Hi Paolo,

On 17/02/2023 18:58, Matthieu Baerts wrote:
> Hi Paolo,
> 
> On 09/02/2023 19:48, Paolo Abeni wrote:
>> As reported by Christoph, the mptcp listener shutdown path is prone
>> to an UaF issue.
> 
> Again, thank you for the two patches!
> 
> Now in our tree (fixes for -net) with my RvB tag.
> 
> I had a few conflicts when applying them on -net (and later when
> propagating them to our tree, see below), I hope that's OK but don't
> hesitate to double check if you have the opportunity :)

I probably didn't resolve the conflicts properly (or maybe some
adaptations are needed on -net) because this introduces some regressions
on export-net (not on net-next):

Any idea what could cause that?

https://github.com/multipath-tcp/mptcp_net-next/issues/361

Cheers,
Matt

> New patches for t/upstream-net and t/upstream:
> - 4f70189ea062: mptcp: use the workqueue to destroy unaccepted sockets
> - 477601b8695d: mptcp: fix UaF in listener shutdown
> - Results: 2a5c259387e2..de8ae10003bd (export-net)
> 
> - 1c2f1e5c434b: conflict in t/mptcp-refactor-passive-socket-initialization
> - 1fed9c65e6d5: conflict in t/mptcp-drop-legacy-code
> - a5a7fffe5413: conflict in
> t/mptcp-fastclose-msk-when-cleaning-unaccepted-sockets
> - Results: c6f15583a946..2275e6265ebe (export)
> 
> Tests are now in progress:
> 
> https://cirrus-ci.com/github/multipath-tcp/mptcp_net-next/export-net/20230217T145032
> https://cirrus-ci.com/github/multipath-tcp/mptcp_net-next/export/20230217T145032
> 
> Cheers,
> Matt

-- 
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown
  2023-02-20 11:14     ` Matthieu Baerts
@ 2023-02-20 11:51       ` Paolo Abeni
  2023-02-20 14:05         ` Matthieu Baerts
  0 siblings, 1 reply; 17+ messages in thread
From: Paolo Abeni @ 2023-02-20 11:51 UTC (permalink / raw)
  To: Matthieu Baerts, mptcp; +Cc: Christoph Paasch

On Mon, 2023-02-20 at 12:14 +0100, Matthieu Baerts wrote:
> Hi Paolo,
> 
> On 17/02/2023 18:58, Matthieu Baerts wrote:
> > Hi Paolo,
> > 
> > On 09/02/2023 19:48, Paolo Abeni wrote:
> > > As reported by Christoph, the mptcp listener shutdown path is prone
> > > to an UaF issue.
> > 
> > Again, thank you for the two patches!
> > 
> > Now in our tree (fixes for -net) with my RvB tag.
> > 
> > I had a few conflicts when applying them on -net (and later when
> > propagating them to our tree, see below), I hope that's OK but don't
> > hesitate to double check if you have the opportunity :)
> 
> I probably didn't resolve the conflicts properly (or maybe some
> adaptations are needed on -net) because this introduce some regressions
> on export-net (not on net-next):
> 
> Any idea what could cause that?
> 
> https://github.com/multipath-tcp/mptcp_net-next/issues/361

I *think* it's not a conflict resolution issue; patch 1/2 here
functionally depends on ("mptcp: refactor passive socket
initialization") which let msk take a reference on unaccepted MPC
subflows, so that incoming reset will not free such subflow via
inet_child_forget().

I *think* the easier solution is moving these patches on export-next
after ("mptcp: refactor passive socket initialization"), mentioning
explicitly the functional dependency

WDYT?

Thanks,

Paolo


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown
  2023-02-20 11:51       ` Paolo Abeni
@ 2023-02-20 14:05         ` Matthieu Baerts
  2023-02-20 14:30           ` Paolo Abeni
  0 siblings, 1 reply; 17+ messages in thread
From: Matthieu Baerts @ 2023-02-20 14:05 UTC (permalink / raw)
  To: Paolo Abeni, mptcp; +Cc: Christoph Paasch

Hi Paolo,

Thank you for your reply.

On 20/02/2023 12:51, Paolo Abeni wrote:
> On Mon, 2023-02-20 at 12:14 +0100, Matthieu Baerts wrote:
>> Hi Paolo,
>>
>> On 17/02/2023 18:58, Matthieu Baerts wrote:
>>> Hi Paolo,
>>>
>>> On 09/02/2023 19:48, Paolo Abeni wrote:
>>>> As reported by Christoph, the mptcp listener shutdown path is prone
>>>> to an UaF issue.
>>>
>>> Again, thank you for the two patches!
>>>
>>> Now in our tree (fixes for -net) with my RvB tag.
>>>
>>> I had a few conflicts when applying them on -net (and later when
>>> propagating them to our tree, see below), I hope that's OK but don't
>>> hesitate to double check if you have the opportunity :)
>>
>> I probably didn't resolve the conflicts properly (or maybe some
>> adaptations are needed on -net) because this introduce some regressions
>> on export-net (not on net-next):
>>
>> Any idea what could cause that?
>>
>> https://github.com/multipath-tcp/mptcp_net-next/issues/361
> 
> I *think* it's not a conflict resolution issue; patch 1/2 here
> functionally depends on ("mptcp: refactor passive socket
> initialization") which let msk take a reference on unaccepted MPC
> subflows, so that incoming reset will not free such subflow via
> inet_child_forget().
> 
> I *think* the easier solution is moving this patches on export-next
> after ("mptcp: refactor passive socket initialization"), mentioned
> explicitly the functional dependency
Indeed, it makes sense. I just did that.

I removed the Fixes tags and mentioned it was linked to the refactor of
the passive socket initialization: is it OK? :)



Revert from -net side:

New patches for t/upstream-net:
- 1403082cb4f9: Revert "mptcp: use the workqueue to destroy unaccepted
sockets"
- a68e4d2c86ab: Revert "mptcp: fix UaF in listener shutdown"
- Results: 67a9b5dd9647..1231bdd38564 (export-net)

Tests are now in progress:

https://cirrus-ci.com/github/multipath-tcp/mptcp_net-next/export-net/20230220T125607



Re-adding in net-next side *before*:

   mptcp: avoid unneeded __mptcp_nmpc_socket() usage

New patches for t/upstream:
- b1eb4bc7fd93: conflict in t/mptcp-drop-legacy-code
- (9842f24e0af3: mptcp: fix wrong merge resolution)
- bcf869e80661: mptcp: use the workqueue to destroy unaccepted sockets
(net-next)
- (f1af1e12dde5: mptcp: add missing fix discussed on the ML)
- be46758055b5: conflict in
t/mptcp-use-the-workqueue-to-destroy-unaccepted-sockets-net-next
- cd7f29d04aa1: mptcp: fix UaF in listener shutdown (net-next)
- Results: 6d5a19a2f0c5..07db02c0176e (export) => empty, as expected!

Tests are now in progress:

https://cirrus-ci.com/github/multipath-tcp/mptcp_net-next/export/20230220T135733

Cheers,
Matt
-- 
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown
  2023-02-20 14:05         ` Matthieu Baerts
@ 2023-02-20 14:30           ` Paolo Abeni
  2023-02-20 14:37             ` Matthieu Baerts
  0 siblings, 1 reply; 17+ messages in thread
From: Paolo Abeni @ 2023-02-20 14:30 UTC (permalink / raw)
  To: Matthieu Baerts, mptcp; +Cc: Christoph Paasch

On Mon, 2023-02-20 at 15:05 +0100, Matthieu Baerts wrote:
> Hi Paolo,
> 
> Thank you for your reply.
> 
> On 20/02/2023 12:51, Paolo Abeni wrote:
> > On Mon, 2023-02-20 at 12:14 +0100, Matthieu Baerts wrote:
> > > Hi Paolo,
> > > 
> > > On 17/02/2023 18:58, Matthieu Baerts wrote:
> > > > Hi Paolo,
> > > > 
> > > > On 09/02/2023 19:48, Paolo Abeni wrote:
> > > > > As reported by Christoph, the mptcp listener shutdown path is prone
> > > > > to an UaF issue.
> > > > 
> > > > Again, thank you for the two patches!
> > > > 
> > > > Now in our tree (fixes for -net) with my RvB tag.
> > > > 
> > > > I had a few conflicts when applying them on -net (and later when
> > > > propagating them to our tree, see below), I hope that's OK but don't
> > > > hesitate to double check if you have the opportunity :)
> > > 
> > > I probably didn't resolve the conflicts properly (or maybe some
> > > adaptations are needed on -net) because this introduce some regressions
> > > on export-net (not on net-next):
> > > 
> > > Any idea what could cause that?
> > > 
> > > https://github.com/multipath-tcp/mptcp_net-next/issues/361
> > 
> > I *think* it's not a conflict resolution issue; patch 1/2 here
> > functionally depends on ("mptcp: refactor passive socket
> > initialization") which let msk take a reference on unaccepted MPC
> > subflows, so that incoming reset will not free such subflow via
> > inet_child_forget().
> > 
> > I *think* the easier solution is moving this patches on export-next
> > after ("mptcp: refactor passive socket initialization"), mentioned
> > explicitly the functional dependency
> Indeed, it makes sense. I just did that.
> 
> I removed the Fixes tags and mentioned it was linked to the refactor of
> the passive socket initialization: is it OK? :)

I *think* we want to preserve the Fixes tag, to help the stable team. We
can send a fix to net-next in cases like this. 

Anyway, no strong opinion on my side.

Thanks,

Paolo



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown
  2023-02-20 14:30           ` Paolo Abeni
@ 2023-02-20 14:37             ` Matthieu Baerts
  2023-02-22 17:06               ` Matthieu Baerts
  0 siblings, 1 reply; 17+ messages in thread
From: Matthieu Baerts @ 2023-02-20 14:37 UTC (permalink / raw)
  To: Paolo Abeni, mptcp; +Cc: Christoph Paasch

On 20/02/2023 15:30, Paolo Abeni wrote:
> On Mon, 2023-02-20 at 15:05 +0100, Matthieu Baerts wrote:
>> Hi Paolo,
>>
>> Thank you for your reply.
>>
>> On 20/02/2023 12:51, Paolo Abeni wrote:
>>> On Mon, 2023-02-20 at 12:14 +0100, Matthieu Baerts wrote:
>>>> Hi Paolo,
>>>>
>>>> On 17/02/2023 18:58, Matthieu Baerts wrote:
>>>>> Hi Paolo,
>>>>>
>>>>> On 09/02/2023 19:48, Paolo Abeni wrote:
>>>>>> As reported by Christoph, the mptcp listener shutdown path is prone
>>>>>> to an UaF issue.
>>>>>
>>>>> Again, thank you for the two patches!
>>>>>
>>>>> Now in our tree (fixes for -net) with my RvB tag.
>>>>>
>>>>> I had a few conflicts when applying them on -net (and later when
>>>>> propagating them to our tree, see below), I hope that's OK but don't
>>>>> hesitate to double check if you have the opportunity :)
>>>>
>>>> I probably didn't resolve the conflicts properly (or maybe some
>>>> adaptations are needed on -net) because this introduce some regressions
>>>> on export-net (not on net-next):
>>>>
>>>> Any idea what could cause that?
>>>>
>>>> https://github.com/multipath-tcp/mptcp_net-next/issues/361
>>>
>>> I *think* it's not a conflict resolution issue; patch 1/2 here
>>> functionally depends on ("mptcp: refactor passive socket
>>> initialization") which let msk take a reference on unaccepted MPC
>>> subflows, so that incoming reset will not free such subflow via
>>> inet_child_forget().
>>>
>>> I *think* the easier solution is moving this patches on export-next
>>> after ("mptcp: refactor passive socket initialization"), mentioned
>>> explicitly the functional dependency
>> Indeed, it makes sense. I just did that.
>>
>> I removed the Fixes tags and mentioned it was linked to the refactor of
>> the passive socket initialization: is it OK? :)
> 
> I *think* we want to preserve the Fixes tag, to help stable team. We
> can send fix to net-next in case like this. 

It is not clear to me if this bug is visible without the patches that
are currently in our export branch. If yes, we can keep the Fixes but we
need to mark that it depends on the refactor not to confuse the stable team.

@Christoph: do you mind checking if you can reproduce the two issues
#346 and #347 on net or net-next? :)

Cheers,
Matt
-- 
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown
  2023-02-20 14:37             ` Matthieu Baerts
@ 2023-02-22 17:06               ` Matthieu Baerts
  0 siblings, 0 replies; 17+ messages in thread
From: Matthieu Baerts @ 2023-02-22 17:06 UTC (permalink / raw)
  To: Paolo Abeni, mptcp; +Cc: Christoph Paasch

Hi Paolo,

On 20/02/2023 15:37, Matthieu Baerts wrote:
> On 20/02/2023 15:30, Paolo Abeni wrote:
>> On Mon, 2023-02-20 at 15:05 +0100, Matthieu Baerts wrote:
>>> Hi Paolo,
>>>
>>> Thank you for your reply.
>>>
>>> On 20/02/2023 12:51, Paolo Abeni wrote:
>>>> On Mon, 2023-02-20 at 12:14 +0100, Matthieu Baerts wrote:
>>>>> Hi Paolo,
>>>>>
>>>>> On 17/02/2023 18:58, Matthieu Baerts wrote:
>>>>>> Hi Paolo,
>>>>>>
>>>>>> On 09/02/2023 19:48, Paolo Abeni wrote:
>>>>>>> As reported by Christoph, the mptcp listener shutdown path is prone
>>>>>>> to an UaF issue.
>>>>>>
>>>>>> Again, thank you for the two patches!
>>>>>>
>>>>>> Now in our tree (fixes for -net) with my RvB tag.
>>>>>>
>>>>>> I had a few conflicts when applying them on -net (and later when
>>>>>> propagating them to our tree, see below), I hope that's OK but don't
>>>>>> hesitate to double check if you have the opportunity :)
>>>>>
>>>>> I probably didn't resolve the conflicts properly (or maybe some
>>>>> adaptations are needed on -net) because this introduce some regressions
>>>>> on export-net (not on net-next):
>>>>>
>>>>> Any idea what could cause that?
>>>>>
>>>>> https://github.com/multipath-tcp/mptcp_net-next/issues/361
>>>>
>>>> I *think* it's not a conflict resolution issue; patch 1/2 here
>>>> functionally depends on ("mptcp: refactor passive socket
>>>> initialization") which let msk take a reference on unaccepted MPC
>>>> subflows, so that incoming reset will not free such subflow via
>>>> inet_child_forget().
>>>>
>>>> I *think* the easier solution is moving this patches on export-next
>>>> after ("mptcp: refactor passive socket initialization"), mentioned
>>>> explicitly the functional dependency
>>> Indeed, it makes sense. I just did that.
>>>
>>> I removed the Fixes tags and mentioned it was linked to the refactor of
>>> the passive socket initialization: is it OK? :)
>>
>> I *think* we want to preserve the Fixes tag, to help stable team. We
>> can send fix to net-next in case like this. 
> 
> It is not clear to me if this bug is visible without the patches that
> are currently in our export branch. If yes, we can keep the Fixes but we
> need to mark that it depends on the refactor not to confuse the stable team.

As discussed on IRC, I moved the two patches back to -net + "mptcp:
refactor passive socket initialization":

New patches for t/upstream-net and t/upstream:
- eb67c35c665c: mptcp: refactor passive socket initialization (net)
- 315f247fb103: mptcp: use the workqueue to destroy unaccepted sockets (net)
- 26486b7946a6: mptcp: fix UaF in listener shutdown (net)
- Results: d9da79d599ab..8de0668c80e8 (export-net)

- 40500a9320da: conflict in t/mptcp-drop-legacy-code
- 0429d72c1ceb: conflict in
t/mptcp-use-the-workqueue-to-destroy-unaccepted-sockets-net-next
- Results: eb4a580f39ff..6d711df72d37 (export) # empty, as expected

Tests are now in progress:

https://cirrus-ci.com/github/multipath-tcp/mptcp_net-next/export-net/20230222T170427
https://cirrus-ci.com/github/multipath-tcp/mptcp_net-next/export/20230222T170427

Cheers,
Matt
-- 
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: mptcp: fix UaF in listener shutdown: Tests Results
  2023-02-08 21:44 [PATCH mptcp-net 2/2] mptcp: fix UaF in listener shutdown Paolo Abeni
  2023-02-08 23:09 ` mptcp: fix UaF in listener shutdown: Tests Results MPTCP CI
@ 2023-02-09 18:57 ` MPTCP CI
  1 sibling, 0 replies; 17+ messages in thread
From: MPTCP CI @ 2023-02-09 18:57 UTC (permalink / raw)
  To: Paolo Abeni; +Cc: mptcp

Hi Paolo,

Thank you for your modifications, that's great!

Our CI did some validations and here is its report:

- KVM Validation: normal (except selftest_mptcp_join):
  - Critical: 1 Call Trace(s) ❌:
  - Task: https://cirrus-ci.com/task/5329004899598336
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5329004899598336/summary/summary.txt

- KVM Validation: debug (except selftest_mptcp_join):
  - Critical: 1 Call Trace(s) ❌:
  - Task: https://cirrus-ci.com/task/5047529922887680
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5047529922887680/summary/summary.txt

- KVM Validation: normal (only selftest_mptcp_join):
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/6454904806440960
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/6454904806440960/summary/summary.txt

- KVM Validation: debug (only selftest_mptcp_join):
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/6173429829730304
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/6173429829730304/summary/summary.txt

Initiator: Patchew Applier
Commits: https://github.com/multipath-tcp/mptcp_net-next/commits/cbc3e5c367d4


If there are some issues, you can reproduce them using the same environment as
the one used by the CI thanks to a docker image, e.g.:

    $ cd [kernel source code]
    $ docker run -v "${PWD}:${PWD}:rw" -w "${PWD}" --privileged --rm -it \
        --pull always mptcp/mptcp-upstream-virtme-docker:latest \
        auto-debug

For more details:

    https://github.com/multipath-tcp/mptcp-upstream-virtme-docker


Please note that despite all the efforts that have been already done to have a
stable tests suite when executed on a public CI like here, it is possible some
reported issues are not due to your modifications. Still, do not hesitate to
help us improve that ;-)

Cheers,
MPTCP GH Action bot
Bot operated by Matthieu Baerts (Tessares)

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: mptcp: fix UaF in listener shutdown: Tests Results
  2023-02-08 21:44 [PATCH mptcp-net 2/2] mptcp: fix UaF in listener shutdown Paolo Abeni
@ 2023-02-08 23:09 ` MPTCP CI
  2023-02-09 18:57 ` MPTCP CI
  1 sibling, 0 replies; 17+ messages in thread
From: MPTCP CI @ 2023-02-08 23:09 UTC (permalink / raw)
  To: Paolo Abeni; +Cc: mptcp

Hi Paolo,

Thank you for your modifications, that's great!

Our CI did some validations and here is its report:

- KVM Validation: normal (except selftest_mptcp_join):
  - Critical: 1 Call Trace(s) ❌:
  - Task: https://cirrus-ci.com/task/4517118641700864
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/4517118641700864/summary/summary.txt

- KVM Validation: debug (except selftest_mptcp_join):
  - Unstable: 1 failed test(s): packetdrill_sockopts - Critical: 1 Call Trace(s) ❌:
  - Task: https://cirrus-ci.com/task/5080068595122176
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5080068595122176/summary/summary.txt

- KVM Validation: debug (only selftest_mptcp_join):
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/6205968501964800
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/6205968501964800/summary/summary.txt

- KVM Validation: normal (only selftest_mptcp_join):
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/5643018548543488
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5643018548543488/summary/summary.txt

Initiator: Patchew Applier
Commits: https://github.com/multipath-tcp/mptcp_net-next/commits/ae7c0cbcdb5b


If there are some issues, you can reproduce them using the same environment as
the one used by the CI thanks to a docker image, e.g.:

    $ cd [kernel source code]
    $ docker run -v "${PWD}:${PWD}:rw" -w "${PWD}" --privileged --rm -it \
        --pull always mptcp/mptcp-upstream-virtme-docker:latest \
        auto-debug

For more details:

    https://github.com/multipath-tcp/mptcp-upstream-virtme-docker


Please note that despite all the efforts that have been already done to have a
stable tests suite when executed on a public CI like here, it is possible some
reported issues are not due to your modifications. Still, do not hesitate to
help us improve that ;-)

Cheers,
MPTCP GH Action bot
Bot operated by Matthieu Baerts (Tessares)

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2023-02-22 17:06 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-02-09 18:48 [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets Paolo Abeni
2023-02-09 18:48 ` [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown Paolo Abeni
2023-02-10 12:18   ` mptcp: fix UaF in listener shutdown: Tests Results MPTCP CI
2023-02-17 17:58   ` [PATCH v2 mptcp-net 2/2] mptcp: fix UaF in listener shutdown Matthieu Baerts
2023-02-20 11:14     ` Matthieu Baerts
2023-02-20 11:51       ` Paolo Abeni
2023-02-20 14:05         ` Matthieu Baerts
2023-02-20 14:30           ` Paolo Abeni
2023-02-20 14:37             ` Matthieu Baerts
2023-02-22 17:06               ` Matthieu Baerts
2023-02-15 17:53 ` [PATCH v2 mptcp-net 1/2] mptcp: use the workqueue to destroy unaccepted sockets Matthieu Baerts
2023-02-16 14:58   ` Paolo Abeni
2023-02-16 15:07     ` Matthieu Baerts
2023-02-16 15:15       ` Paolo Abeni
2023-02-16 15:24         ` Matthieu Baerts
  -- strict thread matches above, loose matches on Subject: below --
2023-02-08 21:44 [PATCH mptcp-net 2/2] mptcp: fix UaF in listener shutdown Paolo Abeni
2023-02-08 23:09 ` mptcp: fix UaF in listener shutdown: Tests Results MPTCP CI
2023-02-09 18:57 ` MPTCP CI

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.