All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH net-next v2] net: mptcp, Fast Open Mechanism
@ 2022-01-16  0:12 Dmytro SHYTYI
  2022-01-16 13:15   ` kernel test robot
                   ` (2 more replies)
  0 siblings, 3 replies; 21+ messages in thread
From: Dmytro SHYTYI @ 2022-01-16  0:12 UTC (permalink / raw)
  To: mptcp; +Cc: Dmytro SHYTYI

This set of patches will bring "Fast Open" Option support to MPTCP.
The aim of Fast Open Mechanism is to eliminate one round trip
time from a TCP conversation by allowing data to be included as
part of the SYN segment that initiates the connection.

IETF RFC 8684: Appendix B.  TCP Fast Open and MPTCP.

[PATCH v2] includes "client-server" partial support for:
1. MPTCP cookie request from client.
2. MPTCP cookie offering from server.
3. MPTCP SYN+DATA+COOKIE from client.
4. subsequent write + read on the opened socket.

This patch is Work In Progress and an early draft shared due to a
community request.

Signed-off-by: Dmytro SHYTYI <dmytro@shytyi.net>
---
 include/linux/tcp.h             |  7 ++++
 net/ipv4/inet_connection_sock.c |  3 +-
 net/ipv4/tcp_fastopen.c         | 42 +++++++++++++++++++----
 net/ipv4/tcp_input.c            | 16 +++++----
 net/mptcp/protocol.c            | 59 ++++++++++++++++++++++++++++++---
 net/mptcp/sockopt.c             | 40 ++++++++++++++++++++++
 net/mptcp/subflow.c             | 14 ++++++++
 7 files changed, 162 insertions(+), 19 deletions(-)

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 48d8a363319e..d7092234d442 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -54,7 +54,14 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 /* TCP Fast Open */
 #define TCP_FASTOPEN_COOKIE_MIN	4	/* Min Fast Open Cookie size in bytes */
 #define TCP_FASTOPEN_COOKIE_MAX	16	/* Max Fast Open Cookie size in bytes */
+
+#if IS_ENABLED(CONFIG_MPTCP)
+#define TCP_FASTOPEN_COOKIE_SIZE 4	/* the size employed by MPTCP impl. */
+#else
 #define TCP_FASTOPEN_COOKIE_SIZE 8	/* the size employed by this impl. */
+#endif
+
+
 
 /* TCP Fast Open Cookie as stored in memory */
 struct tcp_fastopen_cookie {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f7fea3a7c5e6..4b4159c0258d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -501,7 +501,8 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 	req = reqsk_queue_remove(queue, sk);
 	newsk = req->sk;
 
-	if (sk->sk_protocol == IPPROTO_TCP &&
+	if ((sk->sk_protocol == IPPROTO_TCP ||
+	     sk->sk_protocol == IPPROTO_MPTCP) &&
 	    tcp_rsk(req)->tfo_listener) {
 		spin_lock_bh(&queue->fastopenq.lock);
 		if (tcp_rsk(req)->tfo_listener) {
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index fdbcf2a6d08e..d26378983ed7 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -119,15 +119,26 @@ static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
 					     const siphash_key_t *key,
 					     struct tcp_fastopen_cookie *foc)
 {
+#if IS_ENABLED(CONFIG_MPTCP)
+	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u32));
+#else
 	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));
+#endif
 
 	if (req->rsk_ops->family == AF_INET) {
 		const struct iphdr *iph = ip_hdr(syn);
 
+#if IS_ENABLED(CONFIG_MPTCP)
+		foc->val[0] = cpu_to_le32(siphash(&iph->saddr,
+						  sizeof(iph->saddr) +
+						  sizeof(iph->daddr),
+						  key));
+#else
 		foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
 					  sizeof(iph->saddr) +
 					  sizeof(iph->daddr),
 					  key));
+#endif
 		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
 		return true;
 	}
@@ -149,6 +160,7 @@ static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
 /* Generate the fastopen cookie by applying SipHash to both the source and
  * destination addresses.
  */
+/*
 static void tcp_fastopen_cookie_gen(struct sock *sk,
 				    struct request_sock *req,
 				    struct sk_buff *syn,
@@ -162,6 +174,7 @@ static void tcp_fastopen_cookie_gen(struct sock *sk,
 		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
 	rcu_read_unlock();
 }
+*/
 
 /* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
  * queue this additional data / FIN.
@@ -291,12 +304,12 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
 	 */
 	return child;
 }
-
+/*
 static bool tcp_fastopen_queue_check(struct sock *sk)
 {
 	struct fastopen_queue *fastopenq;
 
-	/* Make sure the listener has enabled fastopen, and we don't
+	* Make sure the listener has enabled fastopen, and we don't
 	 * exceed the max # of pending TFO requests allowed before trying
 	 * to validating the cookie in order to avoid burning CPU cycles
 	 * unnecessarily.
@@ -305,7 +318,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
 	 * processing a cookie request is that clients can't differentiate
 	 * between qlen overflow causing Fast Open to be disabled
 	 * temporarily vs a server not supporting Fast Open at all.
-	 */
+	 *
 	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
 	if (fastopenq->max_qlen == 0)
 		return false;
@@ -327,7 +340,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
 	}
 	return true;
 }
-
+*/
 static bool tcp_fastopen_no_cookie(const struct sock *sk,
 				   const struct dst_entry *dst,
 				   int flag)
@@ -346,28 +359,43 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 			      struct tcp_fastopen_cookie *foc,
 			      const struct dst_entry *dst)
 {
+	/*
 	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
 	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
+	*/
 	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
 	struct sock *child;
 	int ret = 0;
 
 	if (foc->len == 0) /* Client requests a cookie */
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
-
+/*
 	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
 	      (syn_data || foc->len >= 0) &&
 	      tcp_fastopen_queue_check(sk))) {
 		foc->len = -1;
 		return NULL;
 	}
-
+*/
 	if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
 		goto fastopen;
 
 	if (foc->len == 0) {
 		/* Client requests a cookie. */
-		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
+		//tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
+
+		struct tcp_fastopen_context *ctx;
+		struct iphdr *iph = ip_hdr(skb);
+
+		tcp_fastopen_init_key_once(sock_net(sk));
+		ctx = tcp_fastopen_get_ctx(sk);
+
+		valid_foc.val[0] = cpu_to_le32(siphash(&iph->saddr,
+						       sizeof(iph->saddr) +
+						       sizeof(iph->daddr),
+						       &ctx->key[0]));
+		valid_foc.len = TCP_FASTOPEN_COOKIE_SIZE;
+
 	} else if (foc->len > 0) {
 		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
 						    &valid_foc);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 246ab7b5e857..915570132014 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5908,7 +5908,6 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
 			} else {
 				tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
 			}
-
 			__tcp_ack_snd_check(sk, 0);
 no_ack:
 			if (eaten)
@@ -6229,9 +6228,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		}
 		if (fastopen_fail)
 			return -1;
-		if (sk->sk_write_pending ||
-		    icsk->icsk_accept_queue.rskq_defer_accept ||
-		    inet_csk_in_pingpong_mode(sk)) {
+
+		if ((sk->sk_write_pending ||
+		     icsk->icsk_accept_queue.rskq_defer_accept ||
+		     inet_csk_in_pingpong_mode(sk)) && !th->syn) {
 			/* Save one ACK. Data will be ready after
 			 * several ticks, if write_pending is set.
 			 *
@@ -6243,9 +6243,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 			tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 						  TCP_DELACK_MAX, TCP_RTO_MAX);
-
 discard:
 			tcp_drop(sk, skb);
+			tcp_send_ack(sk);
+
 			return 0;
 		} else {
 			tcp_send_ack(sk);
@@ -6425,6 +6426,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		tcp_urg(sk, skb, th);
 		__kfree_skb(skb);
 		tcp_data_snd_check(sk);
+
 		return 0;
 	}
 
@@ -6901,7 +6903,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 			 */
 			pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
 				    rsk_ops->family);
-			goto drop_and_release;
+			//goto drop_and_release;
 		}
 
 		isn = af_ops->init_seq(skb);
@@ -6954,7 +6956,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	reqsk_put(req);
 	return 0;
 
-drop_and_release:
+//drop_and_release:
 	dst_release(dst);
 drop_and_free:
 	__reqsk_free(req);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index cd6b11c9b54d..3020a9c95a31 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -52,6 +52,8 @@ static struct percpu_counter mptcp_sockets_allocated;
 
 static void __mptcp_destroy_sock(struct sock *sk);
 static void __mptcp_check_send_data_fin(struct sock *sk);
+static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+				int addr_len, int flags);
 
 DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
 static struct net_device mptcp_napi_dev;
@@ -1677,6 +1679,53 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 	}
 }
 
+static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
+				  size_t len, struct mptcp_sock *msk, size_t copied)
+{
+	const struct iphdr *iph;
+	struct ubuf_info *uarg;
+	struct sockaddr *uaddr;
+	struct sk_buff *skb;
+	struct tcp_sock *tp;
+	struct socket *ssk;
+	int ret;
+
+	ssk = __mptcp_nmpc_socket(msk);
+	if (unlikely(!ssk))
+		goto out_EFAULT;
+	skb = sk_stream_alloc_skb(ssk->sk, 0, ssk->sk->sk_allocation, true);
+	if (unlikely(!skb))
+		goto out_EFAULT;
+	iph = ip_hdr(skb);
+	if (unlikely(!iph))
+		goto out_EFAULT;
+	uarg = msg_zerocopy_realloc(sk, len, skb_zcopy(skb));
+	if (unlikely(!uarg))
+		goto out_EFAULT;
+	uaddr = msg->msg_name;
+
+	tp = tcp_sk(ssk->sk);
+	if (unlikely(!tp))
+		goto out_EFAULT;
+	if (!tp->fastopen_req)
+		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req), ssk->sk->sk_allocation);
+
+	if (unlikely(!tp->fastopen_req))
+		goto out_EFAULT;
+	tp->fastopen_req->data = msg;
+	tp->fastopen_req->size = len;
+	tp->fastopen_req->uarg = uarg;
+
+	/* requests a cookie */
+	ret = mptcp_stream_connect(sk->sk_socket, uaddr,
+				   msg->msg_namelen, msg->msg_flags);
+
+	return ret;
+out_EFAULT:
+	ret = -EFAULT;
+	return ret;
+}
+
 static void mptcp_set_nospace(struct sock *sk)
 {
 	/* enable autotune */
@@ -1694,9 +1743,9 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	int ret = 0;
 	long timeo;
 
-	/* we don't support FASTOPEN yet */
+	/* we don't fully support FASTOPEN yet */
 	if (msg->msg_flags & MSG_FASTOPEN)
-		return -EOPNOTSUPP;
+		ret = mptcp_sendmsg_fastopen(sk, msg, len, msk, copied);
 
 	/* silently ignore everything else */
 	msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL;
@@ -2482,10 +2531,10 @@ static void mptcp_worker(struct work_struct *work)
 
 	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
 		__mptcp_close_subflow(msk);
-
+	/*
 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
 		__mptcp_retrans(sk);
-
+	*/
 unlock:
 	release_sock(sk);
 	sock_put(sk);
@@ -2589,6 +2638,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
 	case TCP_SYN_SENT:
 		tcp_disconnect(ssk, O_NONBLOCK);
 		break;
+	case TCP_ESTABLISHED:
+		break;
 	default:
 		if (__mptcp_check_fallback(mptcp_sk(sk))) {
 			pr_debug("Fallback");
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 0f1e661c2032..0e471e31e72a 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -539,6 +539,7 @@ static bool mptcp_supported_sockopt(int level, int optname)
 		case TCP_TIMESTAMP:
 		case TCP_NOTSENT_LOWAT:
 		case TCP_TX_DELAY:
+		case TCP_FASTOPEN:
 			return true;
 		}
 
@@ -598,6 +599,43 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
 	return ret;
 }
 
+static int mptcp_setsockopt_sol_tcp_fastopen(struct mptcp_sock *msk, sockptr_t optval,
+					     unsigned int optlen)
+{
+	struct mptcp_subflow_context *subflow;
+	struct sock *sk = (struct sock *)msk;
+	struct net *net = sock_net(sk);
+	int val;
+	int ret;
+
+	ret = 0;
+
+	if (copy_from_sockptr(&val, optval, sizeof(val)))
+		return -EFAULT;
+
+	lock_sock(sk);
+
+	mptcp_for_each_subflow(msk, subflow) {
+		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+		lock_sock(ssk);
+
+		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
+		    TCPF_LISTEN))) {
+			tcp_fastopen_init_key_once(net);
+			fastopen_queue_tune(sk, val);
+		} else {
+			ret = -EINVAL;
+		}
+
+		release_sock(ssk);
+	}
+
+	release_sock(sk);
+
+	return ret;
+}
+
 static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
 				    sockptr_t optval, unsigned int optlen)
 {
@@ -606,6 +644,8 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
 		return -EOPNOTSUPP;
 	case TCP_CONGESTION:
 		return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen);
+	case TCP_FASTOPEN:
+		return mptcp_setsockopt_sol_tcp_fastopen(msk, optval, optlen);
 	}
 
 	return -EOPNOTSUPP;
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 6172f380dfb7..82976b31f2f2 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -966,6 +966,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 	trace_get_mapping_status(mpext);
 
 	data_len = mpext->data_len;
+
 	if (data_len == 0) {
 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
 		return MAPPING_INVALID;
@@ -1024,6 +1025,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 		/* If this skb data are fully covered by the current mapping,
 		 * the new map would need caching, which is not supported
 		 */
+
 		if (skb_is_fully_mapped(ssk, skb)) {
 			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
 			return MAPPING_INVALID;
@@ -1044,6 +1046,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 	subflow->map_data_csum = csum_unfold(mpext->csum);
 
 	/* Cfr RFC 8684 Section 3.3.0 */
+
 	if (unlikely(subflow->map_csum_reqd != csum_reqd))
 		return MAPPING_INVALID;
 
@@ -1180,9 +1183,19 @@ static bool subflow_check_data_avail(struct sock *ssk)
 	}
 
 	if (subflow->mp_join || subflow->fully_established) {
+		skb = skb_peek(&ssk->sk_receive_queue);
+		subflow->map_valid = 1;
+		subflow->map_seq = READ_ONCE(msk->ack_seq);
+		subflow->map_data_len = skb->len;
+		subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+
+		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+		return true;
+
 		/* fatal protocol error, close the socket.
 		 * subflow_error_report() will introduce the appropriate barriers
 		 */
+		/*
 		ssk->sk_err = EBADMSG;
 		tcp_set_state(ssk, TCP_CLOSE);
 		subflow->reset_transient = 0;
@@ -1190,6 +1203,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 		tcp_send_active_reset(ssk, GFP_ATOMIC);
 		WRITE_ONCE(subflow->data_avail, 0);
 		return false;
+		*/
 	}
 
 	__mptcp_do_fallback(msk);
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 21+ messages in thread
* Re: [PATCH net-next v2] net: mptcp, Fast Open Mechanism
  2022-01-16  0:12 [PATCH net-next v2] net: mptcp, Fast Open Mechanism Dmytro SHYTYI
  2022-01-16 13:15   ` kernel test robot
@ 2022-01-18 14:28 ` Dan Carpenter
  2022-01-17  9:58 ` Paolo Abeni
  2 siblings, 0 replies; 21+ messages in thread
From: kernel test robot @ 2022-01-16 18:41 UTC (permalink / raw)
  To: kbuild

[-- Attachment #1: Type: text/plain, Size: 9679 bytes --]

CC: kbuild-all(a)lists.01.org
In-Reply-To: <20220116001259.203319-1-dmytro@shytyi.net>
References: <20220116001259.203319-1-dmytro@shytyi.net>
TO: Dmytro SHYTYI <dmytro@shytyi.net>
TO: mptcp(a)lists.linux.dev
CC: Dmytro SHYTYI <dmytro@shytyi.net>

Hi Dmytro,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on v5.16]
[cannot apply to net-next/master linus/master next-20220116]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Dmytro-SHYTYI/net-mptcp-Fast-Open-Mechanism/20220116-081430
base:    df0cc57e057f18e44dac8e6c18aba47ab53202f9
:::::: branch date: 18 hours ago
:::::: commit date: 18 hours ago
config: x86_64-randconfig-m001 (https://download.01.org/0day-ci/archive/20220117/202201170247.BMTU5XYy-lkp(a)intel.com/config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>

smatch warnings:
net/ipv4/tcp_input.c:6960 tcp_conn_request() warn: ignoring unreachable code.

vim +6960 net/ipv4/tcp_input.c

1fb6f159fd21c6 Octavian Purdila      2014-06-25  6864  
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6865  	tcp_clear_options(&tmp_opt);
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6866  	tmp_opt.mss_clamp = af_ops->mss_clamp;
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6867  	tmp_opt.user_mss  = tp->rx_opt.user_mss;
eed29f17f09ad7 Eric Dumazet          2017-06-07  6868  	tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
eed29f17f09ad7 Eric Dumazet          2017-06-07  6869  			  want_cookie ? NULL : &foc);
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6870  
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6871  	if (want_cookie && !tmp_opt.saw_tstamp)
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6872  		tcp_clear_options(&tmp_opt);
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6873  
bc58a1baf2a978 Hans Wippel           2018-03-23  6874  	if (IS_ENABLED(CONFIG_SMC) && want_cookie)
bc58a1baf2a978 Hans Wippel           2018-03-23  6875  		tmp_opt.smc_ok = 0;
bc58a1baf2a978 Hans Wippel           2018-03-23  6876  
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6877  	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6878  	tcp_openreq_init(req, &tmp_opt, skb, sk);
7a682575ad4829 KOVACS Krisztian      2016-09-23  6879  	inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent;
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6880  
16f86165bd0a94 Eric Dumazet          2015-03-13  6881  	/* Note: tcp_v6_init_req() might override ir_iif for link locals */
6dd9a14e92e548 David Ahern           2015-12-16  6882  	inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
16f86165bd0a94 Eric Dumazet          2015-03-13  6883  
7ea851d19b2359 Florian Westphal      2020-11-30  6884  	dst = af_ops->route_req(sk, skb, &fl, req);
7ea851d19b2359 Florian Westphal      2020-11-30  6885  	if (!dst)
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6886  		goto drop_and_free;
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6887  
84b114b98452c4 Eric Dumazet          2017-05-05  6888  	if (tmp_opt.tstamp_ok)
5d2ed0521ac98f Eric Dumazet          2017-06-07  6889  		tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
95a22caee396ce Florian Westphal      2016-12-01  6890  
f7b3bec6f5167e Florian Westphal      2014-11-03  6891  	if (!want_cookie && !isn) {
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6892  		/* Kill the following clause, if you dislike this way. */
4396e46187ca50 Soheil Hassas Yeganeh 2017-03-15  6893  		if (!net->ipv4.sysctl_tcp_syncookies &&
fee83d097b1620 Haishuang Yan         2016-12-28  6894  		    (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
fee83d097b1620 Haishuang Yan         2016-12-28  6895  		     (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
d82bae12dc38d7 Soheil Hassas Yeganeh 2017-03-15  6896  		    !tcp_peer_is_proven(req, dst)) {
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6897  			/* Without syncookies last quarter of
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6898  			 * backlog is filled with destinations,
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6899  			 * proven to be alive.
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6900  			 * It means that we continue to communicate
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6901  			 * to destinations, already remembered
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6902  			 * to the moment of synflood.
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6903  			 */
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6904  			pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6905  				    rsk_ops->family);
52c7bf82e2e91e Dmytro SHYTYI         2022-01-16  6906  			//goto drop_and_release;
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6907  		}
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6908  
84b114b98452c4 Eric Dumazet          2017-05-05  6909  		isn = af_ops->init_seq(skb);
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6910  	}
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6911  
f7b3bec6f5167e Florian Westphal      2014-11-03  6912  	tcp_ecn_create_request(req, skb, sk, dst);
f7b3bec6f5167e Florian Westphal      2014-11-03  6913  
f7b3bec6f5167e Florian Westphal      2014-11-03  6914  	if (want_cookie) {
f7b3bec6f5167e Florian Westphal      2014-11-03  6915  		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
f7b3bec6f5167e Florian Westphal      2014-11-03  6916  		if (!tmp_opt.tstamp_ok)
f7b3bec6f5167e Florian Westphal      2014-11-03  6917  			inet_rsk(req)->ecn_ok = 0;
f7b3bec6f5167e Florian Westphal      2014-11-03  6918  	}
f7b3bec6f5167e Florian Westphal      2014-11-03  6919  
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6920  	tcp_rsk(req)->snt_isn = isn;
58d607d3e52f2b Eric Dumazet          2015-09-15  6921  	tcp_rsk(req)->txhash = net_tx_rndhash();
e9b12edc133b54 Wei Wang              2020-09-09  6922  	tcp_rsk(req)->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6923  	tcp_openreq_init_rwin(req, sk, dst);
c6345ce7d361dc Amritha Nambiar       2018-06-29  6924  	sk_rx_queue_set(req_to_sk(req), skb);
ca6fb06518836e Eric Dumazet          2015-10-02  6925  	if (!want_cookie) {
ca6fb06518836e Eric Dumazet          2015-10-02  6926  		tcp_reqsk_record_syn(sk, req, skb);
71c02379c762cb Christoph Paasch      2017-10-23  6927  		fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
ca6fb06518836e Eric Dumazet          2015-10-02  6928  	}
7c85af8810448d Eric Dumazet          2015-09-24  6929  	if (fastopen_sk) {
ca6fb06518836e Eric Dumazet          2015-10-02  6930  		af_ops->send_synack(fastopen_sk, dst, &fl, req,
331fca4315efa3 Martin KaFai Lau      2020-08-20  6931  				    &foc, TCP_SYNACK_FASTOPEN, skb);
7656d842de93fd Eric Dumazet          2015-10-04  6932  		/* Add the child socket directly into the accept queue */
9d3e1368bb4589 Guillaume Nault       2019-03-08  6933  		if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
9d3e1368bb4589 Guillaume Nault       2019-03-08  6934  			reqsk_fastopen_remove(fastopen_sk, req, false);
9d3e1368bb4589 Guillaume Nault       2019-03-08  6935  			bh_unlock_sock(fastopen_sk);
9d3e1368bb4589 Guillaume Nault       2019-03-08  6936  			sock_put(fastopen_sk);
9403cf23025880 Guillaume Nault       2019-03-19  6937  			goto drop_and_free;
9d3e1368bb4589 Guillaume Nault       2019-03-08  6938  		}
7656d842de93fd Eric Dumazet          2015-10-04  6939  		sk->sk_data_ready(sk);
7656d842de93fd Eric Dumazet          2015-10-04  6940  		bh_unlock_sock(fastopen_sk);
7c85af8810448d Eric Dumazet          2015-09-24  6941  		sock_put(fastopen_sk);
7c85af8810448d Eric Dumazet          2015-09-24  6942  	} else {
9439ce00f208d9 Eric Dumazet          2015-03-17  6943  		tcp_rsk(req)->tfo_listener = false;
ca6fb06518836e Eric Dumazet          2015-10-02  6944  		if (!want_cookie)
8550f328f45db6 Lawrence Brakmo       2017-06-30  6945  			inet_csk_reqsk_queue_hash_add(sk, req,
8550f328f45db6 Lawrence Brakmo       2017-06-30  6946  				tcp_timeout_init((struct sock *)req));
b3d051477cf94e Eric Dumazet          2016-04-13  6947  		af_ops->send_synack(sk, dst, &fl, req, &foc,
b3d051477cf94e Eric Dumazet          2016-04-13  6948  				    !want_cookie ? TCP_SYNACK_NORMAL :
331fca4315efa3 Martin KaFai Lau      2020-08-20  6949  						   TCP_SYNACK_COOKIE,
331fca4315efa3 Martin KaFai Lau      2020-08-20  6950  				    skb);
9caad864151e52 Eric Dumazet          2016-04-01  6951  		if (want_cookie) {
9caad864151e52 Eric Dumazet          2016-04-01  6952  			reqsk_free(req);
9caad864151e52 Eric Dumazet          2016-04-01  6953  			return 0;
9caad864151e52 Eric Dumazet          2016-04-01  6954  		}
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6955  	}
ca6fb06518836e Eric Dumazet          2015-10-02  6956  	reqsk_put(req);
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6957  	return 0;
1fb6f159fd21c6 Octavian Purdila      2014-06-25  6958  
52c7bf82e2e91e Dmytro SHYTYI         2022-01-16  6959  //drop_and_release:
1fb6f159fd21c6 Octavian Purdila      2014-06-25 @6960  	dst_release(dst);

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org

^ permalink raw reply	[flat|nested] 21+ messages in thread

end of thread, other threads:[~2022-01-21  0:01 UTC | newest]

Thread overview: 21+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-01-16  0:12 [PATCH net-next v2] net: mptcp, Fast Open Mechanism Dmytro SHYTYI
2022-01-16 13:15 ` kernel test robot
2022-01-16 13:15   ` kernel test robot
2022-01-16 21:24   ` Dmytro SHYTYI
2022-01-17  9:01     ` Matthieu Baerts
2022-01-17 21:08       ` Dmytro SHYTYI
2022-01-16 13:45 ` kernel test robot
2022-01-16 13:45   ` kernel test robot
2022-01-17  9:58 ` Paolo Abeni
2022-01-17 10:03   ` Paolo Abeni
2022-01-17 10:22     ` Matthieu Baerts
2022-01-17 21:51       ` Dmytro SHYTYI
2022-01-17 21:48     ` Dmytro SHYTYI
2022-01-18 11:02       ` Paolo Abeni
2022-01-19 17:35         ` Dmytro SHYTYI
2022-01-21  0:00           ` Dmytro SHYTYI
2022-01-17 21:39   ` Dmytro SHYTYI
2022-01-16 18:41 kernel test robot
2022-01-18 14:28 ` Dan Carpenter
2022-01-18 14:28 ` Dan Carpenter
2022-01-19 17:37 ` Dmytro SHYTYI

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.