From: Dmytro Shytyi <dmytro@shytyi.net>
To: mptcp@lists.linux.dev
Cc: Dmytro Shytyi <dmytro@shytyi.net>
Subject: [RFC PATCH mptcp-next v6 8/9] mptcp_fastopen_add_skb() helpers (skb to msk)
Date: Fri, 16 Sep 2022 01:56:03 +0200
Message-ID: <20220915235604.26018-9-dmytro@shytyi.net>
In-Reply-To: <20220915235604.26018-1-dmytro@shytyi.net>

Set of helpers for mptcp_fastopen_add_skb(). Some of these functions
are adapted from net/ipv4/tcp_fastopen.c. This call chain adds the
received skb to &msk->sk_receive_queue:

  subflow_v4_conn_request() -> mptcp_conn_request() ->
  mptcp_try_fastopen() -> mptcp_fastopen_create_child() ->
  mptcp_fastopen_add_skb()

Signed-off-by: Dmytro Shytyi <dmytro@shytyi.net>
---
 net/mptcp/fastopen.c | 379 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 379 insertions(+)

diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
index 7070d2a966c9..815bae37097a 100644
--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -172,3 +172,382 @@ void mptcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb, struct request
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 		tcp_fin(sk);
 }
+
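+/* Create the child socket directly from the SYN, mirroring
+ * tcp_fastopen_create_child(): account the request in the listener's
+ * fastopen queue, arm the SYNACK retransmit timer, and queue the data
+ * carried by the SYN via mptcp_fastopen_add_skb().
+ */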
+struct sock *mptcp_fastopen_create_child(struct sock *sk,
+					 struct sk_buff *skb,
+					 struct request_sock *req)
+{
+	struct request_sock_queue *r_sock_queue = &inet_csk(sk)->icsk_accept_queue;
+	struct tcp_sock *tp;
+	struct sock *child_sock;
+	bool own_req;
+
+	child_sock = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+							      NULL, &own_req);
+	if (!child_sock)
+		return NULL;
+
+	spin_lock(&r_sock_queue->fastopenq.lock);
+	r_sock_queue->fastopenq.qlen++;
+	spin_unlock(&r_sock_queue->fastopenq.lock);
+
+	tp = tcp_sk(child_sock);
+
+	rcu_assign_pointer(tp->fastopen_rsk, req);
+	tcp_rsk(req)->tfo_listener = true;
+
+	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+	tp->max_window = tp->snd_wnd;
+
+	inet_csk_reset_xmit_timer(child_sock, ICSK_TIME_RETRANS,
+				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+
+	refcount_set(&req->rsk_refcnt, 2);
+
+	tcp_init_transfer(child_sock, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);
+
+	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+
+	mptcp_fastopen_add_skb(child_sock, skb, req);
+
+	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
+	tp->rcv_wup = tp->rcv_nxt;
+
+	return child_sock;
+}
+
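+/* Check that the fastopen accept queue can take one more request,
+ * evicting one expired entry from the RST queue if that frees a slot
+ * (cf. tcp_fastopen_queue_check()).
+ */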
+bool mptcp_fastopen_queue_check(struct sock *sk)
+{
+	struct fastopen_queue *fo_queue;
+	struct request_sock *req_sock;
+
+	fo_queue = &inet_csk(sk)->icsk_accept_queue.fastopenq;
+	if (fo_queue->max_qlen == 0)
+		return false;
+
+	if (fo_queue->qlen >= fo_queue->max_qlen) {
+		spin_lock(&fo_queue->lock);
+		req_sock = fo_queue->rskq_rst_head;
+		if (!req_sock || time_after(req_sock->rsk_timer.expires, jiffies)) {
+			spin_unlock(&fo_queue->lock);
+			return false;
+		}
+		fo_queue->rskq_rst_head = req_sock->dl_next;
+		fo_queue->qlen--;
+		spin_unlock(&fo_queue->lock);
+		reqsk_put(req_sock);
+	}
+	return true;
+}
+
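+/* Compute the fastopen cookie as a siphash over the source and
+ * destination addresses of the SYN. Only AF_INET is handled here,
+ * so IPv6 connections get no cookie.
+ */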
+bool mptcp_fastopen_cookie_gen_cipher(struct request_sock *req,
+				      struct sk_buff *syn,
+				      const siphash_key_t *key,
+				      struct tcp_fastopen_cookie *foc)
+{
+	if (req->rsk_ops->family == AF_INET) {
+		const struct iphdr *iph = ip_hdr(syn);
+
+		foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
+					  sizeof(iph->saddr) +
+					  sizeof(iph->daddr),
+					  key));
+		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
+		return true;
+	}
+
+	return false;
+}
+
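+/* Generate a cookie with the primary key of the listener's fastopen
+ * context, if one is installed.
+ */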
+void mptcp_fastopen_cookie_gen(struct sock *sk,
+			       struct request_sock *req,
+			       struct sk_buff *syn,
+			       struct tcp_fastopen_cookie *foc)
+{
+	struct tcp_fastopen_context *ctx;
+
+	rcu_read_lock();
+	ctx = tcp_fastopen_get_ctx(sk);
+	if (ctx)
+		mptcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
+	rcu_read_unlock();
+}
+
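+/* Validate the cookie echoed by the peer against every key in the
+ * fastopen context. Returns the 1-based index of the matching key
+ * (> 1 means a non-primary key matched), or 0 if no key matches;
+ * @valid_foc is left holding the cookie for the primary key.
+ */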
+int mptcp_fastopen_cookie_gen_check(struct sock *sk,
+				    struct request_sock *req,
+				    struct sk_buff *syn,
+				    struct tcp_fastopen_cookie *orig,
+				    struct tcp_fastopen_cookie *valid_foc)
+{
+	struct tcp_fastopen_cookie mptcp_search_foc = { .len = -1 };
+	struct tcp_fastopen_cookie *mptcp_foc = valid_foc;
+	struct tcp_fastopen_context *mptcp_fo_ctx;
+	int i, ret = 0;
+
+	rcu_read_lock();
+	mptcp_fo_ctx = tcp_fastopen_get_ctx(sk);
+	if (!mptcp_fo_ctx)
+		goto out;
+	for (i = 0; i < tcp_fastopen_context_len(mptcp_fo_ctx); i++) {
+		mptcp_fastopen_cookie_gen_cipher(req, syn, &mptcp_fo_ctx->key[i], mptcp_foc);
+		if (tcp_fastopen_cookie_match(mptcp_foc, orig)) {
+			ret = i + 1;
+			goto out;
+		}
+		mptcp_foc = &mptcp_search_foc;
+	}
+out:
+	rcu_read_unlock();
+	return ret;
+}
+
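+/* Cookie-less fastopen is allowed when enabled by the given sysctl
+ * flag, by the TCP_FASTOPEN_NO_COOKIE socket option, or by a route
+ * metric.
+ */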
+bool mptcp_fastopen_no_cookie(const struct sock *sk,
+			      const struct dst_entry *dst,
+			      int flag)
+{
+	return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
+	       tcp_sk(sk)->fastopen_no_cookie ||
+	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
+}
+
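+/* Decide whether this SYN may be served via fastopen: the SYN must
+ * carry data or a cookie option and the fastopen queue must have
+ * room. On success, return the freshly created child socket and
+ * echo a valid cookie back through @foc; otherwise return NULL.
+ */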
+struct sock *mptcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+				struct request_sock *req,
+				struct tcp_fastopen_cookie *foc,
+				const struct dst_entry *dst)
+{
+	bool syn_data_status = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
+	struct tcp_fastopen_cookie valid_mptcp_foc = { .len = -1 };
+	struct sock *child_sock;
+	int ret = 0;
+
+	/* Bail out unless the SYN carries data or a cookie option and
+	 * the fastopen queue has room, as in tcp_try_fastopen().
+	 */
+	if (!(syn_data_status || foc->len >= 0) ||
+	    !mptcp_fastopen_queue_check(sk)) {
+		foc->len = -1;
+		return NULL;
+	}
+
+	if (mptcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
+		goto fastopen;
+
+	if (foc->len == 0) {
+		mptcp_fastopen_cookie_gen(sk, req, skb, &valid_mptcp_foc);
+	} else if (foc->len > 0) {
+		ret = mptcp_fastopen_cookie_gen_check(sk, req, skb, foc,
+						      &valid_mptcp_foc);
+		if (ret) {
+fastopen:
+			child_sock = mptcp_fastopen_create_child(sk, skb, req);
+			if (child_sock) {
+				if (ret == 2) {
+					valid_mptcp_foc.exp = foc->exp;
+					*foc = valid_mptcp_foc;
+				} else {
+					foc->len = -1;
+				}
+				return child_sock;
+			}
+		}
+	}
+	valid_mptcp_foc.exp = foc->exp;
+	*foc = valid_mptcp_foc;
+	return NULL;
+}
+
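+/* MPTCP counterpart of tcp_conn_request(): allocate and initialize
+ * the request socket from the incoming SYN, then try the fastopen
+ * path before falling back to the regular SYNACK handshake.
+ * Syncookies are not supported here, so want_cookie stays false.
+ */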
+int mptcp_conn_request(struct request_sock_ops *rsk_ops,
+		       const struct tcp_request_sock_ops *af_ops,
+		       struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_fastopen_cookie mptcp_foc = { .len = -1 };
+	struct tcp_options_received tmp_opt_rcvd;
+	__u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
+	struct tcp_sock *tp_sock = tcp_sk(sk);
+	struct sock *mptcp_fo_sk = NULL;
+	struct net *net = sock_net(sk);
+	struct request_sock *req_sock;
+	bool want_cookie = false;
+	struct dst_entry *dst;
+	struct flowi fl;
+
+	if (sk_acceptq_is_full(sk))
+		goto drop;
+
+	req_sock = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
+	if (!req_sock)
+		goto drop;
+
+	req_sock->syncookie = want_cookie;
+	tcp_rsk(req_sock)->af_specific = af_ops;
+	tcp_rsk(req_sock)->ts_off = 1;
+	tcp_rsk(req_sock)->is_mptcp = 1;
+
+	tcp_clear_options(&tmp_opt_rcvd);
+	tmp_opt_rcvd.mss_clamp = af_ops->mss_clamp;
+	tmp_opt_rcvd.user_mss  = tp_sock->rx_opt.user_mss;
+	tcp_parse_options(sock_net(sk), skb, &tmp_opt_rcvd, 0,
+			  want_cookie ? NULL : &mptcp_foc);
+
+	if (want_cookie && !tmp_opt_rcvd.saw_tstamp)
+		tcp_clear_options(&tmp_opt_rcvd);
+
+	if (IS_ENABLED(CONFIG_SMC) && want_cookie)
+		tmp_opt_rcvd.smc_ok = 0;
+
+	tmp_opt_rcvd.tstamp_ok = 0;
+	mptcp_openreq_init(req_sock, &tmp_opt_rcvd, skb, sk);
+	inet_rsk(req_sock)->no_srccheck = inet_sk(sk)->transparent;
+
+	inet_rsk(req_sock)->ir_iif = inet_request_bound_dev_if(sk, skb);
+
+	dst = af_ops->route_req(sk, skb, &fl, req_sock);
+	if (!dst)
+		goto drop_and_free;
+
+	if (tmp_opt_rcvd.tstamp_ok)
+		tcp_rsk(req_sock)->ts_off = af_ops->init_ts_off(net, skb);
+
+	if (!want_cookie && !isn) {
+		if (!net->ipv4.sysctl_tcp_syncookies &&
+		    (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+		     (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
+		    !tcp_peer_is_proven(req_sock, dst)) {
+			goto drop_and_release;
+		}
+
+		isn = af_ops->init_seq(skb);
+	}
+
+	mptcp_ecn_create_request(req_sock, skb, sk, dst);
+
+	if (want_cookie) {
+		isn = cookie_init_sequence(af_ops, sk, skb, &req_sock->mss);
+		if (!tmp_opt_rcvd.tstamp_ok)
+			inet_rsk(req_sock)->ecn_ok = 0;
+	}
+
+	tcp_rsk(req_sock)->snt_isn = isn;
+	tcp_rsk(req_sock)->txhash = net_tx_rndhash();
+	tcp_rsk(req_sock)->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
+
+	tcp_openreq_init_rwin(req_sock, sk, dst);
+	sk_rx_queue_set(req_to_sk(req_sock), skb);
+	if (!want_cookie) {
+		mptcp_reqsk_record_syn(sk, req_sock, skb);
+		mptcp_fo_sk = mptcp_try_fastopen(sk, skb, req_sock, &mptcp_foc, dst);
+	}
+	if (mptcp_fo_sk) {
+		af_ops->send_synack(mptcp_fo_sk, dst, &fl, req_sock,
+				    &mptcp_foc, TCP_SYNACK_FASTOPEN, skb);
+		if (!inet_csk_reqsk_queue_add(sk, req_sock, mptcp_fo_sk)) {
+			reqsk_fastopen_remove(mptcp_fo_sk, req_sock, false);
+			bh_unlock_sock(mptcp_fo_sk);
+			sock_put(mptcp_fo_sk);
+			goto drop_and_free;
+		}
+		sk->sk_data_ready(sk);
+		bh_unlock_sock(mptcp_fo_sk);
+		sock_put(mptcp_fo_sk);
+	} else {
+		tcp_rsk(req_sock)->tfo_listener = false;
+		if (!want_cookie) {
+			req_sock->timeout = tcp_timeout_init((struct sock *)req_sock);
+			inet_csk_reqsk_queue_hash_add(sk, req_sock, req_sock->timeout);
+		}
+		af_ops->send_synack(sk, dst, &fl, req_sock, &mptcp_foc,
+				    !want_cookie ? TCP_SYNACK_NORMAL :
+						   TCP_SYNACK_COOKIE,
+				    skb);
+		if (want_cookie) {
+			reqsk_free(req_sock);
+			return 0;
+		}
+	}
+	reqsk_put(req_sock);
+	return 0;
+
+drop_and_release:
+	dst_release(dst);
+drop_and_free:
+	__reqsk_free(req_sock);
+drop:
+	tcp_listendrop(sk);
+	return 0;
+}
+
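+/* Save a copy of the SYN headers on the request socket when the
+ * listener has TCP_SAVE_SYN set; save_syn == 2 also includes the
+ * MAC header (cf. tcp_reqsk_record_syn()).
+ */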
+void mptcp_reqsk_record_syn(const struct sock *sk,
+			    struct request_sock *req,
+			    const struct sk_buff *skb)
+{
+	if (tcp_sk(sk)->save_syn) {
+		u32 length = skb_network_header_len(skb) + tcp_hdrlen(skb);
+		struct saved_syn *svd_syn;
+		u32 mac_headerlen;
+		void *base;
+
+		if (tcp_sk(sk)->save_syn == 2) {
+			base = skb_mac_header(skb);
+			mac_headerlen = skb_mac_header_len(skb);
+			length += mac_headerlen;
+		} else {
+			base = skb_network_header(skb);
+			mac_headerlen = 0;
+		}
+
+		svd_syn = kmalloc(struct_size(svd_syn, data, length),
+				  GFP_ATOMIC);
+		if (svd_syn) {
+			svd_syn->mac_hdrlen = mac_headerlen;
+			svd_syn->network_hdrlen = skb_network_header_len(skb);
+			svd_syn->tcp_hdrlen = tcp_hdrlen(skb);
+			memcpy(svd_syn->data, base, length);
+			req->saved_syn = svd_syn;
+		}
+	}
+}
+
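+/* Enable ECN on the request socket when the SYN asked for it (ECE
+ * and CWR both set) and the sysctl, a route metric, the congestion
+ * control, or a BPF program allows it (cf. tcp_ecn_create_request()).
+ */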
+void mptcp_ecn_create_request(struct request_sock *req,
+			      const struct sk_buff *skb,
+			      const struct sock *listen_sk,
+			      const struct dst_entry *dst)
+{
+	const struct tcphdr *thdr = tcp_hdr(skb);
+	const struct net *net = sock_net(listen_sk);
+	bool thdr_ecn = thdr->ece && thdr->cwr;
+	bool ect_stat, ecn_okay;
+	u32 ecn_okay_dst;
+
+	if (!thdr_ecn)
+		return;
+
+	ect_stat = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
+	ecn_okay_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
+	ecn_okay = net->ipv4.sysctl_tcp_ecn || ecn_okay_dst;
+
+	if (((!ect_stat || thdr->res1) && ecn_okay) || tcp_ca_needs_ecn(listen_sk) ||
+	    (ecn_okay_dst & DST_FEATURE_ECN_CA) ||
+	    tcp_bpf_ca_needs_ecn((struct sock *)req))
+		inet_rsk(req)->ecn_ok = 1;
+}
+
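+/* Initialize the request socket fields from the options parsed out
+ * of the SYN (cf. tcp_openreq_init()).
+ */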
+void mptcp_openreq_init(struct request_sock *req,
+			const struct tcp_options_received *rx_opt,
+			struct sk_buff *skb, const struct sock *sk)
+{
+	struct inet_request_sock *ireq = inet_rsk(req);
+
+	req->rsk_rcv_wnd = 0;
+	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+	tcp_rsk(req)->snt_synack = 0;
+	tcp_rsk(req)->last_oow_ack_time = 0;
+	req->mss = rx_opt->mss_clamp;
+	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
+	ireq->tstamp_ok = rx_opt->tstamp_ok;
+	ireq->sack_ok = rx_opt->sack_ok;
+	ireq->snd_wscale = rx_opt->snd_wscale;
+	ireq->wscale_ok = rx_opt->wscale_ok;
+	ireq->acked = 0;
+	ireq->ecn_ok = 0;
+	ireq->ir_rmt_port = tcp_hdr(skb)->source;
+	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
+	ireq->ir_mark = inet_request_mark(sk, skb);
+}
-- 
2.25.1



