* [RFC] tcp: Consider mtu probing for tcp_xmit_size_goal
@ 2021-04-26  2:22 Leonard Crestez
  2021-04-26 15:40 ` Neal Cardwell
From: Leonard Crestez @ 2021-04-26  2:22 UTC (permalink / raw)
  To: Matt Mathis, Neal Cardwell
  Cc: David S. Miller, Eric Dumazet, Willem de Bruijn, Jakub Kicinski,
	Hideaki YOSHIFUJI, David Ahern, John Heffner,
	Soheil Hassas Yeganeh, Roopa Prabhu, Leonard Crestez, netdev,
	linux-kernel

According to RFC4821 Section 7.4 "Protocols MAY delay sending non-probes
in order to accumulate enough data" but linux almost never does that.

Linux checks for probe_size + (1 + retries) * mss_cache to be available
in the send buffer and if that condition is not met it will send anyway
using the current MSS. The feature can be made to work by sending very
large chunks of data from userspace (for example 128k) but for small
writes on fast links tcp mtu probes almost never happen.

This patch tries to take mtu probe into account in tcp_xmit_size_goal, a
function which otherwise attempts to accumulate a packet suitable for
TSO. No delays are introduced beyond existing autocork heuristics.

Suggested-by: Matt Mathis <mattmathis@google.com>
Signed-off-by: Leonard Crestez <lcrestez@drivenets.com>
---
 Documentation/networking/ip-sysctl.rst |  3 +++
 include/net/inet_connection_sock.h     |  7 +++++-
 include/net/netns/ipv4.h               |  1 +
 include/net/tcp.h                      |  3 +++
 net/ipv4/sysctl_net_ipv4.c             |  7 ++++++
 net/ipv4/tcp.c                         | 11 ++++++++-
 net/ipv4/tcp_output.c                  | 33 +++++++++++++++++++++++---
 7 files changed, 60 insertions(+), 5 deletions(-)

Previously:
https://lore.kernel.org/netdev/d7fbf3d3a2490d0a9e99945593ada243da58e0f8.1619000255.git.cdleonard@gmail.com/T/#u
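
For a rough sense of scale on the "small writes" problem above (numbers
are only illustrative, assuming a 1500-byte path with TCP timestamps so
mss_cache is about 1448, and the default tp->reordering of 3):
size_needed = probe_size + (3 + 1) * 1448, so roughly 7KB of data must
already be queued but unsent before a probe is attempted. A stream of
small writes on a fast link rarely accumulates that much, which is why
probing effectively never starts.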

diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index c2ecc9894fd0..36b8964abbb3 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -320,10 +320,13 @@ tcp_mtu_probe_floor - INTEGER
 	If MTU probing is enabled this caps the minimum MSS used for search_low
 	for the connection.
 
 	Default : 48
 
+tcp_mtu_probe_autocork - INTEGER
+	Take the MTU probe size into account when accumulating data via autocorking.
+
 tcp_min_snd_mss - INTEGER
 	TCP SYN and SYNACK messages usually advertise an ADVMSS option,
 	as described in RFC 1122 and RFC 6691.
 
 	If this ADVMSS option is smaller than tcp_min_snd_mss,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 3c8c59471bc1..19afcc7a4f4a 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -123,15 +123,20 @@ struct inet_connection_sock {
 		/* Range of MTUs to search */
 		int		  search_high;
 		int		  search_low;
 
 		/* Information on the current probe. */
-		u32		  probe_size:31,
+		u32		  probe_size:30,
+		/* Are we actively accumulating data for an mtu probe? */
+				  wait_data:1,
 		/* Is the MTUP feature enabled for this connection? */
 				  enabled:1;
 
 		u32		  probe_timestamp;
+
+		/* Timer for wait_data */
+		struct	  timer_list	wait_data_timer;
 	} icsk_mtup;
 	u32			  icsk_probes_tstamp;
 	u32			  icsk_user_timeout;
 
 	u64			  icsk_ca_priv[104 / sizeof(u64)];
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 87e1612497ea..2afe98422441 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -122,10 +122,11 @@ struct netns_ipv4 {
 #ifdef CONFIG_NET_L3_MASTER_DEV
 	u8 sysctl_tcp_l3mdev_accept;
 #endif
 	u8 sysctl_tcp_mtu_probing;
 	int sysctl_tcp_mtu_probe_floor;
+	int sysctl_tcp_mtu_probe_autocork;
 	int sysctl_tcp_base_mss;
 	int sysctl_tcp_min_snd_mss;
 	int sysctl_tcp_probe_threshold;
 	u32 sysctl_tcp_probe_interval;
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index eaea43afcc97..c3eaae3bf7d6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -666,10 +666,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 void tcp_initialize_rcv_mss(struct sock *sk);
 
 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 int tcp_mss_to_mtu(struct sock *sk, int mss);
 void tcp_mtup_init(struct sock *sk);
+int tcp_mtu_probe_size_needed(struct sock *sk);
 
 static inline void tcp_bound_rto(const struct sock *sk)
 {
 	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
@@ -1375,10 +1376,12 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
 	s32 delta;
 
 	if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
 	    ca_ops->cong_control)
 		return;
+	if (inet_csk(sk)->icsk_mtup.wait_data)
+		return;
 	delta = tcp_jiffies32 - tp->lsndtime;
 	if (delta > inet_csk(sk)->icsk_rto)
 		tcp_cwnd_restart(sk, delta);
 }
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index a62934b9f15a..e19176c17973 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -827,10 +827,17 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &tcp_min_snd_mss_min,
 		.extra2		= &tcp_min_snd_mss_max,
 	},
+	{
+		.procname	= "tcp_mtu_probe_autocork",
+		.data		= &init_net.ipv4.sysctl_tcp_mtu_probe_autocork,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 	{
 		.procname	= "tcp_probe_threshold",
 		.data		= &init_net.ipv4.sysctl_tcp_probe_threshold,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e14fd0c50c10..6341a87e9388 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -913,10 +913,11 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 }
 
 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 				       int large_allowed)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 new_size_goal, size_goal;
 
 	if (!large_allowed)
 		return mss_now;
@@ -932,11 +933,19 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
 				     sk->sk_gso_max_segs);
 		size_goal = tp->gso_segs * mss_now;
 	}
 
-	return max(size_goal, mss_now);
+	size_goal = max(size_goal, mss_now);
+
+	if (unlikely(icsk->icsk_mtup.wait_data)) {
+		int mtu_probe_size_needed = tcp_mtu_probe_size_needed(sk);
+		if (mtu_probe_size_needed > 0)
+			size_goal = max(size_goal, (u32)mtu_probe_size_needed);
+	}
+
+	return size_goal;
 }
 
 int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 {
 	int mss_now;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bde781f46b41..c15ed548a48a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2311,10 +2311,23 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
 	}
 
 	return true;
 }
 
+int tcp_mtu_probe_size_needed(struct sock *sk)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	int probe_size;
+	int size_needed;
+
+	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + icsk->icsk_mtup.search_low) >> 1);
+	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
+
+	return size_needed;
+}
+
 /* Create a new MTU probe if we are ready.
  * MTU probe is regularly attempting to increase the path MTU by
  * deliberately sending larger packets.  This discovers routing
  * changes resulting in larger path MTUs.
  *
@@ -2366,16 +2379,18 @@ static int tcp_mtu_probe(struct sock *sk)
 		 */
 		tcp_mtu_check_reprobe(sk);
 		return -1;
 	}
 
+	/* Can probe ever fit inside window? */
+	if (tp->snd_wnd < size_needed)
+		return -1;
+
 	/* Have enough data in the send queue to probe? */
 	if (tp->write_seq - tp->snd_nxt < size_needed)
-		return -1;
+		return net->ipv4.sysctl_tcp_mtu_probe_autocork ? 0 : -1;
 
-	if (tp->snd_wnd < size_needed)
-		return -1;
 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
 		return 0;
 
 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
@@ -2596,28 +2611,40 @@ void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
  * but cannot send anything now because of SWS or another problem.
  */
 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 			   int push_one, gfp_t gfp)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct net *net = sock_net(sk);
 	struct sk_buff *skb;
 	unsigned int tso_segs, sent_pkts;
 	int cwnd_quota;
 	int result;
 	bool is_cwnd_limited = false, is_rwnd_limited = false;
 	u32 max_segs;
 
 	sent_pkts = 0;
 
 	tcp_mstamp_refresh(tp);
+	/*
+	 * Waiting for tcp probe data also applies when push_one=1
+	 * If user does many small writes we hold them until we have enough
+	 * for a probe.
+	 */
 	if (!push_one) {
 		/* Do MTU probing. */
 		result = tcp_mtu_probe(sk);
 		if (!result) {
+			if (net->ipv4.sysctl_tcp_mtu_probe_autocork)
+				icsk->icsk_mtup.wait_data = true;
 			return false;
 		} else if (result > 0) {
+			icsk->icsk_mtup.wait_data = false;
 			sent_pkts = 1;
+		} else {
+			icsk->icsk_mtup.wait_data = false;
 		}
 	}
 
 	max_segs = tcp_tso_segs(sk, mss_now);
 	while ((skb = tcp_send_head(sk))) {

base-commit: 8203c7ce4ef2840929d38b447b4ccd384727f92b
-- 
2.25.1



* Re: [RFC] tcp: Consider mtu probing for tcp_xmit_size_goal
  2021-04-26  2:22 [RFC] tcp: Consider mtu probing for tcp_xmit_size_goal Leonard Crestez
@ 2021-04-26 15:40 ` Neal Cardwell
  2021-04-26 16:58   ` Leonard Crestez
From: Neal Cardwell @ 2021-04-26 15:40 UTC (permalink / raw)
  To: Leonard Crestez
  Cc: Matt Mathis, David S. Miller, Eric Dumazet, Willem de Bruijn,
	Jakub Kicinski, Hideaki YOSHIFUJI, David Ahern, John Heffner,
	Soheil Hassas Yeganeh, Roopa Prabhu, Leonard Crestez, netdev,
	linux-kernel

Thanks for the patch. Some thoughts, in-line below:

On Sun, Apr 25, 2021 at 10:22 PM Leonard Crestez <lcrestez@drivenets.com> wrote:
>
> According to RFC4821 Section 7.4 "Protocols MAY delay sending non-probes
> in order to accumulate enough data" but linux almost never does that.
>
> Linux checks for probe_size + (1 + retries) * mss_cache to be available

I think this means to say "reordering" rather than "retries"?

> in the send buffer and if that condition is not met it will send anyway
> using the current MSS. The feature can be made to work by sending very
> large chunks of data from userspace (for example 128k) but for small
> writes on fast links tcp mtu probes almost never happen.
>
> This patch tries to take mtu probe into account in tcp_xmit_size_goal, a
> function which otherwise attempts to accumulate a packet suitable for
> TSO. No delays are introduced beyond existing autocork heuristics.

I would suggest phrasing this paragraph as something like:

  This patch generalizes tcp_xmit_size_goal(), a
  function which is used in autocorking in order to attempt to
  accumulate suitable transmit sizes, so that it takes into account
  not only TSO and receive window, but now also transmit
  sizes needed for efficient PMTU discovery (when a flow is
  eligible for PMTU discovery).

>
> Suggested-by: Matt Mathis <mattmathis@google.com>
> Signed-off-by: Leonard Crestez <lcrestez@drivenets.com>
> ---
...
> +
> +               /* Timer for wait_data */
> +               struct    timer_list    wait_data_timer;

This timer_list seems left over from a previous patch version, and no
longer used?

> @@ -1375,10 +1376,12 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
>         s32 delta;
>
>         if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
>             ca_ops->cong_control)
>                 return;
> +       if (inet_csk(sk)->icsk_mtup.wait_data)
> +               return;
>         delta = tcp_jiffies32 - tp->lsndtime;
>         if (delta > inet_csk(sk)->icsk_rto)
>                 tcp_cwnd_restart(sk, delta);
>  }

I would argue that the MTU probing heuristics should not override
congestion control. If congestion control thinks it's proper to reset
the cwnd to a lower value due to the connection being idle, then this
should happen irrespective of MTU probing activity.

>
>  int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
>  {
>         int mss_now;
> diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
> index bde781f46b41..c15ed548a48a 100644
> --- a/net/ipv4/tcp_output.c
> +++ b/net/ipv4/tcp_output.c
> @@ -2311,10 +2311,23 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
>         }
>
>         return true;
>  }
>
> +int tcp_mtu_probe_size_needed(struct sock *sk)
> +{
> +       struct inet_connection_sock *icsk = inet_csk(sk);
> +       struct tcp_sock *tp = tcp_sk(sk);
> +       int probe_size;
> +       int size_needed;
> +
> +       probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + icsk->icsk_mtup.search_low) >> 1);
> +       size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;

Adding this helper without refactoring tcp_mtu_probe() to use this
helper would leave some significant unnecessary code duplication. To
avoid duplication, AFAICT your tcp_mtu_probe() should call your new
helper.

You may also want to make this little helper static inline, since
AFAICT it is called in some hot paths.
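
Something like this, perhaps (untested sketch, just hoisting the
computation from your patch; whether it ends up in include/net/tcp.h or
stays in tcp_output.c is up to you):

static inline int tcp_mtu_probe_size_needed(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int probe_size;

	/* Probe the midpoint of the current MTU search range. */
	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
					 icsk->icsk_mtup.search_low) >> 1);

	/* Probe itself plus enough trailing data for loss detection. */
	return probe_size + (tp->reordering + 1) * tp->mss_cache;
}

and then tcp_mtu_probe() would just do
"size_needed = tcp_mtu_probe_size_needed(sk);" instead of computing it
again.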

>  static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
>                            int push_one, gfp_t gfp)
>  {
> +       struct inet_connection_sock *icsk = inet_csk(sk);
>         struct tcp_sock *tp = tcp_sk(sk);
> +       struct net *net = sock_net(sk);
>         struct sk_buff *skb;
>         unsigned int tso_segs, sent_pkts;
>         int cwnd_quota;
>         int result;
>         bool is_cwnd_limited = false, is_rwnd_limited = false;
>         u32 max_segs;
>
>         sent_pkts = 0;
>
>         tcp_mstamp_refresh(tp);
> +       /*
> +        * Waiting for tcp probe data also applies when push_one=1
> +        * If user does many small writes we hold them until we have enough
> +        * for a probe.
> +        */

This comment seems incomplete; with this patch AFAICT small writes are
not simply always held unconditionally until there is enough data for
a full-sized MTU probe. Rather, the autocorking mechanism is used, so
that if a flow is eligible for MTU probes, autocorking attempts to
wait for a full amount of data with which to probe (and, as is the
case with autocorking, if this does not become available by the time
the local send queues are empty, the flow sends out whatever it has).

Also I think it would be better to place this comment where you add
new code to tcp_xmit_size_goal(), so that the comment is located near
the corresponding key code, rather than in the very general
tcp_write_xmit() function.
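
E.g. something along these lines in tcp_xmit_size_goal() (untested, and
the comment wording is only a suggestion):

	if (unlikely(icsk->icsk_mtup.wait_data)) {
		/* An MTU probe wants more data than is currently queued.
		 * Raise the size goal so autocorking accumulates enough
		 * for the probe; as with autocorking in general, if the
		 * local queues drain first we send what we have.
		 */
		int needed = tcp_mtu_probe_size_needed(sk);

		if (needed > 0)
			size_goal = max(size_goal, (u32)needed);
	}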

best,
neal


* Re: [RFC] tcp: Consider mtu probing for tcp_xmit_size_goal
  2021-04-26 15:40 ` Neal Cardwell
@ 2021-04-26 16:58   ` Leonard Crestez
From: Leonard Crestez @ 2021-04-26 16:58 UTC (permalink / raw)
  To: Neal Cardwell, Matt Mathis
  Cc: David S. Miller, Eric Dumazet, Willem de Bruijn, Jakub Kicinski,
	Hideaki YOSHIFUJI, David Ahern, John Heffner,
	Soheil Hassas Yeganeh, Roopa Prabhu, Leonard Crestez, netdev,
	linux-kernel



On 26.04.2021 18:40, Neal Cardwell wrote:
> Thanks for the patch. Some thoughts, in-line below:
> 
> On Sun, Apr 25, 2021 at 10:22 PM Leonard Crestez <lcrestez@drivenets.com> wrote:
>>
>> According to RFC4821 Section 7.4 "Protocols MAY delay sending non-probes
>> in order to accumulate enough data" but linux almost never does that.
>>
>> Linux checks for probe_size + (1 + retries) * mss_cache to be available
> 
> I think this means to say "reordering" rather than "retries"?
> 
>> in the send buffer and if that condition is not met it will send anyway
>> using the current MSS. The feature can be made to work by sending very
>> large chunks of data from userspace (for example 128k) but for small
>> writes on fast links tcp mtu probes almost never happen.
>>
>> This patch tries to take mtu probe into account in tcp_xmit_size_goal, a
>> function which otherwise attempts to accumulate a packet suitable for
>> TSO. No delays are introduced beyond existing autocork heuristics.
> 
> I would suggest phrasing this paragraph as something like:
> 
>    This patch generalizes tcp_xmit_size_goal(), a
>    function which is used in autocorking in order to attempt to
>    accumulate suitable transmit sizes, so that it takes into account
>    not only TSO and receive window, but now also transmit
>    sizes needed for efficient PMTU discovery (when a flow is
>    eligible for PMTU discovery).
> 
>>
>> Suggested-by: Matt Mathis <mattmathis@google.com>
>> Signed-off-by: Leonard Crestez <lcrestez@drivenets.com>
>> ---
> ...
>> +
>> +               /* Timer for wait_data */
>> +               struct    timer_list    wait_data_timer;
> 
> This timer_list seems left over from a previous patch version, and no
> longer used?

Yes, I'll post a cleaned-up version.

>> @@ -1375,10 +1376,12 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
>>          s32 delta;
>>
>>          if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
>>              ca_ops->cong_control)
>>                  return;
>> +       if (inet_csk(sk)->icsk_mtup.wait_data)
>> +               return;
>>          delta = tcp_jiffies32 - tp->lsndtime;
>>          if (delta > inet_csk(sk)->icsk_rto)
>>                  tcp_cwnd_restart(sk, delta);
>>   }
> 
> I would argue that the MTU probing heuristics should not override
> congestion control. If congestion control thinks it's proper to reset
> the cwnd to a lower value due to the connection being idle, then this
> should happen irrespective of MTU probing activity.

This is a hack from the previous version, removed.

>>   int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
>>   {
>>          int mss_now;
>> diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
>> index bde781f46b41..c15ed548a48a 100644
>> --- a/net/ipv4/tcp_output.c
>> +++ b/net/ipv4/tcp_output.c
>> @@ -2311,10 +2311,23 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
>>          }
>>
>>          return true;
>>   }
>>
>> +int tcp_mtu_probe_size_needed(struct sock *sk)
>> +{
>> +       struct inet_connection_sock *icsk = inet_csk(sk);
>> +       struct tcp_sock *tp = tcp_sk(sk);
>> +       int probe_size;
>> +       int size_needed;
>> +
>> +       probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + icsk->icsk_mtup.search_low) >> 1);
>> +       size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
> 
> Adding this helper without refactoring tcp_mtu_probe() to use this
> helper would leave some significant unnecessary code duplication. To
> avoid duplication, AFAICT your tcp_mtu_probe() should call your new
> helper.

Yes. This function should also check if RACK is enabled and try to send 
smaller probes in that case.
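
Roughly along these lines, I think (untested, and assuming tcp_is_rack()
is the right gate here; how much data to queue behind the probe when
RACK is in use is still to be determined):

	/* With RACK, probe loss is detected from timestamps rather than
	 * from (reordering + 1) later in-order segments, so we should
	 * not need to queue nearly as much data behind the probe.
	 */
	if (tcp_is_rack(sk))
		size_needed = probe_size + tp->mss_cache;
	else
		size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;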

> You may also want to make this little helper static inline, since
> AFAICT it is called in some hot paths.

It's only called when icsk->icsk_mtup.wait_data is true, so only for
a brief while every ten minutes or so. It is possible that this "wait"
stretches for a long time though.

>>   static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
>>                             int push_one, gfp_t gfp)
>>   {
>> +       struct inet_connection_sock *icsk = inet_csk(sk);
>>          struct tcp_sock *tp = tcp_sk(sk);
>> +       struct net *net = sock_net(sk);
>>          struct sk_buff *skb;
>>          unsigned int tso_segs, sent_pkts;
>>          int cwnd_quota;
>>          int result;
>>          bool is_cwnd_limited = false, is_rwnd_limited = false;
>>          u32 max_segs;
>>
>>          sent_pkts = 0;
>>
>>          tcp_mstamp_refresh(tp);
>> +       /*
>> +        * Waiting for tcp probe data also applies when push_one=1
>> +        * If user does many small writes we hold them until we have enough
>> +        * for a probe.
>> +        */
> 
> This comment seems incomplete; with this patch AFAICT small writes are
> not simply always held unconditionally until there is enough data for
> a full-sized MTU probe. Rather, the autocorking mechanism is used, so
> that if a flow is eligible for MTU probes, autocorking attempts to
> wait for a full amount of data with which to probe (and, as is the
> case with autocorking, if this does not become available by the time
> the local send queues are empty, the flow sends out whatever it has).

Comment is from a previous version, should be deleted.

--
Regards,
Leonard
