* [PATCH] tcp: adjust rcvbuff according copied rate of user space
@ 2022-08-09  8:47 Yonglong Li
  2022-08-09 21:28 ` Neal Cardwell
  0 siblings, 1 reply; 3+ messages in thread
From: Yonglong Li @ 2022-08-09  8:47 UTC (permalink / raw)
  To: netdev; +Cc: davem, edumazet, dsahern, kuba, pabeni, liyonglong

It is more reasonable to adjust the receive buffer based on the rate at
which data is copied to user space, rather than on the raw number of
bytes copied.

Signed-off-by: Yonglong Li <liyonglong@chinatelecom.cn>
---
 include/linux/tcp.h  |  1 +
 net/ipv4/tcp_input.c | 16 +++++++++++++---
 2 files changed, 14 insertions(+), 3 deletions(-)
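
For illustration, here is a minimal stand-alone user-space sketch of the
rate comparison this patch introduces (the numbers and the copy_rate()
helper are made up; the kernel code below works on tp->rcvq_space and
uses do_div() instead):

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL

/* Bytes copied to user space per second over an interval of time_us usec. */
static uint64_t copy_rate(uint64_t bytes, uint64_t time_us)
{
	return time_us ? bytes * USEC_PER_SEC / time_us : 0;
}

int main(void)
{
	/* Hypothetical measurements for two consecutive RTT rounds. */
	uint64_t prior_rate  = copy_rate(64000, 20000);	/* previous round */
	uint64_t copied_rate = copy_rate(90000, 25000);	/* current round  */

	/* Grow the receive buffer only when the copy rate actually went up. */
	if (copied_rate <= prior_rate)
		printf("keep rcvbuf: rate did not increase\n");
	else
		printf("grow rcvbuf: %llu -> %llu bytes/sec\n",
		       (unsigned long long)prior_rate,
		       (unsigned long long)copied_rate);
	return 0;
}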

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a9fbe22..18e091c 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -410,6 +410,7 @@ struct tcp_sock {
 		u32	space;
 		u32	seq;
 		u64	time;
+		u32	prior_rate;
 	} rcvq_space;
 
 /* TCP-specific MTU probe information. */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ab5f0ea..2bdf2a5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -544,6 +544,7 @@ static void tcp_init_buffer_space(struct sock *sk)
 	tcp_mstamp_refresh(tp);
 	tp->rcvq_space.time = tp->tcp_mstamp;
 	tp->rcvq_space.seq = tp->copied_seq;
+	tp->rcvq_space.prior_rate = 0;
 
 	maxwin = tcp_full_space(sk);
 
@@ -701,6 +702,7 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u64 pre_copied_rate, copied_rate;
 	u32 copied;
 	int time;
 
@@ -713,7 +715,14 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 	/* Number of bytes copied to user in last RTT */
 	copied = tp->copied_seq - tp->rcvq_space.seq;
-	if (copied <= tp->rcvq_space.space)
+	copied_rate = copied * USEC_PER_SEC;
+	do_div(copied_rate, time);
+	pre_copied_rate = tp->rcvq_space.prior_rate;
+	if (!tp->rcvq_space.prior_rate) {
+		pre_copied_rate = tp->rcvq_space.space * USEC_PER_SEC;
+		do_div(pre_copied_rate, time);
+	}
+	if (copied_rate <= pre_copied_rate || !pre_copied_rate)
 		goto new_measure;
 
 	/* A bit of theory :
@@ -736,8 +745,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
 		rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 
 		/* Accommodate for sender rate increase (eg. slow start) */
-		grow = rcvwin * (copied - tp->rcvq_space.space);
-		do_div(grow, tp->rcvq_space.space);
+		grow = rcvwin * (copied_rate - pre_copied_rate);
+		do_div(grow, pre_copied_rate);
 		rcvwin += (grow << 1);
 
 		rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
@@ -755,6 +764,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 		}
 	}
 	tp->rcvq_space.space = copied;
+	tp->rcvq_space.prior_rate = pre_copied_rate;
 
 new_measure:
 	tp->rcvq_space.seq = tp->copied_seq;
-- 
1.8.3.1



* Re: [PATCH] tcp: adjust rcvbuff according copied rate of user space
  2022-08-09  8:47 [PATCH] tcp: adjust rcvbuff according copied rate of user space Yonglong Li
@ 2022-08-09 21:28 ` Neal Cardwell
  2022-08-10  7:24   ` Yonglong Li
  0 siblings, 1 reply; 3+ messages in thread
From: Neal Cardwell @ 2022-08-09 21:28 UTC (permalink / raw)
  To: Yonglong Li; +Cc: netdev, davem, edumazet, dsahern, kuba, pabeni, Yuchung Cheng

On Tue, Aug 9, 2022 at 4:48 AM Yonglong Li <liyonglong@chinatelecom.cn> wrote:
>
> It is more reasonable to adjust the receive buffer based on the rate at
> which data is copied to user space, rather than on the raw number of
> bytes copied.
>
> Signed-off-by: Yonglong Li <liyonglong@chinatelecom.cn>
> ---

Thanks for sending out the patch. My sense is that this would need a
more detailed commit description describing the algorithm change, the
motivation for the change, and justifying the added complexity and
state by showing some meaningful performance test results that
demonstrate some improvement.

>  include/linux/tcp.h  |  1 +
>  net/ipv4/tcp_input.c | 16 +++++++++++++---
>  2 files changed, 14 insertions(+), 3 deletions(-)
>
> diff --git a/include/linux/tcp.h b/include/linux/tcp.h
> index a9fbe22..18e091c 100644
> --- a/include/linux/tcp.h
> +++ b/include/linux/tcp.h
> @@ -410,6 +410,7 @@ struct tcp_sock {
>                 u32     space;
>                 u32     seq;
>                 u64     time;
> +               u32     prior_rate;
>         } rcvq_space;
>
>  /* TCP-specific MTU probe information. */
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> index ab5f0ea..2bdf2a5 100644
> --- a/net/ipv4/tcp_input.c
> +++ b/net/ipv4/tcp_input.c
> @@ -544,6 +544,7 @@ static void tcp_init_buffer_space(struct sock *sk)
>         tcp_mstamp_refresh(tp);
>         tp->rcvq_space.time = tp->tcp_mstamp;
>         tp->rcvq_space.seq = tp->copied_seq;
> +       tp->rcvq_space.prior_rate = 0;
>
>         maxwin = tcp_full_space(sk);
>
> @@ -701,6 +702,7 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
>  void tcp_rcv_space_adjust(struct sock *sk)
>  {
>         struct tcp_sock *tp = tcp_sk(sk);
> +       u64 pre_copied_rate, copied_rate;
>         u32 copied;
>         int time;
>
> @@ -713,7 +715,14 @@ void tcp_rcv_space_adjust(struct sock *sk)
>
>         /* Number of bytes copied to user in last RTT */
>         copied = tp->copied_seq - tp->rcvq_space.seq;
> -       if (copied <= tp->rcvq_space.space)
> +       copied_rate = copied * USEC_PER_SEC;
> +       do_div(copied_rate, time);
> +       pre_copied_rate = tp->rcvq_space.prior_rate;
> +       if (!tp->rcvq_space.prior_rate) {
> +               pre_copied_rate = tp->rcvq_space.space * USEC_PER_SEC;
> +               do_div(pre_copied_rate, time);
> +       }
> +       if (copied_rate <= pre_copied_rate || !pre_copied_rate)
>                 goto new_measure;
>
>         /* A bit of theory :
> @@ -736,8 +745,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
>                 rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
>
>                 /* Accommodate for sender rate increase (eg. slow start) */
> -               grow = rcvwin * (copied - tp->rcvq_space.space);
> -               do_div(grow, tp->rcvq_space.space);
> +               grow = rcvwin * (copied_rate - pre_copied_rate);
> +               do_div(grow, pre_copied_rate);
>                 rcvwin += (grow << 1);
>
>                 rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
> @@ -755,6 +764,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
>                 }
>         }
>         tp->rcvq_space.space = copied;
> +       tp->rcvq_space.prior_rate = pre_copied_rate;

Shouldn't that line be:

   tp->rcvq_space.prior_rate = copied_rate;

Otherwise, AFAICT the patch could preserve forever in
tp->rcvq_space.prior_rate the very first rate that was computed, since
the top of the patch does:

 +       pre_copied_rate = tp->rcvq_space.prior_rate;

and the bottom of the patch does:

 +       tp->rcvq_space.prior_rate = pre_copied_rate;
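
To make that concrete, here is a small stand-alone sketch (plain
user-space C with made-up rate samples, not the kernel code) showing
that the stored rate never moves past the value it was seeded with:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical copy rates measured in four successive rounds. */
	uint64_t samples[] = { 100, 200, 400, 800 };
	uint64_t prior_rate = 0;

	for (int i = 0; i < 4; i++) {
		uint64_t copied_rate = samples[i];
		/* Round 0 seeds the rate from a stand-in value (the patch
		 * derives it from rcvq_space.space); later rounds reuse
		 * whatever is already stored.
		 */
		uint64_t pre_copied_rate = prior_rate ? prior_rate : 50;

		/* As posted, the old value is written back unchanged;
		 * with "prior_rate = copied_rate" it would track samples[i].
		 */
		prior_rate = pre_copied_rate;

		printf("round %d: copied_rate=%llu, stored prior_rate=%llu\n",
		       i, (unsigned long long)copied_rate,
		       (unsigned long long)prior_rate);
	}
	return 0;
}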

best regards,
neal


* Re: [PATCH] tcp: adjust rcvbuff according copied rate of user space
  2022-08-09 21:28 ` Neal Cardwell
@ 2022-08-10  7:24   ` Yonglong Li
  0 siblings, 0 replies; 3+ messages in thread
From: Yonglong Li @ 2022-08-10  7:24 UTC (permalink / raw)
  To: Neal Cardwell
  Cc: netdev, davem, edumazet, dsahern, kuba, pabeni, Yuchung Cheng



On 8/10/2022 5:28 AM, Neal Cardwell wrote:
> On Tue, Aug 9, 2022 at 4:48 AM Yonglong Li <liyonglong@chinatelecom.cn> wrote:
>>
>> It is more reasonable to adjust the receive buffer based on the rate at
>> which data is copied to user space, rather than on the raw number of
>> bytes copied.
>>
>> Signed-off-by: Yonglong Li <liyonglong@chinatelecom.cn>
>> ---
> 
> Thanks for sending out the patch. My sense is that this would need a
> more detailed commit description describing the algorithm change, the
> motivation for the change, and justifying the added complexity and
> state by showing some meaningful performance test results that
> demonstrate some improvement.
Hi Neal,

Thanks for your feedback. I will add more detail in the next version.

> 
>>  include/linux/tcp.h  |  1 +
>>  net/ipv4/tcp_input.c | 16 +++++++++++++---
>>  2 files changed, 14 insertions(+), 3 deletions(-)
>>
>> diff --git a/include/linux/tcp.h b/include/linux/tcp.h
>> index a9fbe22..18e091c 100644
>> --- a/include/linux/tcp.h
>> +++ b/include/linux/tcp.h
>> @@ -410,6 +410,7 @@ struct tcp_sock {
>>                 u32     space;
>>                 u32     seq;
>>                 u64     time;
>> +               u32     prior_rate;
>>         } rcvq_space;
>>
>>  /* TCP-specific MTU probe information. */
>> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
>> index ab5f0ea..2bdf2a5 100644
>> --- a/net/ipv4/tcp_input.c
>> +++ b/net/ipv4/tcp_input.c
>> @@ -544,6 +544,7 @@ static void tcp_init_buffer_space(struct sock *sk)
>>         tcp_mstamp_refresh(tp);
>>         tp->rcvq_space.time = tp->tcp_mstamp;
>>         tp->rcvq_space.seq = tp->copied_seq;
>> +       tp->rcvq_space.prior_rate = 0;
>>
>>         maxwin = tcp_full_space(sk);
>>
>> @@ -701,6 +702,7 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
>>  void tcp_rcv_space_adjust(struct sock *sk)
>>  {
>>         struct tcp_sock *tp = tcp_sk(sk);
>> +       u64 pre_copied_rate, copied_rate;
>>         u32 copied;
>>         int time;
>>
>> @@ -713,7 +715,14 @@ void tcp_rcv_space_adjust(struct sock *sk)
>>
>>         /* Number of bytes copied to user in last RTT */
>>         copied = tp->copied_seq - tp->rcvq_space.seq;
>> -       if (copied <= tp->rcvq_space.space)
>> +       copied_rate = copied * USEC_PER_SEC;
>> +       do_div(copied_rate, time);
>> +       pre_copied_rate = tp->rcvq_space.prior_rate;
>> +       if (!tp->rcvq_space.prior_rate) {
>> +               pre_copied_rate = tp->rcvq_space.space * USEC_PER_SEC;
>> +               do_div(pre_copied_rate, time);
>> +       }
>> +       if (copied_rate <= pre_copied_rate || !pre_copied_rate)
>>                 goto new_measure;
>>
>>         /* A bit of theory :
>> @@ -736,8 +745,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
>>                 rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
>>
>>                 /* Accommodate for sender rate increase (eg. slow start) */
>> -               grow = rcvwin * (copied - tp->rcvq_space.space);
>> -               do_div(grow, tp->rcvq_space.space);
>> +               grow = rcvwin * (copied_rate - pre_copied_rate);
>> +               do_div(grow, pre_copied_rate);
>>                 rcvwin += (grow << 1);
>>
>>                 rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
>> @@ -755,6 +764,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
>>                 }
>>         }
>>         tp->rcvq_space.space = copied;
>> +       tp->rcvq_space.prior_rate = pre_copied_rate;
> 
> Shouldn't that line be:
> 
>    tp->rcvq_space.prior_rate = copied_rate;
> 
> Otherwise, AFAICT the patch could preserve forever in
> tp->rcvq_space.prior_rate the very first rate that was computed, since
> the top of the patch does:
> 
>  +       pre_copied_rate = tp->rcvq_space.prior_rate;
> 
> and the bottom of the patch does:
> 
>  +       tp->rcvq_space.prior_rate = pre_copied_rate;
> 
Sorry, my mistake...

> best regards,
> neal
> 

-- 
Li YongLong

