* [PATCH net-next v3 1/3] mptcp: never fetch fwd memory from the subflow
@ 2022-06-20 16:19 Paolo Abeni
  2022-06-20 16:19 ` [PATCH net-next v3 2/3] mptcp: drop SK_RECLAIM_* macros Paolo Abeni
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Paolo Abeni @ 2022-06-20 16:19 UTC (permalink / raw)
  To: mptcp

The memory accounting in this exceptional code path - fetching forward
memory from the subflow when the msk-level allocation fails - is broken,
and after commit 4890b686f408 ("net: keep sk->sk_forward_alloc as small
as possible") the subflow can't offer much forward memory there anyway.

Drop the broken code.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
This is possibly for net, but makes sense only on top of recent
net-next patches, so whatever ;)
---
 net/mptcp/protocol.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 6d2aa41390e7..0d4b2c010da0 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -328,15 +328,10 @@ static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
 
 	amt = sk_mem_pages(size);
 	amount = amt << PAGE_SHIFT;
-	msk->rmem_fwd_alloc += amount;
-	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) {
-		if (ssk->sk_forward_alloc < amount) {
-			msk->rmem_fwd_alloc -= amount;
-			return false;
-		}
+	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
+		return false;
 
-		ssk->sk_forward_alloc -= amount;
-	}
+	msk->rmem_fwd_alloc += amount;
 	return true;
 }
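
For reference, a minimal sketch of what mptcp_rmem_schedule() looks like with
this change applied (the local declarations and the early-return check sit
outside the hunk above and are reconstructed here for illustration, so they
may not match the file exactly):

static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int amt, amount;

	/* enough msk-level forward allocation is already available */
	if (size < msk->rmem_fwd_alloc)
		return true;

	/* round up to pages and charge the msk only; the subflow (ssk)
	 * forward memory is never touched anymore
	 */
	amt = sk_mem_pages(size);
	amount = amt << PAGE_SHIFT;
	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
		return false;

	msk->rmem_fwd_alloc += amount;
	return true;
}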
 
-- 
2.35.3



* [PATCH net-next v3 2/3] mptcp: drop SK_RECLAIM_* macros
  2022-06-20 16:19 [PATCH net-next v3 1/3] mptcp: never fetch fwd memory from the subflow Paolo Abeni
@ 2022-06-20 16:19 ` Paolo Abeni
  2022-06-20 20:38   ` Mat Martineau
  2022-06-20 16:19 ` [PATCH net-next v3 3/3] mptcp: refine memory scheduling Paolo Abeni
  2022-06-20 16:46 ` [PATCH net-next v3 1/3] mptcp: never fetch fwd memory from the subflow Paolo Abeni
  2 siblings, 1 reply; 6+ messages in thread
From: Paolo Abeni @ 2022-06-20 16:19 UTC (permalink / raw)
  To: mptcp

After commit 4890b686f408 ("net: keep sk->sk_forward_alloc as small as
possible"), the MPTCP protocol is the last user of the SK_RECLAIM_CHUNK
and SK_RECLAIM_THRESHOLD macros.

Update the MPTCP reclaim schema to match the core/TCP one and drop the
mentioned macros. This additionally cleans up the MPTCP code a bit.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/mptcp/protocol.c | 35 ++---------------------------------
 1 file changed, 2 insertions(+), 33 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 0d4b2c010da0..b31bac33f87a 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -181,8 +181,8 @@ static void mptcp_rmem_uncharge(struct sock *sk, int size)
 	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
 
 	/* see sk_mem_uncharge() for the rationale behind the following schema */
-	if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD))
-		__mptcp_rmem_reclaim(sk, SK_RECLAIM_CHUNK);
+	if (unlikely(reclaimable >= PAGE_SIZE))
+		__mptcp_rmem_reclaim(sk, reclaimable);
 }
 
 static void mptcp_rfree(struct sk_buff *skb)
@@ -961,25 +961,6 @@ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
 		df->data_seq + df->data_len == msk->write_seq;
 }
 
-static void __mptcp_mem_reclaim_partial(struct sock *sk)
-{
-	int reclaimable = mptcp_sk(sk)->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
-
-	lockdep_assert_held_once(&sk->sk_lock.slock);
-
-	if (reclaimable > (int)PAGE_SIZE)
-		__mptcp_rmem_reclaim(sk, reclaimable - 1);
-
-	sk_mem_reclaim(sk);
-}
-
-static void mptcp_mem_reclaim_partial(struct sock *sk)
-{
-	mptcp_data_lock(sk);
-	__mptcp_mem_reclaim_partial(sk);
-	mptcp_data_unlock(sk);
-}
-
 static void dfrag_uncharge(struct sock *sk, int len)
 {
 	sk_mem_uncharge(sk, len);
@@ -999,7 +980,6 @@ static void __mptcp_clean_una(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_data_frag *dtmp, *dfrag;
-	bool cleaned = false;
 	u64 snd_una;
 
 	/* on fallback we just need to ignore snd_una, as this is really
@@ -1022,7 +1002,6 @@ static void __mptcp_clean_una(struct sock *sk)
 		}
 
 		dfrag_clear(sk, dfrag);
-		cleaned = true;
 	}
 
 	dfrag = mptcp_rtx_head(sk);
@@ -1044,7 +1023,6 @@ static void __mptcp_clean_una(struct sock *sk)
 		dfrag->already_sent -= delta;
 
 		dfrag_uncharge(sk, delta);
-		cleaned = true;
 	}
 
 	/* all retransmitted data acked, recovery completed */
@@ -1052,9 +1030,6 @@ static void __mptcp_clean_una(struct sock *sk)
 		msk->recovery = false;
 
 out:
-	if (cleaned && tcp_under_memory_pressure(sk))
-		__mptcp_mem_reclaim_partial(sk);
-
 	if (snd_una == READ_ONCE(msk->snd_nxt) &&
 	    snd_una == READ_ONCE(msk->write_seq)) {
 		if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
@@ -1206,12 +1181,6 @@ static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, boo
 {
 	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
 
-	if (unlikely(tcp_under_memory_pressure(sk))) {
-		if (data_lock_held)
-			__mptcp_mem_reclaim_partial(sk);
-		else
-			mptcp_mem_reclaim_partial(sk);
-	}
 	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
 }
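
For reference, with the first hunk above applied the receive-side uncharge
path would look roughly as follows (the lines ahead of the hunk are
reconstructed here for illustration; the patch itself only changes the
threshold and the reclaimed amount):

static void mptcp_rmem_uncharge(struct sock *sk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int reclaimable;

	msk->rmem_fwd_alloc += size;
	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);

	/* see sk_mem_uncharge() for the rationale behind the following schema:
	 * as soon as a full page is reclaimable, give back everything above
	 * the reserved amount instead of waiting for SK_RECLAIM_THRESHOLD
	 */
	if (unlikely(reclaimable >= PAGE_SIZE))
		__mptcp_rmem_reclaim(sk, reclaimable);
}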
 
-- 
2.35.3



* [PATCH net-next v3 3/3] mptcp: refine memory scheduling
  2022-06-20 16:19 [PATCH net-next v3 1/3] mptcp: never fetch fwd memory from the subflow Paolo Abeni
  2022-06-20 16:19 ` [PATCH net-next v3 2/3] mptcp: drop SK_RECLAIM_* macros Paolo Abeni
@ 2022-06-20 16:19 ` Paolo Abeni
  2022-06-20 20:32   ` Mat Martineau
  2022-06-20 16:46 ` [PATCH net-next v3 1/3] mptcp: never fetch fwd memory from the subflow Paolo Abeni
  2 siblings, 1 reply; 6+ messages in thread
From: Paolo Abeni @ 2022-06-20 16:19 UTC (permalink / raw)
  To: mptcp

Similar to commit 7c80b038d23e ("net: fix sk_wmem_schedule() and
sk_rmem_schedule() errors"), let the MPTCP receive path schedule
exactly the required amount of memory.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/mptcp/protocol.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index b31bac33f87a..0e295b3bce54 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -326,6 +326,7 @@ static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
 	if (size < msk->rmem_fwd_alloc)
 		return true;
 
+	size -= msk->rmem_fwd_alloc;
 	amt = sk_mem_pages(size);
 	amount = amt << PAGE_SHIFT;
 	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
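
A small worked example of what the extra line changes, assuming PAGE_SIZE is
4096 (the numbers are purely illustrative):

/* msk->rmem_fwd_alloc == 3000, incoming skb needs size == 5000
 *
 * before: sk_mem_pages(5000) == 2, so two pages (8192 bytes) are requested
 *         from the memory accounting although 3000 bytes are already there
 * after:  size -= 3000 leaves 2000, sk_mem_pages(2000) == 1, so a single
 *         page (4096 bytes) is requested and rmem_fwd_alloc grows to 7096,
 *         just enough to cover the 5000-byte skb
 */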
-- 
2.35.3



* Re: [PATCH net-next v3 1/3] mptcp: never fetch fwd memory from the subflow
  2022-06-20 16:19 [PATCH net-next v3 1/3] mptcp: never fetch fwd memory from the subflow Paolo Abeni
  2022-06-20 16:19 ` [PATCH net-next v3 2/3] mptcp: drop SK_RECLAIM_* macros Paolo Abeni
  2022-06-20 16:19 ` [PATCH net-next v3 3/3] mptcp: refine memory scheduling Paolo Abeni
@ 2022-06-20 16:46 ` Paolo Abeni
  2 siblings, 0 replies; 6+ messages in thread
From: Paolo Abeni @ 2022-06-20 16:46 UTC (permalink / raw)
  To: mptcp

On Mon, 2022-06-20 at 18:19 +0200, Paolo Abeni wrote:
> The memory accounting in this exceptional code path - fetching forward
> memory from the subflow when the msk-level allocation fails - is broken,
> and after commit 4890b686f408 ("net: keep sk->sk_forward_alloc as small
> as possible") the subflow can't offer much forward memory there anyway.
> 
> Drop the broken code.
> 
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>

This is actually a "v2" ;)

The only change is the new patch 3/3 - I even kept the obsolete comment
after the commit message in patch 1/3 :)

/P



* Re: [PATCH net-next v3 3/3] mptcp: refine memory scheduling
  2022-06-20 16:19 ` [PATCH net-next v3 3/3] mptcp: refine memory scheduling Paolo Abeni
@ 2022-06-20 20:32   ` Mat Martineau
  0 siblings, 0 replies; 6+ messages in thread
From: Mat Martineau @ 2022-06-20 20:32 UTC (permalink / raw)
  To: Paolo Abeni; +Cc: mptcp

On Mon, 20 Jun 2022, Paolo Abeni wrote:

> Similar to commit 7c80b038d23e ("net: fix sk_wmem_schedule() and
> sk_rmem_schedule() errors"), let the MPTCP receive path schedule
> exactly the required amount of memory.
>
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> ---
> net/mptcp/protocol.c | 1 +
> 1 file changed, 1 insertion(+)
>
> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index b31bac33f87a..0e295b3bce54 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c
> @@ -326,6 +326,7 @@ static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
> 	if (size < msk->rmem_fwd_alloc)

With the change below, it would also be good to change the line above to:

 	if (size <= msk->rmem_fwd_alloc)

so there's no attempt to allocate 0 new pages.
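
i.e. (sketch of the reasoning):

	/* with '<' and size == msk->rmem_fwd_alloc:
	 *   size -= msk->rmem_fwd_alloc  ->  size == 0
	 *   amt = sk_mem_pages(0)        ->  amt == 0
	 * so __sk_mem_raise_allocated() would be asked to raise 0 pages;
	 * '<=' takes the early return instead.
	 */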

> 		return true;
>
> +	size -= msk->rmem_fwd_alloc;
> 	amt = sk_mem_pages(size);
> 	amount = amt << PAGE_SHIFT;
> 	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
> -- 
> 2.35.3
>
>
>

--
Mat Martineau
Intel


* Re: [PATCH net-next v3 2/3] mptcp: drop SK_RECLAIM_* macros
  2022-06-20 16:19 ` [PATCH net-next v3 2/3] mptcp: drop SK_RECLAIM_* macros Paolo Abeni
@ 2022-06-20 20:38   ` Mat Martineau
  0 siblings, 0 replies; 6+ messages in thread
From: Mat Martineau @ 2022-06-20 20:38 UTC (permalink / raw)
  To: Paolo Abeni; +Cc: mptcp

On Mon, 20 Jun 2022, Paolo Abeni wrote:

> After commit 4890b686f408 ("net: keep sk->sk_forward_alloc as small as
> possible"), the MPTCP protocol is the last user of the SK_RECLAIM_CHUNK
> and SK_RECLAIM_THRESHOLD macros.
>
> Update the MPTCP reclaim schema to match the core/TCP one and drop the
> mentioned macros. This additionally cleans up the MPTCP code a bit.
>

Please also add one more patch to the series that deletes SK_RECLAIM_* 
from include/net/sock.h

- Mat

> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> ---
> net/mptcp/protocol.c | 35 ++---------------------------------
> 1 file changed, 2 insertions(+), 33 deletions(-)
>
> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index 0d4b2c010da0..b31bac33f87a 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c
> @@ -181,8 +181,8 @@ static void mptcp_rmem_uncharge(struct sock *sk, int size)
> 	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
>
> 	/* see sk_mem_uncharge() for the rationale behind the following schema */
> -	if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD))
> -		__mptcp_rmem_reclaim(sk, SK_RECLAIM_CHUNK);
> +	if (unlikely(reclaimable >= PAGE_SIZE))
> +		__mptcp_rmem_reclaim(sk, reclaimable);
> }
>
> static void mptcp_rfree(struct sk_buff *skb)
> @@ -961,25 +961,6 @@ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
> 		df->data_seq + df->data_len == msk->write_seq;
> }
>
> -static void __mptcp_mem_reclaim_partial(struct sock *sk)
> -{
> -	int reclaimable = mptcp_sk(sk)->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
> -
> -	lockdep_assert_held_once(&sk->sk_lock.slock);
> -
> -	if (reclaimable > (int)PAGE_SIZE)
> -		__mptcp_rmem_reclaim(sk, reclaimable - 1);
> -
> -	sk_mem_reclaim(sk);
> -}
> -
> -static void mptcp_mem_reclaim_partial(struct sock *sk)
> -{
> -	mptcp_data_lock(sk);
> -	__mptcp_mem_reclaim_partial(sk);
> -	mptcp_data_unlock(sk);
> -}
> -
> static void dfrag_uncharge(struct sock *sk, int len)
> {
> 	sk_mem_uncharge(sk, len);
> @@ -999,7 +980,6 @@ static void __mptcp_clean_una(struct sock *sk)
> {
> 	struct mptcp_sock *msk = mptcp_sk(sk);
> 	struct mptcp_data_frag *dtmp, *dfrag;
> -	bool cleaned = false;
> 	u64 snd_una;
>
> 	/* on fallback we just need to ignore snd_una, as this is really
> @@ -1022,7 +1002,6 @@ static void __mptcp_clean_una(struct sock *sk)
> 		}
>
> 		dfrag_clear(sk, dfrag);
> -		cleaned = true;
> 	}
>
> 	dfrag = mptcp_rtx_head(sk);
> @@ -1044,7 +1023,6 @@ static void __mptcp_clean_una(struct sock *sk)
> 		dfrag->already_sent -= delta;
>
> 		dfrag_uncharge(sk, delta);
> -		cleaned = true;
> 	}
>
> 	/* all retransmitted data acked, recovery completed */
> @@ -1052,9 +1030,6 @@ static void __mptcp_clean_una(struct sock *sk)
> 		msk->recovery = false;
>
> out:
> -	if (cleaned && tcp_under_memory_pressure(sk))
> -		__mptcp_mem_reclaim_partial(sk);
> -
> 	if (snd_una == READ_ONCE(msk->snd_nxt) &&
> 	    snd_una == READ_ONCE(msk->write_seq)) {
> 		if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
> @@ -1206,12 +1181,6 @@ static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, boo
> {
> 	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
>
> -	if (unlikely(tcp_under_memory_pressure(sk))) {
> -		if (data_lock_held)
> -			__mptcp_mem_reclaim_partial(sk);
> -		else
> -			mptcp_mem_reclaim_partial(sk);
> -	}
> 	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
> }
>
> -- 
> 2.35.3
>
>
>

--
Mat Martineau
Intel


end of thread

Thread overview: 6+ messages
2022-06-20 16:19 [PATCH net-next v3 1/3] mptcp: never fetch fwd memory from the subflow Paolo Abeni
2022-06-20 16:19 ` [PATCH net-next v3 2/3] mptcp: drop SK_RECLAIM_* macros Paolo Abeni
2022-06-20 20:38   ` Mat Martineau
2022-06-20 16:19 ` [PATCH net-next v3 3/3] mptcp: refine memory scheduling Paolo Abeni
2022-06-20 20:32   ` Mat Martineau
2022-06-20 16:46 ` [PATCH net-next v3 1/3] mptcp: never fetch fwd memory from the subflow Paolo Abeni
