All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM
@ 2021-09-27 18:25 Wei Wang
  2021-09-27 18:25 ` [PATCH net-next 1/3] " Wei Wang
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Wei Wang @ 2021-09-27 18:25 UTC (permalink / raw)
  To: David S . Miller, netdev, Jakub Kicinski; +Cc: Shakeel Butt, Eric Dumazet

This patch series introduces a new socket option SO_RESERVE_MEM. 
This socket option provides a mechanism for users to reserve a certain
amount of memory for the socket to use. When this option is set, kernel
charges the user specified amount of memory to memcg, as well as
sk_forward_alloc. This amount of memory is not reclaimable and is
available in sk_forward_alloc for this socket.
With this socket option set, the networking stack spends fewer cycles
doing forward alloc and reclaim, which should lead to better system
performance, with the cost of an amount of pre-allocated and
unreclaimable memory, even under memory pressure.

The first patch is the implementation of this socket option. The
following 2 patches change the tcp stack to make use of this reserved
memory when under memory pressure. This makes the tcp stack behavior
more flexible when under memory pressure, and provides a way for users to
control the distribution of the memory among their sockets.

Wei Wang (3):
  net: add new socket option SO_RESERVE_MEM
  tcp: adjust sndbuf according to sk_reserved_mem
  tcp: adjust rcv_ssthresh according to sk_reserved_mem

 include/net/sock.h                | 44 +++++++++++++++++---
 include/net/tcp.h                 | 11 +++++
 include/uapi/asm-generic/socket.h |  2 +
 net/core/sock.c                   | 69 +++++++++++++++++++++++++++++++
 net/core/stream.c                 |  2 +-
 net/ipv4/af_inet.c                |  2 +-
 net/ipv4/tcp_input.c              | 26 ++++++++++--
 net/ipv4/tcp_output.c             |  3 +-
 8 files changed, 146 insertions(+), 13 deletions(-)

-- 
2.33.0.685.g46640cef36-goog


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH net-next 1/3] net: add new socket option SO_RESERVE_MEM
  2021-09-27 18:25 [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM Wei Wang
@ 2021-09-27 18:25 ` Wei Wang
  2021-11-12 13:13   ` kernel test robot
  2021-09-27 18:25 ` [PATCH net-next 2/3] tcp: adjust sndbuf according to sk_reserved_mem Wei Wang
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 7+ messages in thread
From: Wei Wang @ 2021-09-27 18:25 UTC (permalink / raw)
  To: David S . Miller, netdev, Jakub Kicinski; +Cc: Shakeel Butt, Eric Dumazet

This socket option provides a mechanism for users to reserve a certain
amount of memory for the socket to use. When this option is set, kernel
charges the user specified amount of memory to memcg, as well as
sk_forward_alloc. This amount of memory is not reclaimable and is
available in sk_forward_alloc for this socket.
With this socket option set, the networking stack spends fewer cycles
doing forward alloc and reclaim, which should lead to better system
performance, with the cost of an amount of pre-allocated and
unreclaimable memory, even under memory pressure.

Note:
This socket option is only available when memory cgroup is enabled, and we
require this reserved memory to be charged to the user's memcg. We hope
this could prevent misbehaving users from abusing this feature to reserve
a large amount of memory on certain sockets and cause unfairness for others.

Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com> 
---
 include/net/sock.h                | 43 ++++++++++++++++---
 include/uapi/asm-generic/socket.h |  2 +
 net/core/sock.c                   | 69 +++++++++++++++++++++++++++++++
 net/core/stream.c                 |  2 +-
 net/ipv4/af_inet.c                |  2 +-
 5 files changed, 111 insertions(+), 7 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 66a9a90f9558..b0df2d3843fd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -412,6 +412,7 @@ struct sock {
 #define sk_rmem_alloc sk_backlog.rmem_alloc
 
 	int			sk_forward_alloc;
+	u32			sk_reserved_mem;
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int		sk_ll_usec;
 	/* ===== mostly read cache line ===== */
@@ -1515,20 +1516,49 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
 		skb_pfmemalloc(skb);
 }
 
+static inline int sk_unused_reserved_mem(const struct sock *sk)
+{
+	int unused_mem;
+
+	if (likely(!sk->sk_reserved_mem))
+		return 0;
+
+	unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+			atomic_read(&sk->sk_rmem_alloc);
+
+	return unused_mem > 0 ? unused_mem : 0;
+}
+
 static inline void sk_mem_reclaim(struct sock *sk)
 {
+	int reclaimable;
+
 	if (!sk_has_account(sk))
 		return;
-	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
-		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+	reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+	if (reclaimable >= SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk, reclaimable);
+}
+
+static inline void sk_mem_reclaim_final(struct sock *sk)
+{
+	sk->sk_reserved_mem = 0;
+	sk_mem_reclaim(sk);
 }
 
 static inline void sk_mem_reclaim_partial(struct sock *sk)
 {
+	int reclaimable;
+
 	if (!sk_has_account(sk))
 		return;
-	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
-		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+
+	reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+	if (reclaimable > SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk, reclaimable - 1);
 }
 
 static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1540,9 +1570,12 @@ static inline void sk_mem_charge(struct sock *sk, int size)
 
 static inline void sk_mem_uncharge(struct sock *sk, int size)
 {
+	int reclaimable;
+
 	if (!sk_has_account(sk))
 		return;
 	sk->sk_forward_alloc += size;
+	reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
 
 	/* Avoid a possible overflow.
 	 * TCP send queues can make this happen, if sk_mem_reclaim()
@@ -1551,7 +1584,7 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
 	 * no need to hold that much forward allocation anyway.
 	 */
-	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+	if (unlikely(reclaimable >= 1 << 21))
 		__sk_mem_reclaim(sk, 1 << 20);
 }
 
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 1f0a2b4864e4..c77a1313b3b0 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -126,6 +126,8 @@
 
 #define SO_BUF_LOCK		72
 
+#define SO_RESERVE_MEM		73
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
diff --git a/net/core/sock.c b/net/core/sock.c
index 62627e868e03..a658c0173015 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -947,6 +947,53 @@ void sock_set_mark(struct sock *sk, u32 val)
 }
 EXPORT_SYMBOL(sock_set_mark);
 
+static void sock_release_reserved_memory(struct sock *sk, int bytes)
+{
+	/* Round down bytes to multiple of pages */
+	bytes &= ~(SK_MEM_QUANTUM - 1);
+
+	WARN_ON(bytes > sk->sk_reserved_mem);
+	sk->sk_reserved_mem -= bytes;
+	sk_mem_reclaim(sk);
+}
+
+static int sock_reserve_memory(struct sock *sk, int bytes)
+{
+	long allocated;
+	bool charged;
+	int pages;
+
+	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
+		return -EOPNOTSUPP;
+
+	if (!bytes)
+		return 0;
+
+	pages = sk_mem_pages(bytes);
+
+	/* pre-charge to memcg */
+	charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
+					  GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+	if (!charged)
+		return -ENOMEM;
+
+	/* pre-charge to forward_alloc */
+	allocated = sk_memory_allocated_add(sk, pages);
+	/* If the system goes into memory pressure with this
+	 * precharge, give up and return error.
+	 */
+	if (allocated > sk_prot_mem_limits(sk, 1)) {
+		sk_memory_allocated_sub(sk, pages);
+		mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
+		return -ENOMEM;
+	}
+	sk->sk_forward_alloc += pages << SK_MEM_QUANTUM_SHIFT;
+
+	sk->sk_reserved_mem += pages << SK_MEM_QUANTUM_SHIFT;
+
+	return 0;
+}
+
 /*
  *	This is meant for all protocols to use and covers goings on
  *	at the socket level. Everything here is generic.
@@ -1367,6 +1414,23 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 					  ~SOCK_BUF_LOCK_MASK);
 		break;
 
+	case SO_RESERVE_MEM:
+	{
+		int delta;
+
+		if (val < 0) {
+			ret = -EINVAL;
+			break;
+		}
+
+		delta = val - sk->sk_reserved_mem;
+		if (delta < 0)
+			sock_release_reserved_memory(sk, -delta);
+		else
+			ret = sock_reserve_memory(sk, delta);
+		break;
+	}
+
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -1733,6 +1797,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK;
 		break;
 
+	case SO_RESERVE_MEM:
+		v.val = sk->sk_reserved_mem;
+		break;
+
 	default:
 		/* We implement the SO_SNDLOWAT etc to not be settable
 		 * (1003.1g 7).
@@ -2045,6 +2113,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 	newsk->sk_dst_pending_confirm = 0;
 	newsk->sk_wmem_queued	= 0;
 	newsk->sk_forward_alloc = 0;
+	newsk->sk_reserved_mem  = 0;
 	atomic_set(&newsk->sk_drops, 0);
 	newsk->sk_send_head	= NULL;
 	newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
diff --git a/net/core/stream.c b/net/core/stream.c
index 4f1d4aa5fb38..e09ffd410685 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -202,7 +202,7 @@ void sk_stream_kill_queues(struct sock *sk)
 	WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
 
 	/* Account for returned memory. */
-	sk_mem_reclaim(sk);
+	sk_mem_reclaim_final(sk);
 
 	WARN_ON(sk->sk_wmem_queued);
 	WARN_ON(sk->sk_forward_alloc);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1d816a5fd3eb..a06f6a30b0d4 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -139,7 +139,7 @@ void inet_sock_destruct(struct sock *sk)
 	}
 	__skb_queue_purge(&sk->sk_error_queue);
 
-	sk_mem_reclaim(sk);
+	sk_mem_reclaim_final(sk);
 
 	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
 		pr_err("Attempt to release TCP socket in state %d %p\n",
-- 
2.33.0.685.g46640cef36-goog


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH net-next 2/3] tcp: adjust sndbuf according to sk_reserved_mem
  2021-09-27 18:25 [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM Wei Wang
  2021-09-27 18:25 ` [PATCH net-next 1/3] " Wei Wang
@ 2021-09-27 18:25 ` Wei Wang
  2021-09-27 18:25 ` [PATCH net-next 3/3] tcp: adjust rcv_ssthresh " Wei Wang
  2021-09-28  0:50 ` [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM Jakub Kicinski
  3 siblings, 0 replies; 7+ messages in thread
From: Wei Wang @ 2021-09-27 18:25 UTC (permalink / raw)
  To: David S . Miller, netdev, Jakub Kicinski; +Cc: Shakeel Butt, Eric Dumazet

If user sets SO_RESERVE_MEM socket option, in order to fully utilize the
reserved memory in memory pressure state on the tx path, we modify the
logic in sk_stream_moderate_sndbuf() to set sk_sndbuf according to
available reserved memory, instead of MIN_SOCK_SNDBUF, and adjust it
when new data is acked.

Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com> 
---
 include/net/sock.h   |  1 +
 net/ipv4/tcp_input.c | 14 ++++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index b0df2d3843fd..e6ad628adcd2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2388,6 +2388,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 		return;
 
 	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+	val = max_t(u32, val, sk_unused_reserved_mem(sk));
 
 	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 141e85e6422b..a7611256f235 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5381,7 +5381,7 @@ static int tcp_prune_queue(struct sock *sk)
 	return -1;
 }
 
-static bool tcp_should_expand_sndbuf(const struct sock *sk)
+static bool tcp_should_expand_sndbuf(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
@@ -5392,8 +5392,18 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
 		return false;
 
 	/* If we are under global TCP memory pressure, do not expand.  */
-	if (tcp_under_memory_pressure(sk))
+	if (tcp_under_memory_pressure(sk)) {
+		int unused_mem = sk_unused_reserved_mem(sk);
+
+		/* Adjust sndbuf according to reserved mem. But make sure
+		 * it never goes below SOCK_MIN_SNDBUF.
+		 * See sk_stream_moderate_sndbuf() for more details.
+		 */
+		if (unused_mem > SOCK_MIN_SNDBUF)
+			WRITE_ONCE(sk->sk_sndbuf, unused_mem);
+
 		return false;
+	}
 
 	/* If we are under soft global TCP memory pressure, do not expand.  */
 	if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
-- 
2.33.0.685.g46640cef36-goog


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH net-next 3/3] tcp: adjust rcv_ssthresh according to sk_reserved_mem
  2021-09-27 18:25 [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM Wei Wang
  2021-09-27 18:25 ` [PATCH net-next 1/3] " Wei Wang
  2021-09-27 18:25 ` [PATCH net-next 2/3] tcp: adjust sndbuf according to sk_reserved_mem Wei Wang
@ 2021-09-27 18:25 ` Wei Wang
  2021-09-28  0:50 ` [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM Jakub Kicinski
  3 siblings, 0 replies; 7+ messages in thread
From: Wei Wang @ 2021-09-27 18:25 UTC (permalink / raw)
  To: David S . Miller, netdev, Jakub Kicinski; +Cc: Shakeel Butt, Eric Dumazet

When user sets SO_RESERVE_MEM socket option, in order to utilize the
reserved memory when in memory pressure state, we adjust rcv_ssthresh
according to the available reserved memory for the socket, instead of
using 4 * advmss always.

Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com> 
---
 include/net/tcp.h     | 11 +++++++++++
 net/ipv4/tcp_input.c  | 12 ++++++++++--
 net/ipv4/tcp_output.c |  3 +--
 3 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3166dc15d7d6..27743a97d6cb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1418,6 +1418,17 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+	int unused_mem = sk_unused_reserved_mem(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+	if (unused_mem)
+		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+					 tcp_win_from_space(sk, unused_mem));
+}
+
 void tcp_cleanup_rbuf(struct sock *sk, int copied);
 
 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a7611256f235..b79a571a752e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -500,8 +500,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
 
 	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 
+	if (room <= 0)
+		return;
+
 	/* Check #1 */
-	if (room > 0 && !tcp_under_memory_pressure(sk)) {
+	if (!tcp_under_memory_pressure(sk)) {
 		unsigned int truesize = truesize_adjust(adjust, skb);
 		int incr;
 
@@ -518,6 +521,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
 			tp->rcv_ssthresh += min(room, incr);
 			inet_csk(sk)->icsk_ack.quick |= 1;
 		}
+	} else {
+		/* Under pressure:
+		 * Adjust rcv_ssthresh according to reserved mem
+		 */
+		tcp_adjust_rcv_ssthresh(sk);
 	}
 }
 
@@ -5346,7 +5354,7 @@ static int tcp_prune_queue(struct sock *sk)
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
 	else if (tcp_under_memory_pressure(sk))
-		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+		tcp_adjust_rcv_ssthresh(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
 		return 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6d72f3ea48c4..062d6cf13d06 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2969,8 +2969,7 @@ u32 __tcp_select_window(struct sock *sk)
 		icsk->icsk_ack.quick = 0;
 
 		if (tcp_under_memory_pressure(sk))
-			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
-					       4U * tp->advmss);
+			tcp_adjust_rcv_ssthresh(sk);
 
 		/* free_space might become our new window, make sure we don't
 		 * increase it due to wscale.
-- 
2.33.0.685.g46640cef36-goog


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM
  2021-09-27 18:25 [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM Wei Wang
                   ` (2 preceding siblings ...)
  2021-09-27 18:25 ` [PATCH net-next 3/3] tcp: adjust rcv_ssthresh " Wei Wang
@ 2021-09-28  0:50 ` Jakub Kicinski
  2021-09-28  3:33   ` Wei Wang
  3 siblings, 1 reply; 7+ messages in thread
From: Jakub Kicinski @ 2021-09-28  0:50 UTC (permalink / raw)
  To: Wei Wang; +Cc: David S . Miller, netdev, Shakeel Butt, Eric Dumazet

On Mon, 27 Sep 2021 11:25:20 -0700 Wei Wang wrote:
> This patch series introduces a new socket option SO_RESERVE_MEM. 
> This socket option provides a mechanism for users to reserve a certain
> amount of memory for the socket to use. When this option is set, kernel
> charges the user specified amount of memory to memcg, as well as
> sk_forward_alloc. This amount of memory is not reclaimable and is
> available in sk_forward_alloc for this socket.
> With this socket option set, the networking stack spends less cycles
> doing forward alloc and reclaim, which should lead to better system
> performance, with the cost of an amount of pre-allocated and
> unreclaimable memory, even under memory pressure.

Does not apply cleanly - would you mind rebasing/resending?

Would you be able to share what order of magnitude improvements you see?

Thanks!

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM
  2021-09-28  0:50 ` [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM Jakub Kicinski
@ 2021-09-28  3:33   ` Wei Wang
  0 siblings, 0 replies; 7+ messages in thread
From: Wei Wang @ 2021-09-28  3:33 UTC (permalink / raw)
  To: Jakub Kicinski; +Cc: David S . Miller, netdev, Shakeel Butt, Eric Dumazet

On Mon, Sep 27, 2021 at 5:50 PM Jakub Kicinski <kuba@kernel.org> wrote:
>
> On Mon, 27 Sep 2021 11:25:20 -0700 Wei Wang wrote:
> > This patch series introduces a new socket option SO_RESERVE_MEM.
> > This socket option provides a mechanism for users to reserve a certain
> > amount of memory for the socket to use. When this option is set, kernel
> > charges the user specified amount of memory to memcg, as well as
> > sk_forward_alloc. This amount of memory is not reclaimable and is
> > available in sk_forward_alloc for this socket.
> > With this socket option set, the networking stack spends less cycles
> > doing forward alloc and reclaim, which should lead to better system
> > performance, with the cost of an amount of pre-allocated and
> > unreclaimable memory, even under memory pressure.
>
> Does not apply cleanly - would you mind rebasing/resending?
>
Sure. Will do. Sorry about that.

> Would you be able to share what order of magnitude improvements you see?
>
OK. Will share some results from a synthetic benchmark.

> Thanks!

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH net-next 1/3] net: add new socket option SO_RESERVE_MEM
  2021-09-27 18:25 ` [PATCH net-next 1/3] " Wei Wang
@ 2021-11-12 13:13   ` kernel test robot
  0 siblings, 0 replies; 7+ messages in thread
From: kernel test robot @ 2021-11-12 13:13 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 5727 bytes --]

Hi Wei,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on net/master]
[also build test ERROR on horms-ipvs/master v5.15]
[cannot apply to net-next/master linus/master next-20211112]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Wei-Wang/net-add-new-socket-option-SO_RESERVE_MEM/20210929-124516
base:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git f936bb42aeb94a069bec7c9e04100d199c372956
config: mips-loongson2k_defconfig (attached as .config)
compiler: mips64el-linux-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/e60de1697822b3d6384c6bc620d7048c05fca819
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Wei-Wang/net-add-new-socket-option-SO_RESERVE_MEM/20210929-124516
        git checkout e60de1697822b3d6384c6bc620d7048c05fca819
        # save the attached .config to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross O=build_dir ARCH=mips SHELL=/bin/bash net/

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   net/core/sock.c: In function 'sock_setsockopt':
>> net/core/sock.c:1417:14: error: 'SO_RESERVE_MEM' undeclared (first use in this function)
    1417 |         case SO_RESERVE_MEM:
         |              ^~~~~~~~~~~~~~
   net/core/sock.c:1417:14: note: each undeclared identifier is reported only once for each function it appears in
   net/core/sock.c: In function 'sock_getsockopt':
   net/core/sock.c:1800:14: error: 'SO_RESERVE_MEM' undeclared (first use in this function)
    1800 |         case SO_RESERVE_MEM:
         |              ^~~~~~~~~~~~~~


vim +/SO_RESERVE_MEM +1417 net/core/sock.c

  1330	
  1331		case SO_MAX_PACING_RATE:
  1332			{
  1333			unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
  1334	
  1335			if (sizeof(ulval) != sizeof(val) &&
  1336			    optlen >= sizeof(ulval) &&
  1337			    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
  1338				ret = -EFAULT;
  1339				break;
  1340			}
  1341			if (ulval != ~0UL)
  1342				cmpxchg(&sk->sk_pacing_status,
  1343					SK_PACING_NONE,
  1344					SK_PACING_NEEDED);
  1345			sk->sk_max_pacing_rate = ulval;
  1346			sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
  1347			break;
  1348			}
  1349		case SO_INCOMING_CPU:
  1350			WRITE_ONCE(sk->sk_incoming_cpu, val);
  1351			break;
  1352	
  1353		case SO_CNX_ADVICE:
  1354			if (val == 1)
  1355				dst_negative_advice(sk);
  1356			break;
  1357	
  1358		case SO_ZEROCOPY:
  1359			if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
  1360				if (!((sk->sk_type == SOCK_STREAM &&
  1361				       sk->sk_protocol == IPPROTO_TCP) ||
  1362				      (sk->sk_type == SOCK_DGRAM &&
  1363				       sk->sk_protocol == IPPROTO_UDP)))
  1364					ret = -ENOTSUPP;
  1365			} else if (sk->sk_family != PF_RDS) {
  1366				ret = -ENOTSUPP;
  1367			}
  1368			if (!ret) {
  1369				if (val < 0 || val > 1)
  1370					ret = -EINVAL;
  1371				else
  1372					sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
  1373			}
  1374			break;
  1375	
  1376		case SO_TXTIME:
  1377			if (optlen != sizeof(struct sock_txtime)) {
  1378				ret = -EINVAL;
  1379				break;
  1380			} else if (copy_from_sockptr(&sk_txtime, optval,
  1381				   sizeof(struct sock_txtime))) {
  1382				ret = -EFAULT;
  1383				break;
  1384			} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
  1385				ret = -EINVAL;
  1386				break;
  1387			}
  1388			/* CLOCK_MONOTONIC is only used by sch_fq, and this packet
  1389			 * scheduler has enough safe guards.
  1390			 */
  1391			if (sk_txtime.clockid != CLOCK_MONOTONIC &&
  1392			    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
  1393				ret = -EPERM;
  1394				break;
  1395			}
  1396			sock_valbool_flag(sk, SOCK_TXTIME, true);
  1397			sk->sk_clockid = sk_txtime.clockid;
  1398			sk->sk_txtime_deadline_mode =
  1399				!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
  1400			sk->sk_txtime_report_errors =
  1401				!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
  1402			break;
  1403	
  1404		case SO_BINDTOIFINDEX:
  1405			ret = sock_bindtoindex_locked(sk, val);
  1406			break;
  1407	
  1408		case SO_BUF_LOCK:
  1409			if (val & ~SOCK_BUF_LOCK_MASK) {
  1410				ret = -EINVAL;
  1411				break;
  1412			}
  1413			sk->sk_userlocks = val | (sk->sk_userlocks &
  1414						  ~SOCK_BUF_LOCK_MASK);
  1415			break;
  1416	
> 1417		case SO_RESERVE_MEM:
  1418		{
  1419			int delta;
  1420	
  1421			if (val < 0) {
  1422				ret = -EINVAL;
  1423				break;
  1424			}
  1425	
  1426			delta = val - sk->sk_reserved_mem;
  1427			if (delta < 0)
  1428				sock_release_reserved_memory(sk, -delta);
  1429			else
  1430				ret = sock_reserve_memory(sk, delta);
  1431			break;
  1432		}
  1433	
  1434		default:
  1435			ret = -ENOPROTOOPT;
  1436			break;
  1437		}
  1438		release_sock(sk);
  1439		return ret;
  1440	}
  1441	EXPORT_SYMBOL(sock_setsockopt);
  1442	
  1443	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 30680 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2021-11-12 13:13 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-09-27 18:25 [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM Wei Wang
2021-09-27 18:25 ` [PATCH net-next 1/3] " Wei Wang
2021-11-12 13:13   ` kernel test robot
2021-09-27 18:25 ` [PATCH net-next 2/3] tcp: adjust sndbuf according to sk_reserved_mem Wei Wang
2021-09-27 18:25 ` [PATCH net-next 3/3] tcp: adjust rcv_ssthresh " Wei Wang
2021-09-28  0:50 ` [PATCH net-next 0/3] net: add new socket option SO_RESERVE_MEM Jakub Kicinski
2021-09-28  3:33   ` Wei Wang

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.