linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/3] net: TCP/IP: A few minor cleanups
@ 2017-09-08  5:05 Michael Witten
  2017-09-08  5:05 ` [PATCH 1/3] net: __sock_cmsg_send(): Remove unused parameter `msg' Michael Witten
                   ` (3 more replies)
  0 siblings, 4 replies; 21+ messages in thread
From: Michael Witten @ 2017-09-08  5:05 UTC (permalink / raw)
  To: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI; +Cc: netdev, linux-kernel

The following patch series is an ad hoc "cleanup" that I made
while perusing the code (I'm not well versed in this code, so I
would not be surprised if there were objections to the changes):

  [1] net: __sock_cmsg_send(): Remove unused parameter `msg'
  [2] net: inet_recvmsg(): Remove unnecessary bitwise operation.
  [3] net: skb_queue_purge(): lock/unlock the list only once

Each patch will be sent as an individual email; the total diff
is appended below for your convenience.

You may also fetch these patches from GitHub:

  git checkout -b test 5969d1bb3082b41eba8fd2c826559abe38ccb6df
  git pull https://github.com/mfwitten/linux.git net/tcp-ip/01-cleanup/00

Overall:

  include/net/sock.h     | 2 +-
  net/core/skbuff.c      | 6 +++++-
  net/core/sock.c        | 4 ++--
  net/ipv4/af_inet.c     | 2 +-
  net/ipv4/ip_sockglue.c | 2 +-
  net/ipv6/datagram.c    | 2 +-
  6 files changed, 11 insertions(+), 7 deletions(-)

Sincerely,
Michael Witten

diff --git a/include/net/sock.h b/include/net/sock.h
index 03a362568357..83373d7148a9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1562,7 +1562,7 @@ struct sockcm_cookie {
 	u16 tsflags;
 };
 
-int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
 		     struct sockcm_cookie *sockc);
 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 		   struct sockcm_cookie *sockc);
diff --git a/net/core/sock.c b/net/core/sock.c
index 9b7b6bbb2a23..425e03fe1c56 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2091,7 +2091,7 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
 }
 EXPORT_SYMBOL(sock_alloc_send_skb);
 
-int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
 		     struct sockcm_cookie *sockc)
 {
 	u32 tsflags;
@@ -2137,7 +2137,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 			return -EINVAL;
 		if (cmsg->cmsg_level != SOL_SOCKET)
 			continue;
-		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
+		ret = __sock_cmsg_send(sk, cmsg, sockc);
 		if (ret)
 			return ret;
 	}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index e558e4f9597b..c79b7822b0b9 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -263,7 +263,7 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
 		}
 #endif
 		if (cmsg->cmsg_level == SOL_SOCKET) {
-			err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
+			err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
 			if (err)
 				return err;
 			continue;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a1f918713006..1d1926a4cbe2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -756,7 +756,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 		}
 
 		if (cmsg->cmsg_level == SOL_SOCKET) {
-			err = __sock_cmsg_send(sk, msg, cmsg, sockc);
+			err = __sock_cmsg_send(sk, cmsg, sockc);
 			if (err)
 				return err;
 			continue;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e31108e5ef79..2dbed042a412 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -791,7 +791,7 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	sock_rps_record_flow(sk);
 
 	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
-				   flags & ~MSG_DONTWAIT, &addr_len);
+				   flags, &addr_len);
 	if (err >= 0)
 		msg->msg_namelen = addr_len;
 	return err;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 68065d7d383f..66c0731a2a5f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2834,9 +2834,13 @@ EXPORT_SYMBOL(skb_dequeue_tail);
  */
 void skb_queue_purge(struct sk_buff_head *list)
 {
+	unsigned long flags;
 	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
+
+	spin_lock_irqsave(&list->lock, flags);
+	while ((skb = __skb_dequeue(list)) != NULL)
 		kfree_skb(skb);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 EXPORT_SYMBOL(skb_queue_purge);
 
-- 
2.14.1

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 1/3] net: __sock_cmsg_send(): Remove unused parameter `msg'
  2017-09-08  5:05 [PATCH 0/3] net: TCP/IP: A few minor cleanups Michael Witten
@ 2017-09-08  5:05 ` Michael Witten
  2017-09-08  5:06 ` [PATCH 2/3] net: inet_recvmsg(): Remove unnecessary bitwise operation Michael Witten
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 21+ messages in thread
From: Michael Witten @ 2017-09-08  5:05 UTC (permalink / raw)
  To: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI; +Cc: netdev, linux-kernel

Date: Thu, 7 Sep 2017 03:21:38 +0000
Signed-off-by: Michael Witten <mfwitten@gmail.com>
---
 include/net/sock.h     | 2 +-
 net/core/sock.c        | 4 ++--
 net/ipv4/ip_sockglue.c | 2 +-
 net/ipv6/datagram.c    | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 03a362568357..83373d7148a9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1562,7 +1562,7 @@ struct sockcm_cookie {
 	u16 tsflags;
 };
 
-int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
 		     struct sockcm_cookie *sockc);
 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 		   struct sockcm_cookie *sockc);
diff --git a/net/core/sock.c b/net/core/sock.c
index 9b7b6bbb2a23..425e03fe1c56 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2091,7 +2091,7 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
 }
 EXPORT_SYMBOL(sock_alloc_send_skb);
 
-int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
 		     struct sockcm_cookie *sockc)
 {
 	u32 tsflags;
@@ -2137,7 +2137,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 			return -EINVAL;
 		if (cmsg->cmsg_level != SOL_SOCKET)
 			continue;
-		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
+		ret = __sock_cmsg_send(sk, cmsg, sockc);
 		if (ret)
 			return ret;
 	}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index e558e4f9597b..c79b7822b0b9 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -263,7 +263,7 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
 		}
 #endif
 		if (cmsg->cmsg_level == SOL_SOCKET) {
-			err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
+			err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
 			if (err)
 				return err;
 			continue;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a1f918713006..1d1926a4cbe2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -756,7 +756,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 		}
 
 		if (cmsg->cmsg_level == SOL_SOCKET) {
-			err = __sock_cmsg_send(sk, msg, cmsg, sockc);
+			err = __sock_cmsg_send(sk, cmsg, sockc);
 			if (err)
 				return err;
 			continue;
-- 
2.14.1

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 2/3] net: inet_recvmsg(): Remove unnecessary bitwise operation
  2017-09-08  5:05 [PATCH 0/3] net: TCP/IP: A few minor cleanups Michael Witten
  2017-09-08  5:05 ` [PATCH 1/3] net: __sock_cmsg_send(): Remove unused parameter `msg' Michael Witten
@ 2017-09-08  5:06 ` Michael Witten
  2017-09-08  5:06 ` [PATCH 3/3] net: skb_queue_purge(): lock/unlock the list only once Michael Witten
  2017-10-01 22:19 ` [PATCH net 0/3] net: TCP/IP: A few minor cleanups Michael Witten
  3 siblings, 0 replies; 21+ messages in thread
From: Michael Witten @ 2017-09-08  5:06 UTC (permalink / raw)
  To: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI; +Cc: netdev, linux-kernel

Date: Fri, 8 Sep 2017 00:47:49 +0000
The flag `MSG_DONTWAIT' is handled by passing an argument through
the dedicated parameter `nonblock' of the function `tcp_recvmsg()'.

Presumably because `MSG_DONTWAIT' is handled so explicitly, it is
unset in the collection of flags that are passed to `tcp_recvmsg()';
yet, this unsetting appears to be unnecessary, and so this commit
removes the bitwise operation that performs the unsetting.

Signed-off-by: Michael Witten <mfwitten@gmail.com>
---
 net/ipv4/af_inet.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e31108e5ef79..2dbed042a412 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -791,7 +791,7 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	sock_rps_record_flow(sk);
 
 	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
-				   flags & ~MSG_DONTWAIT, &addr_len);
+				   flags, &addr_len);
 	if (err >= 0)
 		msg->msg_namelen = addr_len;
 	return err;
-- 
2.14.1

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 3/3] net: skb_queue_purge(): lock/unlock the list only once
  2017-09-08  5:05 [PATCH 0/3] net: TCP/IP: A few minor cleanups Michael Witten
  2017-09-08  5:05 ` [PATCH 1/3] net: __sock_cmsg_send(): Remove unused parameter `msg' Michael Witten
  2017-09-08  5:06 ` [PATCH 2/3] net: inet_recvmsg(): Remove unnecessary bitwise operation Michael Witten
@ 2017-09-08  5:06 ` Michael Witten
  2017-09-08 16:01   ` Eric Dumazet
                     ` (3 more replies)
  2017-10-01 22:19 ` [PATCH net 0/3] net: TCP/IP: A few minor cleanups Michael Witten
  3 siblings, 4 replies; 21+ messages in thread
From: Michael Witten @ 2017-09-08  5:06 UTC (permalink / raw)
  To: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI; +Cc: netdev, linux-kernel

Date: Thu, 7 Sep 2017 20:07:40 +0000
With this commit, the list's lock is locked/unlocked only once
for the duration of `skb_queue_purge()'.

Hitherto, the list's lock has been locked/unlocked every time
an item is dequeued; this seems not only inefficient, but also
incorrect, as the whole point of `skb_queue_purge()' is to clear
the list, presumably without giving anything else a chance to
manipulate the list in the interim.

Signed-off-by: Michael Witten <mfwitten@gmail.com>
---
 net/core/skbuff.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 68065d7d383f..66c0731a2a5f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2834,9 +2834,13 @@ EXPORT_SYMBOL(skb_dequeue_tail);
  */
 void skb_queue_purge(struct sk_buff_head *list)
 {
+	unsigned long flags;
 	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
+
+	spin_lock_irqsave(&list->lock, flags);
+	while ((skb = __skb_dequeue(list)) != NULL)
 		kfree_skb(skb);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 EXPORT_SYMBOL(skb_queue_purge);
 
-- 
2.14.1

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH 3/3] net: skb_queue_purge(): lock/unlock the list only once
  2017-09-08  5:06 ` [PATCH 3/3] net: skb_queue_purge(): lock/unlock the list only once Michael Witten
@ 2017-09-08 16:01   ` Eric Dumazet
  2017-09-08 16:51   ` Stephen Hemminger
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 21+ messages in thread
From: Eric Dumazet @ 2017-09-08 16:01 UTC (permalink / raw)
  To: Michael Witten
  Cc: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI, netdev,
	linux-kernel

On Fri, 2017-09-08 at 05:06 +0000, Michael Witten wrote:
> Date: Thu, 7 Sep 2017 20:07:40 +0000
> With this commit, the list's lock is locked/unlocked only once
> for the duration of `skb_queue_purge()'.
> 
> Hitherto, the list's lock has been locked/unlocked every time
> an item is dequeued; this seems not only inefficient, but also
> incorrect, as the whole point of `skb_queue_purge()' is to clear
> the list, presumably without giving anything else a chance to
> manipulate the list in the interim.
> 
> Signed-off-by: Michael Witten <mfwitten@gmail.com>
> ---
>  net/core/skbuff.c | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
> 
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index 68065d7d383f..66c0731a2a5f 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -2834,9 +2834,13 @@ EXPORT_SYMBOL(skb_dequeue_tail);
>   */
>  void skb_queue_purge(struct sk_buff_head *list)
>  {
> +	unsigned long flags;
>  	struct sk_buff *skb;
> -	while ((skb = skb_dequeue(list)) != NULL)
> +
> +	spin_lock_irqsave(&list->lock, flags);
> +	while ((skb = __skb_dequeue(list)) != NULL)
>  		kfree_skb(skb);
> +	spin_unlock_irqrestore(&list->lock, flags);
>  }
>  EXPORT_SYMBOL(skb_queue_purge);
>  


No, this is very wrong :

Holding hard IRQ for a potential very long time is going to break
horribly. Some lists can have 10,000+ skbs in them.

Note that net-next tree is currently closed, please read 
Documentation/networking/netdev-FAQ.txt

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 3/3] net: skb_queue_purge(): lock/unlock the list only once
  2017-09-08  5:06 ` [PATCH 3/3] net: skb_queue_purge(): lock/unlock the list only once Michael Witten
  2017-09-08 16:01   ` Eric Dumazet
@ 2017-09-08 16:51   ` Stephen Hemminger
  2017-09-09  5:50   ` [PATCH v1 3/3] net: skb_queue_purge(): lock/unlock the queue " Michael Witten
  2017-10-01 22:19   ` [PATCH net " Michael Witten
  3 siblings, 0 replies; 21+ messages in thread
From: Stephen Hemminger @ 2017-09-08 16:51 UTC (permalink / raw)
  To: Michael Witten
  Cc: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI, netdev,
	linux-kernel

On Fri, 08 Sep 2017 05:06:30 -0000
Michael Witten <mfwitten@gmail.com> wrote:

> Date: Thu, 7 Sep 2017 20:07:40 +0000
> With this commit, the list's lock is locked/unlocked only once
> for the duration of `skb_queue_purge()'.
> 
> Hitherto, the list's lock has been locked/unlocked every time
> an item is dequeued; this seems not only inefficient, but also
> incorrect, as the whole point of `skb_queue_purge()' is to clear
> the list, presumably without giving anything else a chance to
> manipulate the list in the interim.
> 
> Signed-off-by: Michael Witten <mfwitten@gmail.com>
> ---
>  net/core/skbuff.c | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
> 
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index 68065d7d383f..66c0731a2a5f 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -2834,9 +2834,13 @@ EXPORT_SYMBOL(skb_dequeue_tail);
>   */
>  void skb_queue_purge(struct sk_buff_head *list)
>  {
> +	unsigned long flags;
>  	struct sk_buff *skb;
> -	while ((skb = skb_dequeue(list)) != NULL)
> +
> +	spin_lock_irqsave(&list->lock, flags);
> +	while ((skb = __skb_dequeue(list)) != NULL)
>  		kfree_skb(skb);
> +	spin_unlock_irqrestore(&list->lock, flags);
>  }
>  EXPORT_SYMBOL(skb_queue_purge);
>  

As Eric said, this won't work.

Instead why not introduce something list splice which moves next/prev
of list head to a local list on  the stack.

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 68065d7d383f..4988b6efdcc8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2824,6 +2824,44 @@ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
 }
 EXPORT_SYMBOL(skb_dequeue_tail);
 
+static void __skb_splice(const struct sk_buff_head *list,
+			 struct sk_buff *prev,
+			 struct sk_buff *next)
+{
+	struct sk_buff *first = list->next;
+	struct sk_buff *last = list->prev;
+
+	list->qlen = 0;
+
+	first->prev = prev;
+	prev->next = first;
+
+	list->next = next;
+	next->prev = last;
+}
+
+/**
+ *	skb_splice - join two lists, and initialize the emptied list
+ *	@list: the new list to add
+ *	@head: the pace to add it in the first list
+ *
+ *	Take the first list (@list) and merge it onto the
+ *	head of existing list (@head).
+ */
+static void skb_splice_init(const struct sk_buff_head *list,
+			    struct sk_buff_head *head)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list->lock, flags);
+	if (list->qlen > 0) {
+		head->qlen += list->qlen;
+		__skb_splice(list, head, head->next);
+		__skb_queue_head_init(list);
+	}
+	spin_unlock_irqrestore(&list->lock, flags);
+}
+
 /**
  *	skb_queue_purge - empty a list
  *	@list: list to empty
@@ -2835,7 +2873,12 @@ EXPORT_SYMBOL(skb_dequeue_tail);
 void skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
+	struct skb_buff_head tmp;
+
+	__skb_queue_head_init(&tmp);
+	skb_splice_init(list, &tmp);
+
+	while ((skb = __skb_dequeue(list)) != NULL)
 		kfree_skb(skb);
 }
 EXPORT_SYMBOL(skb_queue_purge);

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH v1 3/3] net: skb_queue_purge(): lock/unlock the queue only once
  2017-09-08  5:06 ` [PATCH 3/3] net: skb_queue_purge(): lock/unlock the list only once Michael Witten
  2017-09-08 16:01   ` Eric Dumazet
  2017-09-08 16:51   ` Stephen Hemminger
@ 2017-09-09  5:50   ` Michael Witten
  2017-09-09 16:52     ` Eric Dumazet
  2017-10-01 22:19   ` [PATCH net " Michael Witten
  3 siblings, 1 reply; 21+ messages in thread
From: Michael Witten @ 2017-09-09  5:50 UTC (permalink / raw)
  To: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI
  Cc: Stephen Hemminger, Eric Dumazet, netdev, linux-kernel

Thanks for your input, Eric Dumazet and Stephen Hemminger; based on
your observations, this version of the patch implements a very
lightweight purging of the queue.

To apply this patch, save this email to:

  /path/to/email

and then run:

  git am --scissors /path/to/email

You may also fetch this patch from GitHub:

  git checkout -b test 5969d1bb3082b41eba8fd2c826559abe38ccb6df
  git pull https://github.com/mfwitten/linux.git net/tcp-ip/01-cleanup/02

Sincerely,
Michael Witten

8<----8<----8<----8<----8<----8<----8<----8<----8<----8<----8<----8<----8<----

Hitherto, the queue's lock has been locked/unlocked every time
an item is dequeued; this seems not only inefficient, but also
incorrect, as the whole point of `skb_queue_purge()' is to clear
the queue, presumably without giving any other thread a chance to
manipulate the queue in the interim.

With this commit, the queue's lock is locked/unlocked only once
when `skb_queue_purge()' is called, and in a way that disables
the IRQs for only a minimal amount of time.

This is achieved by atomically re-initializing the queue (thereby
clearing it), and then freeing each of the items as though it were
enqueued in a private queue that doesn't require locking.

Signed-off-by: Michael Witten <mfwitten@gmail.com>
---
 net/core/skbuff.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 68065d7d383f..bd26b0bde784 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2825,18 +2825,28 @@ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
 EXPORT_SYMBOL(skb_dequeue_tail);
 
 /**
- *	skb_queue_purge - empty a list
- *	@list: list to empty
+ *	skb_queue_purge - empty a queue
+ *	@q: the queue to empty
  *
- *	Delete all buffers on an &sk_buff list. Each buffer is removed from
- *	the list and one reference dropped. This function takes the list
- *	lock and is atomic with respect to other list locking functions.
+ *	Dequeue and free each socket buffer that is in @q.
+ *
+ *	This function is atomic with respect to other queue-locking functions.
  */
-void skb_queue_purge(struct sk_buff_head *list)
+void skb_queue_purge(struct sk_buff_head *q)
 {
-	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
+	unsigned long flags;
+	struct sk_buff *skb, *next, *head = (struct sk_buff *)q;
+
+	spin_lock_irqsave(&q->lock, flags);
+	skb = q->next;
+	__skb_queue_head_init(q);
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	while (skb != head) {
+		next = skb->next;
 		kfree_skb(skb);
+		skb = next;
+	}
 }
 EXPORT_SYMBOL(skb_queue_purge);
 
-- 
2.14.1

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH v1 3/3] net: skb_queue_purge(): lock/unlock the queue only once
  2017-09-09  5:50   ` [PATCH v1 3/3] net: skb_queue_purge(): lock/unlock the queue " Michael Witten
@ 2017-09-09 16:52     ` Eric Dumazet
  0 siblings, 0 replies; 21+ messages in thread
From: Eric Dumazet @ 2017-09-09 16:52 UTC (permalink / raw)
  To: Michael Witten
  Cc: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI,
	Stephen Hemminger, netdev, linux-kernel

On Sat, 2017-09-09 at 05:50 +0000, Michael Witten wrote:
> Thanks for your input, Eric Dumazet and Stephen Hemminger; based on
> your observations, this version of the patch implements a very
> lightweight purging of the queue.

net-next is closed.

Documentation/networking/netdev-FAQ.txt

Meaning we are chasing bugs at this moment, not adding new ones.

Thanks.

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH net 0/3] net: TCP/IP: A few minor cleanups
  2017-09-08  5:05 [PATCH 0/3] net: TCP/IP: A few minor cleanups Michael Witten
                   ` (2 preceding siblings ...)
  2017-09-08  5:06 ` [PATCH 3/3] net: skb_queue_purge(): lock/unlock the list only once Michael Witten
@ 2017-10-01 22:19 ` Michael Witten
  2017-10-01 22:19   ` [PATCH net 1/3] net: __sock_cmsg_send(): Remove unused parameter `msg' Michael Witten
                     ` (2 more replies)
  3 siblings, 3 replies; 21+ messages in thread
From: Michael Witten @ 2017-10-01 22:19 UTC (permalink / raw)
  To: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI
  Cc: Stephen Hemminger, Eric Dumazet, netdev, linux-kernel

The following patch series is an ad hoc "cleanup" that I made
while perusing the code (I'm not well versed in this code, so I
would not be surprised if there were objections to the changes):

  [1] net: __sock_cmsg_send(): Remove unused parameter `msg'
  [2] net: inet_recvmsg(): Remove unnecessary bitwise operation
  [3] net: skb_queue_purge(): lock/unlock the queue only once

Each patch will be sent as an individual reply to this email;
the total diff is appended below for your convenience.

You may also fetch these patches from GitHub:

  git checkout --detach 5969d1bb3082b41eba8fd2c826559abe38ccb6df
  git pull https://github.com/mfwitten/linux.git net/tcp-ip/01-cleanup/02

Overall:

  include/net/sock.h     |  2 +-
  net/core/skbuff.c      | 26 ++++++++++++++++++--------
  net/core/sock.c        |  4 ++--
  net/ipv4/af_inet.c     |  2 +-
  net/ipv4/ip_sockglue.c |  2 +-
  net/ipv6/datagram.c    |  2 +-
  6 files changed, 24 insertions(+), 14 deletions(-)

Sincerely,
Michael Witten

diff --git a/include/net/sock.h b/include/net/sock.h
index 03a362568357..83373d7148a9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1562,7 +1562,7 @@ struct sockcm_cookie {
 	u16 tsflags;
 };
 
-int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
 		     struct sockcm_cookie *sockc);
 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 		   struct sockcm_cookie *sockc);
diff --git a/net/core/sock.c b/net/core/sock.c
index 9b7b6bbb2a23..425e03fe1c56 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2091,7 +2091,7 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
 }
 EXPORT_SYMBOL(sock_alloc_send_skb);
 
-int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
 		     struct sockcm_cookie *sockc)
 {
 	u32 tsflags;
@@ -2137,7 +2137,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 			return -EINVAL;
 		if (cmsg->cmsg_level != SOL_SOCKET)
 			continue;
-		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
+		ret = __sock_cmsg_send(sk, cmsg, sockc);
 		if (ret)
 			return ret;
 	}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index e558e4f9597b..c79b7822b0b9 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -263,7 +263,7 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
 		}
 #endif
 		if (cmsg->cmsg_level == SOL_SOCKET) {
-			err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
+			err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
 			if (err)
 				return err;
 			continue;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a1f918713006..1d1926a4cbe2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -756,7 +756,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 		}
 
 		if (cmsg->cmsg_level == SOL_SOCKET) {
-			err = __sock_cmsg_send(sk, msg, cmsg, sockc);
+			err = __sock_cmsg_send(sk, cmsg, sockc);
 			if (err)
 				return err;
 			continue;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e31108e5ef79..2dbed042a412 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -791,7 +791,7 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	sock_rps_record_flow(sk);
 
 	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
-				   flags & ~MSG_DONTWAIT, &addr_len);
+				   flags, &addr_len);
 	if (err >= 0)
 		msg->msg_namelen = addr_len;
 	return err;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 68065d7d383f..bd26b0bde784 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2825,18 +2825,28 @@
 EXPORT_SYMBOL(skb_dequeue_tail);
 
 /**
- *	skb_queue_purge - empty a list
- *	@list: list to empty
+ *	skb_queue_purge - empty a queue
+ *	@q: the queue to empty
  *
- *	Delete all buffers on an &sk_buff list. Each buffer is removed from
- *	the list and one reference dropped. This function takes the list
- *	lock and is atomic with respect to other list locking functions.
+ *	Dequeue and free each socket buffer that is in @q.
+ *
+ *	This function is atomic with respect to other queue-locking functions.
  */
-void skb_queue_purge(struct sk_buff_head *list)
+void skb_queue_purge(struct sk_buff_head *q)
 {
-	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
+	unsigned long flags;
+	struct sk_buff *skb, *next, *head = (struct sk_buff *)q;
+
+	spin_lock_irqsave(&q->lock, flags);
+	skb = q->next;
+	__skb_queue_head_init(q);
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	while (skb != head) {
+		next = skb->next;
 		kfree_skb(skb);
+		skb = next;
+	}
 }
 EXPORT_SYMBOL(skb_queue_purge);
 
-- 
2.14.1

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH net 1/3] net: __sock_cmsg_send(): Remove unused parameter `msg'
  2017-10-01 22:19 ` [PATCH net 0/3] net: TCP/IP: A few minor cleanups Michael Witten
@ 2017-10-01 22:19   ` Michael Witten
  2017-10-01 22:19   ` [PATCH net 2/3] net: inet_recvmsg(): Remove unnecessary bitwise operation Michael Witten
  2018-02-06  0:54   ` Please apply these tiny, 4-month-old patches Michael Witten
  2 siblings, 0 replies; 21+ messages in thread
From: Michael Witten @ 2017-10-01 22:19 UTC (permalink / raw)
  To: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI
  Cc: Stephen Hemminger, Eric Dumazet, netdev, linux-kernel

Date: Thu, 7 Sep 2017 03:21:38 +0000
Signed-off-by: Michael Witten <mfwitten@gmail.com>
---
 include/net/sock.h     | 2 +-
 net/core/sock.c        | 4 ++--
 net/ipv4/ip_sockglue.c | 2 +-
 net/ipv6/datagram.c    | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 03a362568357..83373d7148a9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1562,7 +1562,7 @@ struct sockcm_cookie {
 	u16 tsflags;
 };
 
-int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
 		     struct sockcm_cookie *sockc);
 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 		   struct sockcm_cookie *sockc);
diff --git a/net/core/sock.c b/net/core/sock.c
index 9b7b6bbb2a23..425e03fe1c56 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2091,7 +2091,7 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
 }
 EXPORT_SYMBOL(sock_alloc_send_skb);
 
-int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
 		     struct sockcm_cookie *sockc)
 {
 	u32 tsflags;
@@ -2137,7 +2137,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 			return -EINVAL;
 		if (cmsg->cmsg_level != SOL_SOCKET)
 			continue;
-		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
+		ret = __sock_cmsg_send(sk, cmsg, sockc);
 		if (ret)
 			return ret;
 	}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index e558e4f9597b..c79b7822b0b9 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -263,7 +263,7 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
 		}
 #endif
 		if (cmsg->cmsg_level == SOL_SOCKET) {
-			err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
+			err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
 			if (err)
 				return err;
 			continue;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a1f918713006..1d1926a4cbe2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -756,7 +756,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 		}
 
 		if (cmsg->cmsg_level == SOL_SOCKET) {
-			err = __sock_cmsg_send(sk, msg, cmsg, sockc);
+			err = __sock_cmsg_send(sk, cmsg, sockc);
 			if (err)
 				return err;
 			continue;
-- 
2.14.1

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH net 2/3] net: inet_recvmsg(): Remove unnecessary bitwise operation
  2017-10-01 22:19 ` [PATCH net 0/3] net: TCP/IP: A few minor cleanups Michael Witten
  2017-10-01 22:19   ` [PATCH net 1/3] net: __sock_cmsg_send(): Remove unused parameter `msg' Michael Witten
@ 2017-10-01 22:19   ` Michael Witten
  2018-02-06  0:54   ` Please apply these tiny, 4-month-old patches Michael Witten
  2 siblings, 0 replies; 21+ messages in thread
From: Michael Witten @ 2017-10-01 22:19 UTC (permalink / raw)
  To: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI
  Cc: Stephen Hemminger, Eric Dumazet, netdev, linux-kernel

Date: Fri, 8 Sep 2017 00:47:49 +0000
The flag `MSG_DONTWAIT' is handled by passing an argument through
the dedicated parameter `nonblock' of the function `tcp_recvmsg()'.

Presumably because `MSG_DONTWAIT' is handled so explicitly, it is
unset in the collection of flags that are passed to `tcp_recvmsg()';
yet, this unsetting appears to be unnecessary, and so this commit
removes the bitwise operation that performs the unsetting.

Signed-off-by: Michael Witten <mfwitten@gmail.com>
---
 net/ipv4/af_inet.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e31108e5ef79..2dbed042a412 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -791,7 +791,7 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	sock_rps_record_flow(sk);
 
 	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
-				   flags & ~MSG_DONTWAIT, &addr_len);
+				   flags, &addr_len);
 	if (err >= 0)
 		msg->msg_namelen = addr_len;
 	return err;
-- 
2.14.1

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH net 3/3] net: skb_queue_purge(): lock/unlock the queue only once
  2017-09-08  5:06 ` [PATCH 3/3] net: skb_queue_purge(): lock/unlock the list only once Michael Witten
                     ` (2 preceding siblings ...)
  2017-09-09  5:50   ` [PATCH v1 3/3] net: skb_queue_purge(): lock/unlock the queue " Michael Witten
@ 2017-10-01 22:19   ` Michael Witten
  2017-10-02  0:59     ` Stephen Hemminger
  3 siblings, 1 reply; 21+ messages in thread
From: Michael Witten @ 2017-10-01 22:19 UTC (permalink / raw)
  To: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI
  Cc: Stephen Hemminger, Eric Dumazet, netdev, linux-kernel

Date: Sat, 9 Sep 2017 05:50:23 +0000
Hitherto, the queue's lock has been locked/unlocked every time
an item is dequeued; this seems not only inefficient, but also
incorrect, as the whole point of `skb_queue_purge()' is to clear
the queue, presumably without giving any other thread a chance to
manipulate the queue in the interim.

With this commit, the queue's lock is locked/unlocked only once
when `skb_queue_purge()' is called, and in a way that disables
the IRQs for only a minimal amount of time.

This is achieved by atomically re-initializing the queue (thereby
clearing it), and then freeing each of the items as though it were
enqueued in a private queue that doesn't require locking.

Signed-off-by: Michael Witten <mfwitten@gmail.com>
---
 net/core/skbuff.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 68065d7d383f..bd26b0bde784 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2825,18 +2825,28 @@ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
 EXPORT_SYMBOL(skb_dequeue_tail);
 
 /**
- *	skb_queue_purge - empty a list
- *	@list: list to empty
+ *	skb_queue_purge - empty a queue
+ *	@q: the queue to empty
  *
- *	Delete all buffers on an &sk_buff list. Each buffer is removed from
- *	the list and one reference dropped. This function takes the list
- *	lock and is atomic with respect to other list locking functions.
+ *	Dequeue and free each socket buffer that is in @q.
+ *
+ *	This function is atomic with respect to other queue-locking functions.
  */
-void skb_queue_purge(struct sk_buff_head *list)
+void skb_queue_purge(struct sk_buff_head *q)
 {
-	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
+	unsigned long flags;
+	struct sk_buff *skb, *next, *head = (struct sk_buff *)q;
+
+	spin_lock_irqsave(&q->lock, flags);
+	skb = q->next;
+	__skb_queue_head_init(q);
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	while (skb != head) {
+		next = skb->next;
 		kfree_skb(skb);
+		skb = next;
+	}
 }
 EXPORT_SYMBOL(skb_queue_purge);
 
-- 
2.14.1

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH net 3/3] net: skb_queue_purge(): lock/unlock the queue only once
  2017-10-01 22:19   ` [PATCH net " Michael Witten
@ 2017-10-02  0:59     ` Stephen Hemminger
  2017-10-02  5:15       ` Michael Witten
  0 siblings, 1 reply; 21+ messages in thread
From: Stephen Hemminger @ 2017-10-02  0:59 UTC (permalink / raw)
  To: Michael Witten
  Cc: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI,
	Eric Dumazet, netdev, linux-kernel

On Sun, 01 Oct 2017 22:19:20 -0000
Michael Witten <mfwitten@gmail.com> wrote:

> +	spin_lock_irqsave(&q->lock, flags);
> +	skb = q->next;
> +	__skb_queue_head_init(q);
> +	spin_unlock_irqrestore(&q->lock, flags);

Other code manipulating lists uses splice operation and
a sk_buff_head temporary on the stack. That would be easier
to understand.

	struct sk_buff_head head;

	__skb_queue_head_init(&head);
	spin_lock_irqsave(&q->lock, flags);
	skb_queue_splice_init(q, &head);
	spin_unlock_irqrestore(&q->lock, flags);


> +	while (skb != head) {
> +		next = skb->next;
>  		kfree_skb(skb);
> +		skb = next;
> +	}

It would be cleaner if you could use
skb_queue_walk_safe rather than open coding the loop.

	skb_queue_walk_safe(&head, skb,  tmp)
		kfree_skb(skb);

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH net 3/3] net: skb_queue_purge(): lock/unlock the queue only once
  2017-10-02  0:59     ` Stephen Hemminger
@ 2017-10-02  5:15       ` Michael Witten
  2017-10-02 14:55         ` Stephen Hemminger
  0 siblings, 1 reply; 21+ messages in thread
From: Michael Witten @ 2017-10-02  5:15 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI,
	Eric Dumazet, netdev, linux-kernel

On Sun, 1 Oct 2017 17:59:09 -0700, Stephen Hemminger wrote:

> On Sun, 01 Oct 2017 22:19:20 -0000 Michael Witten wrote:
>
>> +	spin_lock_irqsave(&q->lock, flags);
>> +	skb = q->next;
>> +	__skb_queue_head_init(q);
>> +	spin_unlock_irqrestore(&q->lock, flags);
>
> Other code manipulating lists uses splice operation and
> a sk_buff_head temporary on the stack. That would be easier
> to understand.
>
> 	struct sk_buff_head head;
>
> 	__skb_queue_head_init(&head);
> 	spin_lock_irqsave(&q->lock, flags);
> 	skb_queue_splice_init(q, &head);
> 	spin_unlock_irqrestore(&q->lock, flags);
>
>
>> +	while (skb != head) {
>> +		next = skb->next;
>>  		kfree_skb(skb);
>> +		skb = next;
>> +	}
>
> It would be cleaner if you could use
> skb_queue_walk_safe rather than open coding the loop.
>
> 	skb_queue_walk_safe(&head, skb,  tmp)
> 		kfree_skb(skb);

I appreciate abstraction as much as anybody, but I do not believe
that such abstractions would actually be an improvement here.

* Splice-initing seems more like an idiom than an abstraction;
  at first blush, it wouldn't be clear to me what the intention
  is.

* Such abstractions are fairly unnecessary.

    * The function as written is already so short as to be
      easily digested.

    * More to the point, this function is not some generic,
      higher-level algorithm that just happens to employ the
      socket buffer interface; rather, it is a function that
      implements part of that very interface, and may thus
      twiddle the intimate bits of these data structures
      without being accused of abusing a leaky abstraction.

* Such abstractions add overhead, if only conceptually. In this
  case, a temporary socket buffer queue allocates *3* unnecessary
  struct members, including a whole `spinlock_t' member:
  
    prev
    qlen
    lock

  It's possible that the compiler will be smart enough to leave
  those out, but I have my suspicions that it won't, not only
  given that the interface contract requires that the temporary
  socket buffer queue be properly initialized before use, but
  also because splicing into the temporary will manipulate its
  `qlen'. Yet, why worry whether optimization happens? The whole
  issue can simply be avoided by exploiting the intimate details
  that are already philosophically available to us.

  Similarly, the function `skb_queue_walk_safe' is nice, but it
  loses value both because a temporary queue loses value (as just
  described), and because it ignores the fact that legitimate
  access to the internals of these data structures allows for
  setting up the requested loop in advance; that is to say, the
  two parts of the function that we are now debating can be woven
  together more tightly than `skb_queue_walk_safe' allows.

For these reasons, I stand by the way that the patch currently
implements this function; it does exactly what is desired, no more
or less.

Sincerely,
Michael Witten

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH net 3/3] net: skb_queue_purge(): lock/unlock the queue only once
  2017-10-02  5:15       ` Michael Witten
@ 2017-10-02 14:55         ` Stephen Hemminger
  0 siblings, 0 replies; 21+ messages in thread
From: Stephen Hemminger @ 2017-10-02 14:55 UTC (permalink / raw)
  To: Michael Witten
  Cc: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI,
	Eric Dumazet, netdev, linux-kernel

On Mon, 02 Oct 2017 05:15:32 -0000
Michael Witten <mfwitten@gmail.com> wrote:

> On Sun, 1 Oct 2017 17:59:09 -0700, Stephen Hemminger wrote:
> 
> > On Sun, 01 Oct 2017 22:19:20 -0000 Michael Witten wrote:
> >  
> >> +	spin_lock_irqsave(&q->lock, flags);
> >> +	skb = q->next;
> >> +	__skb_queue_head_init(q);
> >> +	spin_unlock_irqrestore(&q->lock, flags);  
> >
> > Other code manipulating lists uses splice operation and
> > a sk_buff_head temporary on the stack. That would be easier
> > to understand.
> >
> > 	struct sk_buff_head head;
> >
> > 	__skb_queue_head_init(&head);
> > 	spin_lock_irqsave(&q->lock, flags);
> > 	skb_queue_splice_init(q, &head);
> > 	spin_unlock_irqrestore(&q->lock, flags);
> >
> >  
> >> +	while (skb != head) {
> >> +		next = skb->next;
> >>  		kfree_skb(skb);
> >> +		skb = next;
> >> +	}  
> >
> > It would be cleaner if you could use
> > skb_queue_walk_safe rather than open coding the loop.
> >
> > 	skb_queue_walk_safe(&head, skb,  tmp)
> > 		kfree_skb(skb);  
> 
> I appreciate abstraction as much as anybody, but I do not believe
> that such abstractions would actually be an improvement here.
> 
> * Splice-initing seems more like an idiom than an abstraction;
>   at first blush, it wouldn't be clear to me what the intention
>   is.
> 
> * Such abstractions are fairly unnecessary.
> 
>     * The function as written is already so short as to be
>       easily digested.
> 
>     * More to the point, this function is not some generic,
>       higher-level algorithm that just happens to employ the
>       socket buffer interface; rather, it is a function that
>       implements part of that very interface, and may thus
>       twiddle the intimate bits of these data structures
>       without being accused of abusing a leaky abstraction.
> 
> * Such abstractions add overhead, if only conceptually. In this
>   case, a temporary socket buffer queue allocates *3* unnecessary
>   struct members, including a whole `spinlock_t' member:
>   
>     prev
>     qlen
>     lock
> 
>   It's possible that the compiler will be smart enough to leave
>   those out, but I have my suspicions that it won't, not only
>   given that the interface contract requires that the temporary
>   socket buffer queue be properly initialized before use, but
>   also because splicing into the temporary will manipulate its
>   `qlen'. Yet, why worry whether optimization happens? The whole
>   issue can simply be avoided by exploiting the intimate details
>   that are already philosophically available to us.
> 
>   Similarly, the function `skb_queue_walk_safe' is nice, but it
>   loses value both because a temporary queue loses value (as just
>   described), and because it ignores the fact that legitimate
>   access to the internals of these data structures allows for
>   setting up the requested loop in advance; that is to say, the
>   two parts of the function that we are now debating can be woven
>   together more tightly than `skb_queue_walk_safe' allows.
> 
> For these reasons, I stand by the way that the patch currently
> implements this function; it does exactly what is desired, no more
> or less.
> 
> Sincerely,
> Michael Witten

The point is that there was discussion in the past of replacing
the next/prev as used in skb with more generic code from list.h.
If the abstraction was used, then this code would just work.

The temporary sk_buff_head is on the stack, and any
accesses that update fields like qlen are in CPU cache
and therefore have very little impact on performance.

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Please apply these tiny, 4-month-old patches.
  2017-10-01 22:19 ` [PATCH net 0/3] net: TCP/IP: A few minor cleanups Michael Witten
  2017-10-01 22:19   ` [PATCH net 1/3] net: __sock_cmsg_send(): Remove unused parameter `msg' Michael Witten
  2017-10-01 22:19   ` [PATCH net 2/3] net: inet_recvmsg(): Remove unnecessary bitwise operation Michael Witten
@ 2018-02-06  0:54   ` Michael Witten
  2018-02-06  1:12     ` David Miller
  2 siblings, 1 reply; 21+ messages in thread
From: Michael Witten @ 2018-02-06  0:54 UTC (permalink / raw)
  To: David S. Miller, Alexey Kuznetsov, Hideaki YOSHIFUJI
  Cc: Stephen Hemminger, Eric Dumazet, netdev, linux-kernel

Strictly speaking, these patches streamline the code at both 
compile-time and run-time, and seem to face no technical
objection of note.

The strongest objection was a dubious *potential* refactoring of
similar code, a refactoring which is clearly vaporware, and which
I myself would have tried to complete if only this initial foray
had been applied.

If this is considered "new" code (it isn't) and if this email is
received outside of an appropriate merge window, then save this
email for later consideration---this isn't a real-time conversation;
this is email, so it doesn't matter when you receive it.

Sincerely,
Michael Witten

On Sun, 01 Oct 2017 22:19:02 -0000, Michael Witten wrote:

> The following patch series is an ad hoc "cleanup" that I made
> while perusing the code (I'm not well versed in this code, so I
> would not be surprised if there were objections to the changes):
>
>   [1] net: __sock_cmsg_send(): Remove unused parameter `msg'
>   [2] net: inet_recvmsg(): Remove unnecessary bitwise operation
>   [3] net: skb_queue_purge(): lock/unlock the queue only once
>
> Each patch will be sent as an individual reply to this email;
> the total diff is appended below for your convenience.
>
> You may also fetch these patches from GitHub:
>
>   git checkout --detach 5969d1bb3082b41eba8fd2c826559abe38ccb6df
>   git pull https://github.com/mfwitten/linux.git net/tcp-ip/01-cleanup/02
>
> Overall:
>
>   include/net/sock.h     |  2 +-
>   net/core/skbuff.c      | 26 ++++++++++++++++++--------
>   net/core/sock.c        |  4 ++--
>   net/ipv4/af_inet.c     |  2 +-
>   net/ipv4/ip_sockglue.c |  2 +-
>   net/ipv6/datagram.c    |  2 +-
>   6 files changed, 24 insertions(+), 14 deletions(-)
>
> Sincerely,
> Michael Witten
>
> diff --git a/include/net/sock.h b/include/net/sock.h
> index 03a362568357..83373d7148a9 100644
> --- a/include/net/sock.h
> +++ b/include/net/sock.h
> @@ -1562,7 +1562,7 @@ struct sockcm_cookie {
>  	u16 tsflags;
>  };
>  
> -int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
> +int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
>  		     struct sockcm_cookie *sockc);
>  int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
>  		   struct sockcm_cookie *sockc);
> diff --git a/net/core/sock.c b/net/core/sock.c
> index 9b7b6bbb2a23..425e03fe1c56 100644
> --- a/net/core/sock.c
> +++ b/net/core/sock.c
> @@ -2091,7 +2091,7 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
>  }
>  EXPORT_SYMBOL(sock_alloc_send_skb);
>  
> -int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
> +int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
>  		     struct sockcm_cookie *sockc)
>  {
>  	u32 tsflags;
> @@ -2137,7 +2137,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
>  			return -EINVAL;
>  		if (cmsg->cmsg_level != SOL_SOCKET)
>  			continue;
> -		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
> +		ret = __sock_cmsg_send(sk, cmsg, sockc);
>  		if (ret)
>  			return ret;
>  	}
> diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
> index e558e4f9597b..c79b7822b0b9 100644
> --- a/net/ipv4/ip_sockglue.c
> +++ b/net/ipv4/ip_sockglue.c
> @@ -263,7 +263,7 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
>  		}
>  #endif
>  		if (cmsg->cmsg_level == SOL_SOCKET) {
> -			err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
> +			err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
>  			if (err)
>  				return err;
>  			continue;
> diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
> index a1f918713006..1d1926a4cbe2 100644
> --- a/net/ipv6/datagram.c
> +++ b/net/ipv6/datagram.c
> @@ -756,7 +756,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
>  		}
>  
>  		if (cmsg->cmsg_level == SOL_SOCKET) {
> -			err = __sock_cmsg_send(sk, msg, cmsg, sockc);
> +			err = __sock_cmsg_send(sk, cmsg, sockc);
>  			if (err)
>  				return err;
>  			continue;
> diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
> index e31108e5ef79..2dbed042a412 100644
> --- a/net/ipv4/af_inet.c
> +++ b/net/ipv4/af_inet.c
> @@ -791,7 +791,7 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
>  	sock_rps_record_flow(sk);
>  
>  	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
> -				   flags & ~MSG_DONTWAIT, &addr_len);
> +				   flags, &addr_len);
>  	if (err >= 0)
>  		msg->msg_namelen = addr_len;
>  	return err;
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index 68065d7d383f..bd26b0bde784 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -2825,18 +2825,28 @@
>  EXPORT_SYMBOL(skb_dequeue_tail);
>  
>  /**
> - *	skb_queue_purge - empty a list
> - *	@list: list to empty
> + *	skb_queue_purge - empty a queue
> + *	@q: the queue to empty
>   *
> - *	Delete all buffers on an &sk_buff list. Each buffer is removed from
> - *	the list and one reference dropped. This function takes the list
> - *	lock and is atomic with respect to other list locking functions.
> + *	Dequeue and free each socket buffer that is in @q.
> + *
> + *	This function is atomic with respect to other queue-locking functions.
>   */
> -void skb_queue_purge(struct sk_buff_head *list)
> +void skb_queue_purge(struct sk_buff_head *q)
>  {
> -	struct sk_buff *skb;
> -	while ((skb = skb_dequeue(list)) != NULL)
> +	unsigned long flags;
> +	struct sk_buff *skb, *next, *head = (struct sk_buff *)q;
> +
> +	spin_lock_irqsave(&q->lock, flags);
> +	skb = q->next;
> +	__skb_queue_head_init(q);
> +	spin_unlock_irqrestore(&q->lock, flags);
> +
> +	while (skb != head) {
> +		next = skb->next;
>  		kfree_skb(skb);
> +		skb = next;
> +	}
>  }
>  EXPORT_SYMBOL(skb_queue_purge);
>  
> -- 
> 2.14.1
>

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: Please apply these tiny, 4-month-old patches.
  2018-02-06  0:54   ` Please apply these tiny, 4-month-old patches Michael Witten
@ 2018-02-06  1:12     ` David Miller
  2018-02-06  1:31       ` Michael Witten
  0 siblings, 1 reply; 21+ messages in thread
From: David Miller @ 2018-02-06  1:12 UTC (permalink / raw)
  To: mfwitten; +Cc: kuznet, yoshfuji, stephen, eric.dumazet, netdev, linux-kernel

From: Michael Witten <mfwitten@gmail.com>
Date: Tue, 06 Feb 2018 00:54:35 -0000

> If this is considered "new" code (it isn't) and if this email is
> received outside of an appropriate merge window, then save this
> email for later consideration---this isn't a real-time conversation;
> this is email, so it doesn't matter when you receive it.

Sorry, things don't work that way.

You must submit your changes at the appropriate time.

Please learn how the community works, and how to interact with
developers and maintainers in that community appropriately.

Thank you.

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: Please apply these tiny, 4-month-old patches.
  2018-02-06  1:12     ` David Miller
@ 2018-02-06  1:31       ` Michael Witten
  2018-02-06  1:42         ` Andrew Lunn
  0 siblings, 1 reply; 21+ messages in thread
From: Michael Witten @ 2018-02-06  1:31 UTC (permalink / raw)
  To: David Miller
  Cc: kuznet, yoshfuji, stephen, eric.dumazet, netdev, linux-kernel

On Mon, 05 Feb 2018 20:12:11 -0500 (EST), David Miller wrote:

>> If this is considered "new" code (it isn't) and if this email is
>> received outside of an appropriate merge window, then save this
>> email for later consideration---this isn't a real-time conversation;
>> this is email, so it doesn't matter when you receive it.
>
> Sorry, things don't work that way.
>
> You must submit your changes at the appropriate time.
>
> Please learn how the community works, and how to interact with
> developers and maintainers in that community appropriately.

I already tried that.

If you're unwilling to be an effective maintainer, then please hand
off the responsibility to someone else.

Sincerely,
Michael Witten

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: Please apply these tiny, 4-month-old patches.
  2018-02-06  1:31       ` Michael Witten
@ 2018-02-06  1:42         ` Andrew Lunn
  2018-02-06  2:19           ` Michael Witten
  0 siblings, 1 reply; 21+ messages in thread
From: Andrew Lunn @ 2018-02-06  1:42 UTC (permalink / raw)
  To: Michael Witten
  Cc: David Miller, kuznet, yoshfuji, stephen, eric.dumazet, netdev,
	linux-kernel

> > Please learn how the community works, and how to interact with
> > developers and maintainers in that community appropriately.
> 
> I already tried that.
> 
> If you're unwilling to be an effective maintainer, then please hand
> off the responsibility to someone else.

Could i suggest you read:

https://www.kernel.org/doc/Documentation/networking/netdev-FAQ.txt

And in particular, the bit about netdev being closed.

I suggest you wait a week for netdev to open, and then submit the
patches again. Actual patches, which cleanly apply to net-next.

  Andrew

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: Please apply these tiny, 4-month-old patches.
  2018-02-06  1:42         ` Andrew Lunn
@ 2018-02-06  2:19           ` Michael Witten
  2018-02-06 12:58             ` Andrew Lunn
  0 siblings, 1 reply; 21+ messages in thread
From: Michael Witten @ 2018-02-06  2:19 UTC (permalink / raw)
  To: Andrew Lunn
  Cc: David Miller, kuznet, yoshfuji, stephen, eric.dumazet, netdev,
	linux-kernel

On Tue, 6 Feb 2018 02:42:17 +0100, Andrew Lunn wrote:

>>> Please learn how the community works, and how to interact with
>>> developers and maintainers in that community appropriately.
>>
>> I already tried that.
>>
>> If you're unwilling to be an effective maintainer, then please hand
>> off the responsibility to someone else.
>
> Could i suggest you read:
>
> https://www.kernel.org/doc/Documentation/networking/netdev-FAQ.txt
>
> And in particular, the bit about netdev being closed.
>
> I suggest you wait a week for netdev to open, and then submit the
> patches again. Actual patches, which cleanly apply to net-next.

Thank you kindly for the suggestion.

However, I'm fully versed on the scripture.

I'm glad to report that the patches apply cleanly to `net-next'; the
actual patches are, of course, still available in my previous emails.
Also, as already described, they can be easily fetched from GitHub.

Sincerely,
Michael Witten

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: Please apply these tiny, 4-month-old patches.
  2018-02-06  2:19           ` Michael Witten
@ 2018-02-06 12:58             ` Andrew Lunn
  0 siblings, 0 replies; 21+ messages in thread
From: Andrew Lunn @ 2018-02-06 12:58 UTC (permalink / raw)
  To: Michael Witten
  Cc: David Miller, kuznet, yoshfuji, stephen, eric.dumazet, netdev,
	linux-kernel

> However, I'm fully versed on the scripture.
> 
> I'm glad to report that the patches apply cleanly to `net-next'; the
> actual patches are, of course, still available in my previous emails.
> Also, as already described, they can be easily fetched from GitHub.
> 
> Sincerely,
> Michael Witten

Hi Michael

If you were fully versed in the scriptures, you would know none of
these methods for submitting patches are applicable for netdev. Please
follow the process, repost the patches in a weeks time.

       Andrew

^ permalink raw reply	[flat|nested] 21+ messages in thread

end of thread, other threads:[~2018-02-06 12:59 UTC | newest]

Thread overview: 21+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-09-08  5:05 [PATCH 0/3] net: TCP/IP: A few minor cleanups Michael Witten
2017-09-08  5:05 ` [PATCH 1/3] net: __sock_cmsg_send(): Remove unused parameter `msg' Michael Witten
2017-09-08  5:06 ` [PATCH 2/3] net: inet_recvmsg(): Remove unnecessary bitwise operation Michael Witten
2017-09-08  5:06 ` [PATCH 3/3] net: skb_queue_purge(): lock/unlock the list only once Michael Witten
2017-09-08 16:01   ` Eric Dumazet
2017-09-08 16:51   ` Stephen Hemminger
2017-09-09  5:50   ` [PATCH v1 3/3] net: skb_queue_purge(): lock/unlock the queue " Michael Witten
2017-09-09 16:52     ` Eric Dumazet
2017-10-01 22:19   ` [PATCH net " Michael Witten
2017-10-02  0:59     ` Stephen Hemminger
2017-10-02  5:15       ` Michael Witten
2017-10-02 14:55         ` Stephen Hemminger
2017-10-01 22:19 ` [PATCH net 0/3] net: TCP/IP: A few minor cleanups Michael Witten
2017-10-01 22:19   ` [PATCH net 1/3] net: __sock_cmsg_send(): Remove unused parameter `msg' Michael Witten
2017-10-01 22:19   ` [PATCH net 2/3] net: inet_recvmsg(): Remove unnecessary bitwise operation Michael Witten
2018-02-06  0:54   ` Please apply these tiny, 4-month-old patches Michael Witten
2018-02-06  1:12     ` David Miller
2018-02-06  1:31       ` Michael Witten
2018-02-06  1:42         ` Andrew Lunn
2018-02-06  2:19           ` Michael Witten
2018-02-06 12:58             ` Andrew Lunn

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).