* [PATCH 0/2] gro: optimise redundant parsing of packets
@ 2023-01-30 13:00 Richard Gobert
  2023-01-30 13:05 ` [PATCH 1/2] gro: decrease size of CB Richard Gobert
  2023-01-30 13:07 ` [PATCH 2/2] gro: optimise redundant parsing of packets Richard Gobert
  0 siblings, 2 replies; 7+ messages in thread
From: Richard Gobert @ 2023-01-30 13:00 UTC (permalink / raw)
  To: davem, edumazet, kuba, pabeni, yoshfuji, dsahern,
	steffen.klassert, lixiaoyan, alexanderduyck, leon, ye.xingchen,
	iwienand, netdev, linux-kernel

Currently, the IPv6 extension headers are parsed twice: first in
ipv6_gro_receive, and then again in ipv6_gro_complete.

The field NAPI_GRO_CB(skb)->proto is used by GRO to hold the layer 4
protocol type that comes after the IPv6 layer. I noticed that it is set
in ipv6_gro_receive, but isn't used anywhere. By using this field, and
also storing the size of the network header, we can avoid parsing
extension headers a second time in ipv6_gro_complete.
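
In code terms, the idea is roughly the following (a simplified sketch; the
actual changes are in the second patch):

    /* ipv6_gro_receive (outermost header only): remember what was parsed */
    NAPI_GRO_CB(skb)->transport_proto = proto; /* L4 protocol after exthdrs */
    NAPI_GRO_CB(skb)->network_len = nlen;      /* IPv6 header + exthdrs */

    /* ipv6_gro_complete: reuse it instead of re-walking the headers */
    ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->transport_proto]);
    nhoff += NAPI_GRO_CB(skb)->network_len;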

The first commit frees up space in the GRO CB. The second commit reduces
the redundant parsing during the complete phase, using the freed CB
space.

I've applied this optimisation to all base protocols (IPv6, IPv4,
Ethernet). Then, I benchmarked this patch on my machine, using ftrace to
measure ipv6_gro_complete's performance, and there was an improvement.

Richard Gobert (2):
  gro: decrease size of CB
  gro: optimise redundant parsing of packets

 include/net/gro.h      | 32 +++++++++++++++++++++-----------
 net/core/gro.c         | 18 +++++++++++-------
 net/ethernet/eth.c     | 11 +++++++++--
 net/ipv4/af_inet.c     |  8 +++++++-
 net/ipv6/ip6_offload.c | 15 ++++++++++++---
 5 files changed, 60 insertions(+), 24 deletions(-)

-- 
2.36.1



* [PATCH 1/2] gro: decrease size of CB
  2023-01-30 13:00 [PATCH 0/2] gro: optimise redundant parsing of packets Richard Gobert
@ 2023-01-30 13:05 ` Richard Gobert
  2023-01-30 13:07 ` [PATCH 2/2] gro: optimise redundant parsing of packets Richard Gobert
  1 sibling, 0 replies; 7+ messages in thread
From: Richard Gobert @ 2023-01-30 13:05 UTC (permalink / raw)
  To: davem, edumazet, kuba, pabeni, yoshfuji, dsahern,
	steffen.klassert, lixiaoyan, alexanderduyck, leon, ye.xingchen,
	iwienand, netdev, linux-kernel

The GRO control block (NAPI_GRO_CB) is currently at its maximum size.
This commit reduces its size by moving two groups of fields, which are
never used at the same time, into a union.

Specifically, frag0 and frag0_len make up the frag0 optimisation
mechanism, which is used during the initial parsing of the SKB.

The fields last and age are used after the initial parsing, while the
SKB is stored in the GRO list, waiting for other packets to arrive.

There was one location in dev_gro_receive that modified the frag0 fields
after last and age had been set. I reordered it accordingly, without
altering the code behaviour.
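
Roughly, the reordering looks like this (simplified from the diff below):

    /* before: last/age were written first, frag0 was consumed later,
     * at the "pull" label */
    NAPI_GRO_CB(skb)->age = jiffies;
    NAPI_GRO_CB(skb)->last = skb;
    /* ... */
    grow = skb_gro_offset(skb) - skb_headlen(skb);
    if (grow > 0)
            gro_pull_from_frag0(skb, grow);

    /* after: all frag0 accesses finish before the union space is reused */
    gro_try_pull_from_frag0(skb);
    NAPI_GRO_CB(skb)->age = jiffies;
    NAPI_GRO_CB(skb)->last = skb;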

Signed-off-by: Richard Gobert <richardbgobert@gmail.com>
---
 include/net/gro.h | 26 ++++++++++++++++----------
 net/core/gro.c    | 18 +++++++++++-------
 2 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/include/net/gro.h b/include/net/gro.h
index a4fab706240d..7b47dd6ce94f 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -11,11 +11,23 @@
 #include <net/udp.h>
 
 struct napi_gro_cb {
-	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
-	void	*frag0;
+	union {
+		struct {
+			/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
+			void	*frag0;
 
-	/* Length of frag0. */
-	unsigned int frag0_len;
+			/* Length of frag0. */
+			unsigned int frag0_len;
+		};
+
+		struct {
+			/* used in skb_gro_receive() slow path */
+			struct sk_buff *last;
+
+			/* jiffies when first packet was created/queued */
+			unsigned long age;
+		};
+	};
 
 	/* This indicates where we are processing relative to skb->data. */
 	int	data_offset;
@@ -32,9 +44,6 @@ struct napi_gro_cb {
 	/* Used in ipv6_gro_receive() and foo-over-udp */
 	u16	proto;
 
-	/* jiffies when first packet was created/queued */
-	unsigned long age;
-
 /* Used in napi_gro_cb::free */
 #define NAPI_GRO_FREE             1
 #define NAPI_GRO_FREE_STOLEN_HEAD 2
@@ -77,9 +86,6 @@ struct napi_gro_cb {
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
-
-	/* used in skb_gro_receive() slow path */
-	struct sk_buff *last;
 };
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
diff --git a/net/core/gro.c b/net/core/gro.c
index 506f83d715f8..869823d9e8bc 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -449,6 +449,14 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
 	}
 }
 
+static inline void gro_try_pull_from_frag0(struct sk_buff *skb)
+{
+	int grow = skb_gro_offset(skb) - skb_headlen(skb);
+
+	if (grow > 0)
+		gro_pull_from_frag0(skb, grow);
+}
+
 static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
 {
 	struct sk_buff *oldest;
@@ -478,7 +486,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	struct sk_buff *pp = NULL;
 	enum gro_result ret;
 	int same_flow;
-	int grow;
 
 	if (netif_elide_gro(skb->dev))
 		goto normal;
@@ -553,17 +560,13 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	else
 		gro_list->count++;
 
+	gro_try_pull_from_frag0(skb);
 	NAPI_GRO_CB(skb)->age = jiffies;
 	NAPI_GRO_CB(skb)->last = skb;
 	if (!skb_is_gso(skb))
 		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
 	list_add(&skb->list, &gro_list->list);
 	ret = GRO_HELD;
-
-pull:
-	grow = skb_gro_offset(skb) - skb_headlen(skb);
-	if (grow > 0)
-		gro_pull_from_frag0(skb, grow);
 ok:
 	if (gro_list->count) {
 		if (!test_bit(bucket, &napi->gro_bitmask))
@@ -576,7 +579,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 
 normal:
 	ret = GRO_NORMAL;
-	goto pull;
+	gro_try_pull_from_frag0(skb);
+	goto ok;
 }
 
 struct packet_offload *gro_find_receive_by_type(__be16 type)
-- 
2.36.1



* [PATCH 2/2] gro: optimise redundant parsing of packets
  2023-01-30 13:00 [PATCH 0/2] gro: optimise redundant parsing of packets Richard Gobert
  2023-01-30 13:05 ` [PATCH 1/2] gro: decrease size of CB Richard Gobert
@ 2023-01-30 13:07 ` Richard Gobert
  2023-01-30 15:40   ` Alexander Lobakin
  2023-01-30 17:39   ` Eric Dumazet
  1 sibling, 2 replies; 7+ messages in thread
From: Richard Gobert @ 2023-01-30 13:07 UTC (permalink / raw)
  To: davem, edumazet, kuba, pabeni, yoshfuji, dsahern,
	steffen.klassert, lixiaoyan, alexanderduyck, leon, ye.xingchen,
	iwienand, netdev, linux-kernel

Currently, the IPv6 extension headers are parsed twice: first in
ipv6_gro_receive, and then again in ipv6_gro_complete.

The field NAPI_GRO_CB(skb)->proto is used by GRO to hold the layer 4
protocol type that comes after the IPv6 layer. I noticed that it is set
in ipv6_gro_receive, but isn't used anywhere. By using this field, and
also storing the size of the network header, we can avoid parsing
extension headers a second time in ipv6_gro_complete.

The implementation had to handle both inner and outer layers in case of
encapsulation (as they can't use the same field).

I've applied this optimisation to all base protocols (IPv6, IPv4,
Ethernet). Then, I benchmarked this patch on my machine, using ftrace to
measure ipv6_gro_complete's performance, and there was an improvement.

Signed-off-by: Richard Gobert <richardbgobert@gmail.com>
---
 include/net/gro.h      |  8 ++++++--
 net/ethernet/eth.c     | 11 +++++++++--
 net/ipv4/af_inet.c     |  8 +++++++-
 net/ipv6/ip6_offload.c | 15 ++++++++++++---
 4 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/include/net/gro.h b/include/net/gro.h
index 7b47dd6ce94f..d364616cb930 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -41,8 +41,8 @@ struct napi_gro_cb {
 	/* Number of segments aggregated. */
 	u16	count;
 
-	/* Used in ipv6_gro_receive() and foo-over-udp */
-	u16	proto;
+	/* Used in eth_gro_receive() */
+	__be16	network_proto;
 
 /* Used in napi_gro_cb::free */
 #define NAPI_GRO_FREE             1
@@ -86,6 +86,10 @@ struct napi_gro_cb {
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
+
+	/* Used in inet and ipv6 _gro_receive() */
+	u16	network_len;
+	u8	transport_proto;
 };
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 2edc8b796a4e..d68ad90f0a9e 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -439,6 +439,9 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
 		goto out;
 	}
 
+	if (!NAPI_GRO_CB(skb)->encap_mark)
+		NAPI_GRO_CB(skb)->network_proto = type;
+
 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
 
@@ -456,12 +459,16 @@ EXPORT_SYMBOL(eth_gro_receive);
 int eth_gro_complete(struct sk_buff *skb, int nhoff)
 {
 	struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
-	__be16 type = eh->h_proto;
+	__be16 type;
 	struct packet_offload *ptype;
 	int err = -ENOSYS;
 
-	if (skb->encapsulation)
+	if (skb->encapsulation) {
 		skb_set_inner_mac_header(skb, nhoff);
+		type = eh->h_proto;
+	} else {
+		type = NAPI_GRO_CB(skb)->network_proto;
+	}
 
 	ptype = gro_find_complete_by_type(type);
 	if (ptype != NULL)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6c0ec2789943..4401af7b3a15 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1551,6 +1551,9 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
 	 * immediately following this IP hdr.
 	 */
 
+	if (!NAPI_GRO_CB(skb)->encap_mark)
+		NAPI_GRO_CB(skb)->transport_proto = proto;
+
 	/* Note : No need to call skb_gro_postpull_rcsum() here,
 	 * as we already checked checksum over ipv4 header was 0
 	 */
@@ -1621,12 +1624,15 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
 	__be16 newlen = htons(skb->len - nhoff);
 	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
 	const struct net_offload *ops;
-	int proto = iph->protocol;
+	int proto;
 	int err = -ENOSYS;
 
 	if (skb->encapsulation) {
 		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
 		skb_set_inner_network_header(skb, nhoff);
+		proto = iph->protocol;
+	} else {
+		proto = NAPI_GRO_CB(skb)->transport_proto;
 	}
 
 	csum_replace2(&iph->check, iph->tot_len, newlen);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 00dc2e3b0184..79ba5882f576 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -227,11 +227,14 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
 		iph = ipv6_hdr(skb);
 	}
 
-	NAPI_GRO_CB(skb)->proto = proto;
-
 	flush--;
 	nlen = skb_network_header_len(skb);
 
+	if (!NAPI_GRO_CB(skb)->encap_mark) {
+		NAPI_GRO_CB(skb)->transport_proto = proto;
+		NAPI_GRO_CB(skb)->network_len = nlen;
+	}
+
 	list_for_each_entry(p, head, list) {
 		const struct ipv6hdr *iph2;
 		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
@@ -358,7 +361,13 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
 		iph->payload_len = htons(payload_len);
 	}
 
-	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
+	if (!skb->encapsulation) {
+		ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->transport_proto]);
+		nhoff += NAPI_GRO_CB(skb)->network_len;
+	} else {
+		nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
+	}
+
 	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
 		goto out;
 
-- 
2.36.1



* Re: [PATCH 2/2] gro: optimise redundant parsing of packets
  2023-01-30 13:07 ` [PATCH 2/2] gro: optimise redundant parsing of packets Richard Gobert
@ 2023-01-30 15:40   ` Alexander Lobakin
  2023-02-22 14:47     ` Richard Gobert
  2023-01-30 17:39   ` Eric Dumazet
  1 sibling, 1 reply; 7+ messages in thread
From: Alexander Lobakin @ 2023-01-30 15:40 UTC (permalink / raw)
  To: Richard Gobert
  Cc: davem, edumazet, kuba, pabeni, yoshfuji, dsahern,
	steffen.klassert, lixiaoyan, alexanderduyck, leon, ye.xingchen,
	iwienand, netdev, linux-kernel

From: Richard Gobert <richardbgobert@gmail.com>
Date: Mon, 30 Jan 2023 14:07:55 +0100

> Currently, the IPv6 extension headers are parsed twice: first in
> ipv6_gro_receive, and then again in ipv6_gro_complete.
> 
> The field NAPI_GRO_CB(skb)->proto is used by GRO to hold the layer 4
> protocol type that comes after the IPv6 layer. I noticed that it is set
> in ipv6_gro_receive, but isn't used anywhere. By using this field, and
> also storing the size of the network header, we can avoid parsing
> extension headers a second time in ipv6_gro_complete.
> 
> The implementation had to handle both inner and outer layers in case of
> encapsulation (as they can't use the same field).
> 
> I've applied this optimisation to all base protocols (IPv6, IPv4,
> Ethernet). Then, I benchmarked this patch on my machine, using ftrace to
> measure ipv6_gro_complete's performance, and there was an improvement.

Would be nice to see some perf numbers. "there was an improvement"
doesn't say a lot TBH...

> 
> Signed-off-by: Richard Gobert <richardbgobert@gmail.com>
> ---
>  include/net/gro.h      |  8 ++++++--
>  net/ethernet/eth.c     | 11 +++++++++--
>  net/ipv4/af_inet.c     |  8 +++++++-
>  net/ipv6/ip6_offload.c | 15 ++++++++++++---
>  4 files changed, 34 insertions(+), 8 deletions(-)

[...]

> @@ -456,12 +459,16 @@ EXPORT_SYMBOL(eth_gro_receive);
>  int eth_gro_complete(struct sk_buff *skb, int nhoff)
>  {
>  	struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
> -	__be16 type = eh->h_proto;
> +	__be16 type;

Please don't break RCT style when shortening/expanding variable
declaration lines.
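
(Reverse xmas tree means ordering the local declarations from the longest
line to the shortest, so with @type losing its initializer it should move
to the bottom, roughly:)

        struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
        struct packet_offload *ptype;
        int err = -ENOSYS;
        __be16 type;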

>  	struct packet_offload *ptype;
>  	int err = -ENOSYS;
>  
> -	if (skb->encapsulation)
> +	if (skb->encapsulation) {
>  		skb_set_inner_mac_header(skb, nhoff);
> +		type = eh->h_proto;
> +	} else {
> +		type = NAPI_GRO_CB(skb)->network_proto;
> +	}
>  
>  	ptype = gro_find_complete_by_type(type);
>  	if (ptype != NULL)
> diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
> index 6c0ec2789943..4401af7b3a15 100644
> --- a/net/ipv4/af_inet.c
> +++ b/net/ipv4/af_inet.c
> @@ -1551,6 +1551,9 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
>  	 * immediately following this IP hdr.
>  	 */
>  
> +	if (!NAPI_GRO_CB(skb)->encap_mark)
> +		NAPI_GRO_CB(skb)->transport_proto = proto;
> +
>  	/* Note : No need to call skb_gro_postpull_rcsum() here,
>  	 * as we already checked checksum over ipv4 header was 0
>  	 */
> @@ -1621,12 +1624,15 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
>  	__be16 newlen = htons(skb->len - nhoff);
>  	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
>  	const struct net_offload *ops;
> -	int proto = iph->protocol;
> +	int proto;

(same)

>  	int err = -ENOSYS;
>  
>  	if (skb->encapsulation) {
>  		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
>  		skb_set_inner_network_header(skb, nhoff);
> +		proto = iph->protocol;
> +	} else {
> +		proto = NAPI_GRO_CB(skb)->transport_proto;
>  	}
>  
>  	csum_replace2(&iph->check, iph->tot_len, newlen);

[...]

> @@ -358,7 +361,13 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
>  		iph->payload_len = htons(payload_len);
>  	}
>  
> -	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
> +	if (!skb->encapsulation) {
> +		ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->transport_proto]);
> +		nhoff += NAPI_GRO_CB(skb)->network_len;

Why not use the same skb_network_header_len() here? Both
skb->network_header and skb->transport_header must be set and correct at
this point (if not, you can always fix that).

> +	} else {
> +		nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
> +	}
> +
>  	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
>  		goto out;
> 

Thanks,
Olek


* Re: [PATCH 2/2] gro: optimise redundant parsing of packets
  2023-01-30 13:07 ` [PATCH 2/2] gro: optimise redundant parsing of packets Richard Gobert
  2023-01-30 15:40   ` Alexander Lobakin
@ 2023-01-30 17:39   ` Eric Dumazet
  2023-02-22 14:35     ` Richard Gobert
  1 sibling, 1 reply; 7+ messages in thread
From: Eric Dumazet @ 2023-01-30 17:39 UTC (permalink / raw)
  To: Richard Gobert
  Cc: davem, kuba, pabeni, yoshfuji, dsahern, steffen.klassert,
	lixiaoyan, alexanderduyck, leon, ye.xingchen, iwienand, netdev,
	linux-kernel

On Mon, Jan 30, 2023 at 2:08 PM Richard Gobert <richardbgobert@gmail.com> wrote:
>
> Currently, the IPv6 extension headers are parsed twice: first in
> ipv6_gro_receive, and then again in ipv6_gro_complete.
>
> The field NAPI_GRO_CB(skb)->proto is used by GRO to hold the layer 4
> protocol type that comes after the IPv6 layer. I noticed that it is set
> in ipv6_gro_receive, but isn't used anywhere. By using this field, and
> also storing the size of the network header, we can avoid parsing
> extension headers a second time in ipv6_gro_complete.
>
> The implementation had to handle both inner and outer layers in case of
> encapsulation (as they can't use the same field).
>
> I've applied this optimisation to all base protocols (IPv6, IPv4,
> Ethernet). Then, I benchmarked this patch on my machine, using ftrace to
> measure ipv6_gro_complete's performance, and there was an improvement.

It seems your patch adds a lot of conditional checks, which will
alternate true/false
for encapsulated protocols.

So please give us raw numbers, ftrace is too heavy weight for such claims.



>
> Signed-off-by: Richard Gobert <richardbgobert@gmail.com>
> ---
>  include/net/gro.h      |  8 ++++++--
>  net/ethernet/eth.c     | 11 +++++++++--
>  net/ipv4/af_inet.c     |  8 +++++++-
>  net/ipv6/ip6_offload.c | 15 ++++++++++++---
>  4 files changed, 34 insertions(+), 8 deletions(-)
>
> diff --git a/include/net/gro.h b/include/net/gro.h
> index 7b47dd6ce94f..d364616cb930 100644
> --- a/include/net/gro.h
> +++ b/include/net/gro.h
> @@ -41,8 +41,8 @@ struct napi_gro_cb {
>         /* Number of segments aggregated. */
>         u16     count;
>
> -       /* Used in ipv6_gro_receive() and foo-over-udp */
> -       u16     proto;
> +       /* Used in eth_gro_receive() */
> +       __be16  network_proto;
>
>  /* Used in napi_gro_cb::free */
>  #define NAPI_GRO_FREE             1
> @@ -86,6 +86,10 @@ struct napi_gro_cb {
>
>         /* used to support CHECKSUM_COMPLETE for tunneling protocols */
>         __wsum  csum;
> +
> +       /* Used in inet and ipv6 _gro_receive() */
> +       u16     network_len;
> +       u8      transport_proto;
>  };
>
>  #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
> diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
> index 2edc8b796a4e..d68ad90f0a9e 100644
> --- a/net/ethernet/eth.c
> +++ b/net/ethernet/eth.c
> @@ -439,6 +439,9 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
>                 goto out;
>         }
>
> +       if (!NAPI_GRO_CB(skb)->encap_mark)
> +               NAPI_GRO_CB(skb)->network_proto = type;
> +
>         skb_gro_pull(skb, sizeof(*eh));
>         skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
>
> @@ -456,12 +459,16 @@ EXPORT_SYMBOL(eth_gro_receive);
>  int eth_gro_complete(struct sk_buff *skb, int nhoff)
>  {
>         struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);

Why initializing @eh here is needed ?
Presumably, for !skb->encapsulation, @eh would not be used.

> -       __be16 type = eh->h_proto;
> +       __be16 type;
>         struct packet_offload *ptype;
>         int err = -ENOSYS;
>
> -       if (skb->encapsulation)
> +       if (skb->encapsulation) {
>                 skb_set_inner_mac_header(skb, nhoff);
> +               type = eh->h_proto;
> +       } else {
> +               type = NAPI_GRO_CB(skb)->network_proto;
> +       }
>
>         ptype = gro_find_complete_by_type(type);
>         if (ptype != NULL)
> diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
> index 6c0ec2789943..4401af7b3a15 100644
> --- a/net/ipv4/af_inet.c
> +++ b/net/ipv4/af_inet.c
> @@ -1551,6 +1551,9 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
>          * immediately following this IP hdr.
>          */
>
> +       if (!NAPI_GRO_CB(skb)->encap_mark)
> +               NAPI_GRO_CB(skb)->transport_proto = proto;
> +
>         /* Note : No need to call skb_gro_postpull_rcsum() here,
>          * as we already checked checksum over ipv4 header was 0
>          */
> @@ -1621,12 +1624,15 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
>         __be16 newlen = htons(skb->len - nhoff);
>         struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
>         const struct net_offload *ops;
> -       int proto = iph->protocol;
> +       int proto;
>         int err = -ENOSYS;
>
>         if (skb->encapsulation) {
>                 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
>                 skb_set_inner_network_header(skb, nhoff);
> +               proto = iph->protocol;
> +       } else {
> +               proto = NAPI_GRO_CB(skb)->transport_proto;

I really doubt this change is needed.
We need to access iph->fields in the following lines.
Adding an else {} branch is adding extra code, and makes your patch
longer to review.

>         }
>
>         csum_replace2(&iph->check, iph->tot_len, newlen);
> diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
> index 00dc2e3b0184..79ba5882f576 100644
> --- a/net/ipv6/ip6_offload.c
> +++ b/net/ipv6/ip6_offload.c
> @@ -227,11 +227,14 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
>                 iph = ipv6_hdr(skb);
>         }
>
> -       NAPI_GRO_CB(skb)->proto = proto;

I guess you missed BIG  TCP ipv4 changes under review... ->proto is now used.

> -
>         flush--;
>         nlen = skb_network_header_len(skb);
>
> +       if (!NAPI_GRO_CB(skb)->encap_mark) {
> +               NAPI_GRO_CB(skb)->transport_proto = proto;
> +               NAPI_GRO_CB(skb)->network_len = nlen;
> +       }
> +
>         list_for_each_entry(p, head, list) {
>                 const struct ipv6hdr *iph2;
>                 __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
> @@ -358,7 +361,13 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
>                 iph->payload_len = htons(payload_len);
>         }
>
> -       nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
> +       if (!skb->encapsulation) {
> +               ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->transport_proto]);
> +               nhoff += NAPI_GRO_CB(skb)->network_len;
> +       } else {
> +               nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);

IMO ipv6_exthdrs_len() is quite fast for the typical case where we
have no extension headers.

This new conditional check seems expensive to me.



> +       }
> +
>         if (WARN_ON(!ops || !ops->callbacks.gro_complete))
>                 goto out;
>
> --
> 2.36.1
>


* Re: [PATCH 2/2] gro: optimise redundant parsing of packets
  2023-01-30 17:39   ` Eric Dumazet
@ 2023-02-22 14:35     ` Richard Gobert
  0 siblings, 0 replies; 7+ messages in thread
From: Richard Gobert @ 2023-02-22 14:35 UTC (permalink / raw)
  To: Eric Dumazet
  Cc: davem, kuba, pabeni, yoshfuji, dsahern, steffen.klassert,
	lixiaoyan, alexanderduyck, leon, ye.xingchen, iwienand, netdev,
	linux-kernel

> >
> > Currently, the IPv6 extension headers are parsed twice: first in
> > ipv6_gro_receive, and then again in ipv6_gro_complete.
> >
> > The field NAPI_GRO_CB(skb)->proto is used by GRO to hold the layer 4
> > protocol type that comes after the IPv6 layer. I noticed that it is set
> > in ipv6_gro_receive, but isn't used anywhere. By using this field, and
> > also storing the size of the network header, we can avoid parsing
> > extension headers a second time in ipv6_gro_complete.
> >
> > The implementation had to handle both inner and outer layers in case of
> > encapsulation (as they can't use the same field).
> >
> > I've applied this optimisation to all base protocols (IPv6, IPv4,
> > Ethernet). Then, I benchmarked this patch on my machine, using ftrace to
> > measure ipv6_gro_complete's performance, and there was an improvement.
> 
> It seems your patch adds a lot of conditional checks, which will
> alternate true/false
> for encapsulated protocols.
> 
> So please give us raw numbers, ftrace is too heavy weight for such claims.
> 

For the benchmarks, I used a 100Gbit mlx5 NIC on a single core, with power
management and turbo boost off. In the netperf output below, the last column
is the measured throughput in 10^6 bits/sec.

Typical IPv6 traffic (zero extension headers):

    for i in {1..5}; do netperf -t TCP_STREAM -H 2001:db8:2:2::2 -l 90 | tail -1; done
    # before
    131072  16384  16384    90.00    16391.20
    131072  16384  16384    90.00    16403.50
    131072  16384  16384    90.00    16403.30
    131072  16384  16384    90.00    16397.84
    131072  16384  16384    90.00    16398.00

    # after
    131072  16384  16384    90.00    16399.85
    131072  16384  16384    90.00    16392.37
    131072  16384  16384    90.00    16403.06
    131072  16384  16384    90.00    16406.97
    131072  16384  16384    90.00    16406.09

IPv6 over IPv6 traffic:

    for i in {1..5}; do netperf -t TCP_STREAM -H 4001:db8:2:2::2 -l 90 | tail -1; done
    # before
    131072  16384  16384    90.00    14791.61
    131072  16384  16384    90.00    14791.66
    131072  16384  16384    90.00    14783.47
    131072  16384  16384    90.00    14810.17
    131072  16384  16384    90.00    14806.15

    # after
    131072  16384  16384    90.00    14793.49
    131072  16384  16384    90.00    14816.10
    131072  16384  16384    90.00    14818.41
    131072  16384  16384    90.00    14780.35
    131072  16384  16384    90.00    14800.48

IPv6 traffic with varying extension headers:

    for i in {1..5}; do netperf -t TCP_STREAM -H 2001:db8:2:2::2 -l 90 | tail -1; done
    # before
    131072  16384  16384    90.00    14812.37
    131072  16384  16384    90.00    14813.04
    131072  16384  16384    90.00    14802.54
    131072  16384  16384    90.00    14804.06
    131072  16384  16384    90.00    14819.08

    # after
    131072  16384  16384    90.00    14927.11
    131072  16384  16384    90.00    14910.45
    131072  16384  16384    90.00    14917.36
    131072  16384  16384    90.00    14916.53
    131072  16384  16384    90.00    14928.88

> >
> > Signed-off-by: Richard Gobert <richardbgobert@gmail.com>
> > ---
> >  include/net/gro.h      |  8 ++++++--
> >  net/ethernet/eth.c     | 11 +++++++++--
> >  net/ipv4/af_inet.c     |  8 +++++++-
> >  net/ipv6/ip6_offload.c | 15 ++++++++++++---
> >  4 files changed, 34 insertions(+), 8 deletions(-)
> >
> > diff --git a/include/net/gro.h b/include/net/gro.h
> > index 7b47dd6ce94f..d364616cb930 100644
> > --- a/include/net/gro.h
> > +++ b/include/net/gro.h
> > @@ -41,8 +41,8 @@ struct napi_gro_cb {
> >         /* Number of segments aggregated. */
> >         u16     count;
> >
> > -       /* Used in ipv6_gro_receive() and foo-over-udp */
> > -       u16     proto;
> > +       /* Used in eth_gro_receive() */
> > +       __be16  network_proto;
> >
> >  /* Used in napi_gro_cb::free */
> >  #define NAPI_GRO_FREE             1
> > @@ -86,6 +86,10 @@ struct napi_gro_cb {
> >
> >         /* used to support CHECKSUM_COMPLETE for tunneling protocols */
> >         __wsum  csum;
> > +
> > +       /* Used in inet and ipv6 _gro_receive() */
> > +       u16     network_len;
> > +       u8      transport_proto;
> >  };
> >
> >  #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
> > diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
> > index 2edc8b796a4e..d68ad90f0a9e 100644
> > --- a/net/ethernet/eth.c
> > +++ b/net/ethernet/eth.c
> > @@ -439,6 +439,9 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
> >                 goto out;
> >         }
> >
> > +       if (!NAPI_GRO_CB(skb)->encap_mark)
> > +               NAPI_GRO_CB(skb)->network_proto = type;
> > +
> >         skb_gro_pull(skb, sizeof(*eh));
> >         skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
> >
> > @@ -456,12 +459,16 @@ EXPORT_SYMBOL(eth_gro_receive);
> >  int eth_gro_complete(struct sk_buff *skb, int nhoff)
> >  {
> >         struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
> 
> Why initializing @eh here is needed ?
> Presumably, for !skb->encapsulation, @eh would not be used.
> 

Fixed in v2, thanks
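
A sketch of how that fix could look (hypothetical, not necessarily the
actual v2 diff; the ethhdr is only read when it is actually needed):

    if (skb->encapsulation) {
            struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);

            skb_set_inner_mac_header(skb, nhoff);
            type = eh->h_proto;
    } else {
            type = NAPI_GRO_CB(skb)->network_proto;
    }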

> > -       __be16 type = eh->h_proto;
> > +       __be16 type;
> >         struct packet_offload *ptype;
> >         int err = -ENOSYS;
> >
> > -       if (skb->encapsulation)
> > +       if (skb->encapsulation) {
> >                 skb_set_inner_mac_header(skb, nhoff);
> > +               type = eh->h_proto;
> > +       } else {
> > +               type = NAPI_GRO_CB(skb)->network_proto;
> > +       }
> >
> >         ptype = gro_find_complete_by_type(type);
> >         if (ptype != NULL)
> > diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
> > index 6c0ec2789943..4401af7b3a15 100644
> > --- a/net/ipv4/af_inet.c
> > +++ b/net/ipv4/af_inet.c
> > @@ -1551,6 +1551,9 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
> >          * immediately following this IP hdr.
> >          */
> >
> > +       if (!NAPI_GRO_CB(skb)->encap_mark)
> > +               NAPI_GRO_CB(skb)->transport_proto = proto;
> > +
> >         /* Note : No need to call skb_gro_postpull_rcsum() here,
> >          * as we already checked checksum over ipv4 header was 0
> >          */
> > @@ -1621,12 +1624,15 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
> >         __be16 newlen = htons(skb->len - nhoff);
> >         struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
> >         const struct net_offload *ops;
> > -       int proto = iph->protocol;
> > +       int proto;
> >         int err = -ENOSYS;
> >
> >         if (skb->encapsulation) {
> >                 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
> >                 skb_set_inner_network_header(skb, nhoff);
> > +               proto = iph->protocol;
> > +       } else {
> > +               proto = NAPI_GRO_CB(skb)->transport_proto;
> 
> I really doubt this change is needed.
> We need to access iph->fields in the following lines.
> Adding an else {} branch is adding extra code, and makes your patch
> longer to review.
> 

Good point, removed in v2.

> >         }
> >
> >         csum_replace2(&iph->check, iph->tot_len, newlen);
> > diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
> > index 00dc2e3b0184..79ba5882f576 100644
> > --- a/net/ipv6/ip6_offload.c
> > +++ b/net/ipv6/ip6_offload.c
> > @@ -227,11 +227,14 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
> >                 iph = ipv6_hdr(skb);
> >         }
> >
> > -       NAPI_GRO_CB(skb)->proto = proto;
> 
> I guess you missed BIG  TCP ipv4 changes under review... ->proto is now used.
> 

I rebased the patch now that IPv4 BIG TCP is merged, and made proto and
transport_proto separate fields.

> > -
> >         flush--;
> >         nlen = skb_network_header_len(skb);
> >
> > +       if (!NAPI_GRO_CB(skb)->encap_mark) {
> > +               NAPI_GRO_CB(skb)->transport_proto = proto;
> > +               NAPI_GRO_CB(skb)->network_len = nlen;
> > +       }
> > +
> >         list_for_each_entry(p, head, list) {
> >                 const struct ipv6hdr *iph2;
> >                 __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
> > @@ -358,7 +361,13 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
> >                 iph->payload_len = htons(payload_len);
> >         }
> >
> > -       nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
> > +       if (!skb->encapsulation) {
> > +               ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->transport_proto]);
> > +               nhoff += NAPI_GRO_CB(skb)->network_len;
> > +       } else {
> > +               nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
> 
> IMO ipv6_exthdrs_len() is quite fast for the typical case where we
> have no extension headers.
> 
> This new conditional check seems expensive to me.
> 

In v2 I moved the encapsulation branch from the beginning of the function to
this spot, merging the two conditions. So for the typical case, instead of
another redundant extension-header length calculation (which requires at least
one branch and one dereference of iph data), there is just a simple dereference
of the CB. Performance for the typical case is therefore unharmed, and possibly
even slightly improved. For IPv6 traffic with a varying number of extension
headers there is a performance improvement, since multiple dereferences of iph
data and conditional checks are saved.

Also, after further inspection, I noticed a potential problem in
ipv6_gro_complete(), where inner_network_header is initialized to the wrong
value: its initialization should happen after the BIG TCP if block. I fixed
this in v2 by combining that initialization with the new conditional check
after the BIG TCP block, so the patch no longer adds a conditional check.
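
As a rough sketch of the v2 shape described above (hypothetical, not the
actual v2 diff; the skb_set_inner_*() calls are assumed to come from the
existing top-of-function encapsulation block):

    if (skb->encapsulation) {
            /* moved here so it runs after the BIG TCP payload_len handling */
            skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
            skb_set_inner_network_header(skb, nhoff);
            nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
    } else {
            ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->transport_proto]);
            nhoff += NAPI_GRO_CB(skb)->network_len;
    }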




* Re: [PATCH 2/2] gro: optimise redundant parsing of packets
  2023-01-30 15:40   ` Alexander Lobakin
@ 2023-02-22 14:47     ` Richard Gobert
  0 siblings, 0 replies; 7+ messages in thread
From: Richard Gobert @ 2023-02-22 14:47 UTC (permalink / raw)
  To: Alexander Lobakin
  Cc: davem, edumazet, kuba, pabeni, yoshfuji, dsahern,
	steffen.klassert, lixiaoyan, alexanderduyck, leon, ye.xingchen,
	iwienand, netdev, linux-kernel

> > Currently, the IPv6 extension headers are parsed twice: first in
> > ipv6_gro_receive, and then again in ipv6_gro_complete.
> > 
> > The field NAPI_GRO_CB(skb)->proto is used by GRO to hold the layer 4
> > protocol type that comes after the IPv6 layer. I noticed that it is set
> > in ipv6_gro_receive, but isn't used anywhere. By using this field, and
> > also storing the size of the network header, we can avoid parsing
> > extension headers a second time in ipv6_gro_complete.
> > 
> > The implementation had to handle both inner and outer layers in case of
> > encapsulation (as they can't use the same field).
> > 
> > I've applied this optimisation to all base protocols (IPv6, IPv4,
> > Ethernet). Then, I benchmarked this patch on my machine, using ftrace to
> > measure ipv6_gro_complete's performance, and there was an improvement.
> 
> Would be nice to see some perf numbers. "there was an improvement"
> doesn't say a lot TBH...
> 

I just posted raw performance numbers as a reply to Eric's message. Take a
look there.

> > @@ -456,12 +459,16 @@ EXPORT_SYMBOL(eth_gro_receive);
> >  int eth_gro_complete(struct sk_buff *skb, int nhoff)
> >  {
> >  	struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
> > -	__be16 type = eh->h_proto;
> > +	__be16 type;
> 
> Please don't break RCT style when shortening/expanding variable
> declaration lines.

Will be fixed in v2.

> > @@ -358,7 +361,13 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
> >  		iph->payload_len = htons(payload_len);
> >  	}
> >  
> > -	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
> > +	if (!skb->encapsulation) {
> > +		ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->transport_proto]);
> > +		nhoff += NAPI_GRO_CB(skb)->network_len;
> 
> Why not use the same skb_network_header_len() here? Both
> skb->network_header and skb->transport_header must be set and correct at
> this point (if not, you can always fix that).
> 

When processing encapsulated packets, the network_header field is overwritten
while the inner IP header is being processed, so skb_network_header_len()
would not return the correct (outer) value here.
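
For reference, skb_network_header_len() is essentially (simplified):

    static inline u32 skb_network_header_len(const struct sk_buff *skb)
    {
            return skb->transport_header - skb->network_header;
    }

so after the inner headers have been processed it reflects the inner network
header rather than the outer one.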

