* [PATCH v2] net: helper function for skb_shift
@ 2022-06-20 15:56 Richard Gobert
  2022-06-22  5:12 ` Jakub Kicinski
  2022-06-22  5:14 ` Jakub Kicinski
  0 siblings, 2 replies; 5+ messages in thread
From: Richard Gobert @ 2022-06-20 15:56 UTC (permalink / raw)
  To: davem, kuba, pabeni, steffen.klassert, herbert, yoshfuji,
	dsahern, edumazet, willemb, imagedong, talalahmad, kafai,
	vasily.averin, luiz.von.dentz, jk, netdev

Move the manipulation of the skb len fields (len, data_len, truesize) into
a helper function. There is a comment in skb_shift() explicitly asking for
such a helper, and several other places in the code repeat the same pattern
and can be refactored to use it.
This improves code readability.
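
As an illustration, this is the open-coded pattern the helper replaces in
skb_shift() (see the net/core/skbuff.c hunk below), next to its refactored
form:

	/* before: each call site adjusts the three length fields by hand */
	skb->len      -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len      += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	/* after: one helper call per skb; a negative len shrinks the skb */
	skb_update_len(skb, -shiftlen);
	skb_update_len(tgt, shiftlen);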

Signed-off-by: Richard Gobert <richardbgobert@gmail.com>
---
 include/net/sock.h   | 16 +++++++++++++---
 net/core/skbuff.c    | 13 +++----------
 net/ipv4/esp4.c      |  4 +---
 net/ipv4/ip_output.c |  8 ++------
 4 files changed, 19 insertions(+), 22 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index a01d6c421aa2..21122a22f624 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2218,6 +2218,18 @@ static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
 	return err;
 }
 
+/**
+ * skb_update_len - Updates len fields of skb
+ * @skb: buffer to add len to
+ * @len: number of bytes to add
+ */
+static inline void skb_update_len(struct sk_buff *skb, int len)
+{
+	skb->len += len;
+	skb->data_len += len;
+	skb->truesize += len;
+}
+
 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
 					   struct sk_buff *skb,
 					   struct page *page,
@@ -2230,9 +2242,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
 	if (err)
 		return err;
 
-	skb->len	     += copy;
-	skb->data_len	     += copy;
-	skb->truesize	     += copy;
+	skb_update_len(skb, copy);
 	sk_wmem_queued_add(sk, copy);
 	sk_mem_charge(sk, copy);
 	return 0;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 30b523fa4ad2..e6b5dd69b78f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3195,9 +3195,7 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
 		}
 	}
 
-	to->truesize += len + plen;
-	to->len += len + plen;
-	to->data_len += len + plen;
+	skb_update_len(to, len + plen);
 
 	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
 		skb_tx_error(from);
@@ -3634,13 +3632,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 	tgt->ip_summed = CHECKSUM_PARTIAL;
 	skb->ip_summed = CHECKSUM_PARTIAL;
 
-	/* Yak, is it really working this way? Some helper please? */
-	skb->len -= shiftlen;
-	skb->data_len -= shiftlen;
-	skb->truesize -= shiftlen;
-	tgt->len += shiftlen;
-	tgt->data_len += shiftlen;
-	tgt->truesize += shiftlen;
+	skb_update_len(skb, -shiftlen);
+	skb_update_len(tgt, shiftlen);
 
 	return shiftlen;
 }
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d747166bb291..6d4194349108 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -502,9 +502,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 
 			nfrags++;
 
-			skb->len += tailen;
-			skb->data_len += tailen;
-			skb->truesize += tailen;
+			skb_update_len(skb, tailen);
 			if (sk && sk_fullsock(sk))
 				refcount_add(tailen, &sk->sk_wmem_alloc);
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 00b4bf26fd93..2c46ecc495a4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1214,9 +1214,7 @@ static int __ip_append_data(struct sock *sk,
 
 			pfrag->offset += copy;
 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
-			skb->len += copy;
-			skb->data_len += copy;
-			skb->truesize += copy;
+			skb_update_len(skb, copy);
 			wmem_alloc_delta += copy;
 		} else {
 			err = skb_zerocopy_iter_dgram(skb, from, copy);
@@ -1443,9 +1441,7 @@ ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 			skb->csum = csum_block_add(skb->csum, csum, skb->len);
 		}
 
-		skb->len += len;
-		skb->data_len += len;
-		skb->truesize += len;
+		skb_update_len(skb, len);
 		refcount_add(len, &sk->sk_wmem_alloc);
 		offset += len;
 		size -= len;
-- 
2.36.1



Thread overview: 5+ messages
2022-06-20 15:56 [PATCH v2] net: helper function for skb_shift Richard Gobert
2022-06-22  5:12 ` Jakub Kicinski
2022-06-22 10:25   ` Richard Gobert
2022-06-22 15:37     ` Jakub Kicinski
2022-06-22  5:14 ` Jakub Kicinski
