From: Vladimir Oltean <vladimir.oltean@nxp.com>
To: netdev@vger.kernel.org
Cc: "David S. Miller" <davem@davemloft.net>,
	"Eric Dumazet" <edumazet@google.com>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Paolo Abeni" <pabeni@redhat.com>,
	"Claudiu Manoil" <claudiu.manoil@nxp.com>,
	"Björn Töpel" <bjorn@kernel.org>,
	"Magnus Karlsson" <magnus.karlsson@intel.com>,
	"Maciej Fijalkowski" <maciej.fijalkowski@intel.com>,
	"Alexei Starovoitov" <ast@kernel.org>,
	"Daniel Borkmann" <daniel@iogearbox.net>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"John Fastabend" <john.fastabend@gmail.com>,
	bpf@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH net-next 03/11] net: enetc: rename "cleaned_cnt" to "buffs_missing"
Date: Mon,  6 Feb 2023 12:08:29 +0200
Message-ID: <20230206100837.451300-4-vladimir.oltean@nxp.com>
In-Reply-To: <20230206100837.451300-1-vladimir.oltean@nxp.com>

When called on an RX ring, enetc_bd_unused() returns the number of
descriptors that still need to be refilled for the ring to be full of
descriptors owned by hardware (i.e. buffers into which it can place
received packets).
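
For reference, that helper is just ring index arithmetic on the per-ring
software state (the relevant struct enetc_bdr fields are next_to_clean,
next_to_use and bd_count). A simplified sketch, not part of this diff
(see its actual definition in the enetc driver header):

/* Number of BDs not currently owned by hardware, i.e. how many RX
 * buffers would have to be refilled for the ring to be full.
 */
static int enetc_bd_unused(struct enetc_bdr *bdr)
{
	if (bdr->next_to_clean > bdr->next_to_use)
		return bdr->next_to_clean - bdr->next_to_use - 1;

	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
}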

Storing this value in a variable named "cleaned_cnt" is misleading to me,
especially since we may enter the NAPI poll routine (enetc_clean_rx_ring)
with a non-zero cleaned_cnt. Rename the variable to "buffs_missing", which
describes what it actually counts: RX buffers that still need to be
refilled.

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
---
 drivers/net/ethernet/freescale/enetc/enetc.c | 41 ++++++++++----------
 1 file changed, 21 insertions(+), 20 deletions(-)

diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 2d8f79ddb78f..4a81a23539fb 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1145,7 +1145,8 @@ static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
 
 static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
 				       u32 bd_status, union enetc_rx_bd **rxbd,
-				       int *i, int *cleaned_cnt, int buffer_size)
+				       int *i, int *buffs_missing,
+				       int buffer_size)
 {
 	struct sk_buff *skb;
 	u16 size;
@@ -1157,7 +1158,7 @@ static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
 
 	enetc_get_offloads(rx_ring, *rxbd, skb);
 
-	(*cleaned_cnt)++;
+	(*buffs_missing)++;
 
 	enetc_rxbd_next(rx_ring, rxbd, i);
 
@@ -1173,7 +1174,7 @@ static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
 
 		enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
 
-		(*cleaned_cnt)++;
+		(*buffs_missing)++;
 
 		enetc_rxbd_next(rx_ring, rxbd, i);
 	}
@@ -1190,9 +1191,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 			       struct napi_struct *napi, int work_limit)
 {
 	int rx_frm_cnt = 0, rx_byte_cnt = 0;
-	int cleaned_cnt, i;
+	int buffs_missing, i;
 
-	cleaned_cnt = enetc_bd_unused(rx_ring);
+	buffs_missing = enetc_bd_unused(rx_ring);
 	/* next descriptor to process */
 	i = rx_ring->next_to_clean;
 
@@ -1201,9 +1202,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 		struct sk_buff *skb;
 		u32 bd_status;
 
-		if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
-			cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
-							    cleaned_cnt);
+		if (buffs_missing >= ENETC_RXBD_BUNDLE)
+			buffs_missing -= enetc_refill_rx_ring(rx_ring,
+							      buffs_missing);
 
 		rxbd = enetc_rxbd(rx_ring, i);
 		bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -1218,7 +1219,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 			break;
 
 		skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
-				      &cleaned_cnt, ENETC_RXB_DMA_SIZE);
+				      &buffs_missing, ENETC_RXB_DMA_SIZE);
 		if (!skb)
 			break;
 
@@ -1447,14 +1448,14 @@ static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
 
 static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
 				 union enetc_rx_bd **rxbd, int *i,
-				 int *cleaned_cnt, struct xdp_buff *xdp_buff)
+				 int *buffs_missing, struct xdp_buff *xdp_buff)
 {
 	u16 size = le16_to_cpu((*rxbd)->r.buf_len);
 
 	xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
 
 	enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
-	(*cleaned_cnt)++;
+	(*buffs_missing)++;
 	enetc_rxbd_next(rx_ring, rxbd, i);
 
 	/* not last BD in frame? */
@@ -1468,7 +1469,7 @@ static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
 		}
 
 		enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
-		(*cleaned_cnt)++;
+		(*buffs_missing)++;
 		enetc_rxbd_next(rx_ring, rxbd, i);
 	}
 }
@@ -1524,16 +1525,16 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
 	int rx_frm_cnt = 0, rx_byte_cnt = 0;
 	struct enetc_bdr *tx_ring;
-	int cleaned_cnt, i;
+	int buffs_missing, i;
 	u32 xdp_act;
 
-	cleaned_cnt = enetc_bd_unused(rx_ring);
+	buffs_missing = enetc_bd_unused(rx_ring);
 	/* next descriptor to process */
 	i = rx_ring->next_to_clean;
 
 	while (likely(rx_frm_cnt < work_limit)) {
 		union enetc_rx_bd *rxbd, *orig_rxbd;
-		int orig_i, orig_cleaned_cnt;
+		int orig_i, orig_buffs_missing;
 		struct xdp_buff xdp_buff;
 		struct sk_buff *skb;
 		u32 bd_status;
@@ -1552,11 +1553,11 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 			break;
 
 		orig_rxbd = rxbd;
-		orig_cleaned_cnt = cleaned_cnt;
+		orig_buffs_missing = buffs_missing;
 		orig_i = i;
 
 		enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
-				     &cleaned_cnt, &xdp_buff);
+				     &buffs_missing, &xdp_buff);
 
 		xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
 
@@ -1572,11 +1573,11 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 			break;
 		case XDP_PASS:
 			rxbd = orig_rxbd;
-			cleaned_cnt = orig_cleaned_cnt;
+			buffs_missing = orig_buffs_missing;
 			i = orig_i;
 
 			skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
-					      &i, &cleaned_cnt,
+					      &i, &buffs_missing,
 					      ENETC_RXB_DMA_SIZE_XDP);
 			if (unlikely(!skb))
 				goto out;
@@ -1640,7 +1641,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 	if (xdp_tx_frm_cnt)
 		enetc_update_tx_ring_tail(tx_ring);
 
-	if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
+	if (buffs_missing > rx_ring->xdp.xdp_tx_in_flight)
 		enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
 				     rx_ring->xdp.xdp_tx_in_flight);
 
-- 
2.34.1


Thread overview: 21+ messages
2023-02-06 10:08 [RFC PATCH net-next 00/11] NXP ENETC AF_XDP zero-copy sockets Vladimir Oltean
2023-02-06 10:08 ` [RFC PATCH net-next 01/11] net: enetc: optimize struct enetc_rx_swbd layout Vladimir Oltean
2023-02-06 10:08 ` [RFC PATCH net-next 02/11] net: enetc: perform XDP RX queue registration at enetc_setup_bpf() time Vladimir Oltean
2023-02-06 10:08 ` [RFC PATCH net-next 03/11] net: enetc: rename "cleaned_cnt" to "buffs_missing" Vladimir Oltean [this message]
2023-02-06 10:08 ` [RFC PATCH net-next 04/11] net: enetc: continue NAPI processing on frames with RX errors Vladimir Oltean
2023-02-06 10:08 ` [RFC PATCH net-next 05/11] net: enetc: add support for ethtool --show-channels Vladimir Oltean
2023-02-06 10:08 ` [RFC PATCH net-next 06/11] net: enetc: consolidate rx_swbd freeing Vladimir Oltean
2023-02-06 10:08 ` [RFC PATCH net-next 07/11] net: enetc: rename enetc_free_tx_frame() to enetc_free_tx_swbd() Vladimir Oltean
2023-02-06 10:08 ` [RFC PATCH net-next 08/11] net: enetc: increment rx_byte_cnt for XDP data path Vladimir Oltean
2023-02-06 10:08 ` [RFC PATCH net-next 09/11] net: enetc: move setting of ENETC_TXBD_FLAGS_F flag to enetc_xdp_map_tx_buff() Vladimir Oltean
2023-02-06 10:08 ` [RFC PATCH net-next 10/11] net: enetc: add RX support for zero-copy XDP sockets Vladimir Oltean
2023-02-06 23:31   ` kernel test robot
2023-02-08 16:36   ` Maciej Fijalkowski
2023-02-06 10:08 ` [RFC PATCH net-next 11/11] net: enetc: add TX " Vladimir Oltean
2023-02-06 10:19   ` Vladimir Oltean
2023-02-08 16:38     ` Maciej Fijalkowski
2023-02-08 16:37   ` Maciej Fijalkowski
2023-02-08 17:08     ` Vladimir Oltean
2023-02-08 17:17       ` Maciej Fijalkowski
2023-03-20 16:30         ` Vladimir Oltean
2023-02-08 16:41 ` [RFC PATCH net-next 00/11] NXP ENETC AF_XDP zero-copy sockets Maciej Fijalkowski
