From: "Íñigo Huguet" <ihuguet@redhat.com>
To: ecree.xilinx@gmail.com, habetsm.xilinx@gmail.com, ap420073@gmail.com
Cc: davem@davemloft.net, edumazet@google.com, kuba@kernel.org,
	pabeni@redhat.com, netdev@vger.kernel.org,
	"Íñigo Huguet" <ihuguet@redhat.com>
Subject: [PATCH net-next 4/5] sfc: refactor efx_set_xdp_tx_queues
Date: Tue, 10 May 2022 10:44:42 +0200	[thread overview]
Message-ID: <20220510084443.14473-5-ihuguet@redhat.com> (raw)
In-Reply-To: <20220510084443.14473-1-ihuguet@redhat.com>

Refactor this code to make it easier to follow what's going on and to
show its intent more clearly.

No functional changes.

Signed-off-by: Íñigo Huguet <ihuguet@redhat.com>
---
 drivers/net/ethernet/sfc/efx_channels.c | 65 ++++++++++---------------
 1 file changed, 27 insertions(+), 38 deletions(-)
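
Note for reviewers: the loops in this patch use the channel iteration
helpers added in patch 1/5 of this series. As a rough sketch only,
built from the checks visible in the removed code (the real macros
from patch 1/5 may be defined differently), they can be read as
filters on top of the existing efx_for_each_channel:

	/* Hedged sketch, not the actual patch 1/5 code: iterate only
	 * TX-capable channels, or only dedicated XDP TX channels.
	 */
	#define efx_for_each_tx_channel(_channel, _efx)		\
		efx_for_each_channel(_channel, _efx)			\
			if ((_channel)->channel < (_efx)->tx_channel_offset) \
				continue;				\
			else

	#define efx_for_each_xdp_channel(_channel, _efx)		\
		efx_for_each_channel(_channel, _efx)			\
			if (!efx_channel_is_xdp_tx(_channel))		\
				continue;				\
			else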

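Also for reviewers: the final while loop keeps the previous behaviour
of reusing queues in round-robin when there are more CPUs than XDP TX
queues. A self-contained, hypothetical illustration of the resulting
mapping (the sizes below are made up for the example, not taken from
the driver):

	#include <stdio.h>

	int main(void)
	{
		const int xdp_tx_queue_count = 8; /* CPUs to serve (assumed) */
		const int n_xdp_queues = 3;       /* queues allocated (assumed) */
		int queue_for_cpu[8];
		int cpu, queue_num = 0;

		/* first pass: one dedicated queue per CPU while queues last */
		for (cpu = 0; cpu < n_xdp_queues; cpu++)
			queue_for_cpu[cpu] = cpu;

		/* wrap-around pass, mirroring the driver's final while loop */
		while (cpu < xdp_tx_queue_count) {
			queue_for_cpu[cpu] = queue_for_cpu[queue_num++];
			cpu++;
		}

		for (cpu = 0; cpu < xdp_tx_queue_count; cpu++)
			printf("CPU %d -> XDP TXQ %d\n", cpu, queue_for_cpu[cpu]);
		return 0;
	}

With these sizes the mapping comes out as 0 1 2 0 1 2 0 1, i.e. the
three queues end up shared among the eight CPUs.
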
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 1c05063a7215..f6634faa1ec4 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -781,17 +781,18 @@ static inline int efx_alloc_xdp_tx_queues(struct efx_nic *efx)
 }
 
 /* Assign a tx queue to one CPU for XDP_TX action */
-static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
-				struct efx_tx_queue *tx_queue)
+static inline int efx_set_xdp_tx_queue(struct efx_nic *efx, int cpu,
+				       struct efx_tx_queue *tx_queue)
 {
-	if (xdp_queue_number >= efx->xdp_tx_queue_count)
+	if (cpu >= efx->xdp_tx_queue_count)
 		return -EINVAL;
 
 	netif_dbg(efx, drv, efx->net_dev,
 		  "Channel %u TXQ %u is XDP %u, HW %u\n",
 		  tx_queue->channel->channel, tx_queue->label,
-		  xdp_queue_number, tx_queue->queue);
-	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+		  cpu, tx_queue->queue);
+
+	efx->xdp_tx_queues[cpu] = tx_queue;
 	return 0;
 }
 
@@ -803,49 +804,37 @@ static void efx_set_xdp_tx_queues(struct efx_nic *efx)
 {
 	struct efx_tx_queue *tx_queue;
 	struct efx_channel *channel;
-	unsigned int next_queue = 0;
-	int xdp_queue_number = 0;
-	int rc;
-
-	efx_for_each_channel(channel, efx) {
-		if (channel->channel < efx->tx_channel_offset)
-			continue;
-
-		if (efx_channel_is_xdp_tx(channel)) {
+	unsigned int queue_num, cpu;
+
+	cpu = 0;
+	if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+		efx_for_each_tx_channel(channel, efx) {
+			/* borrow the channel's first queue (no csum offload) */
+			if (efx_set_xdp_tx_queue(efx, cpu, &channel->tx_queue[0]) == 0)
+				cpu++;
+		}
+	} else {
+		efx_for_each_xdp_channel(channel, efx) {
 			efx_for_each_channel_tx_queue(tx_queue, channel) {
-				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
-							  tx_queue);
-				if (rc == 0)
-					xdp_queue_number++;
+				if (efx_set_xdp_tx_queue(efx, cpu, tx_queue) == 0)
+					cpu++;
 			}
-		} else if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
-
-			/* If XDP is borrowing queues from net stack, it must
-			 * use the queue with no csum offload, which is the
-			 * first one of the channel
-			 * (note: tx_queue_by_type is not initialized yet)
-			 */
-			tx_queue = &channel->tx_queue[0];
-			rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
-						  tx_queue);
-			if (rc == 0)
-				xdp_queue_number++;
 		}
 	}
+
 	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
-		xdp_queue_number != efx->xdp_tx_queue_count);
+		cpu != efx->xdp_tx_queue_count);
 	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
-		xdp_queue_number > efx->xdp_tx_queue_count);
+		cpu > efx->xdp_tx_queue_count);
 
 	/* If we have more CPUs than assigned XDP TX queues, assign the already
 	 * existing queues to the exceeding CPUs
 	 */
-	next_queue = 0;
-	while (xdp_queue_number < efx->xdp_tx_queue_count) {
-		tx_queue = efx->xdp_tx_queues[next_queue++];
-		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-		if (rc == 0)
-			xdp_queue_number++;
+	queue_num = 0;
+	while (cpu < efx->xdp_tx_queue_count) {
+		tx_queue = efx->xdp_tx_queues[queue_num++];
+		if (efx_set_xdp_tx_queue(efx, cpu, tx_queue) == 0)
+			cpu++;
 	}
 }
 
-- 
2.34.1

