From: Michal Krawczyk <mk@semihalf.com>
To: ferruh.yigit@intel.com
Cc: dev@dpdk.org, upstream@semihalf.com, shaibran@amazon.com,
	ndagan@amazon.com, igorch@amazon.com,
	Michal Krawczyk <mk@semihalf.com>,
	stable@dpdk.org
Subject: [dpdk-dev] [PATCH v2 3/7] net/ena: fix per-queue offload capabilities
Date: Fri, 15 Oct 2021 18:26:57 +0200
Message-ID: <20211015162701.16324-4-mk@semihalf.com>
In-Reply-To: <20211015162701.16324-1-mk@semihalf.com>

The PMD shouldn't advertise the same offloads as both per-queue and
per-port [1]. Each offload capability should go to either
[rt]x_queue_offload_capa or [rt]x_offload_capa.

As ENA currently doesn't support any offloads that can be configured
per-queue, only the per-port flags should be set.

In addition, to make the code cleaner, parsing of the appropriate
offload flags is encapsulated into helper functions, in a similar
manner to what the other PMDs do.

[1] https://doc.dpdk.org/guides/prog_guide/
    poll_mode_drv.html?highlight=offloads#hardware-offload
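
For context, below is a minimal application-side sketch (not part of
this patch) of how the two capability fields reported by the PMD are
consumed; the function name, the port_id parameter and the error
handling are illustrative assumptions, and the offload macros are the
pre-21.11 DEV_* names used elsewhere in this driver:

#include <rte_ethdev.h>

/* Query Rx offload capabilities and enable IPv4 checksum at the port
 * level. After this change ENA reports all of its offloads in
 * rx_offload_capa only, so rx_queue_offload_capa is expected to be 0.
 */
static int
example_configure_rx_csum(uint16_t port_id, struct rte_eth_conf *port_conf)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Port-wide offloads are requested via rte_eth_conf. */
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
		port_conf->rxmode.offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;

	/* Only offloads present in rx_queue_offload_capa may be added
	 * on top of the port configuration via rte_eth_rxconf.offloads.
	 */
	return 0;
}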

Fixes: 7369f88f88c0 ("net/ena: convert to new Rx offloads API")
Fixes: 56b8b9b7e5d2 ("net/ena: convert to new Tx offloads API")
Cc: stable@dpdk.org

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Shai Brandes <shaibran@amazon.com>
---
 drivers/net/ena/ena_ethdev.c | 90 ++++++++++++++++++++++++------------
 1 file changed, 60 insertions(+), 30 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 87216f75a9..c2bd2f12af 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -223,6 +223,10 @@ static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
 static int ena_queue_start_all(struct rte_eth_dev *dev,
 			       enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
+static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
 static int ena_infos_get(struct rte_eth_dev *dev,
 			 struct rte_eth_dev_info *dev_info);
 static void ena_interrupt_handler_rte(void *cb_arg);
@@ -1959,12 +1963,63 @@ static void ena_init_rings(struct ena_adapter *adapter,
 	}
 }
 
+static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
+{
+	uint64_t port_offloads = 0;
+
+	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
+		port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+
+	if (adapter->offloads.rx_offloads &
+	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
+		port_offloads |=
+			DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+
+	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
+		port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+	port_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	return port_offloads;
+}
+
+static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
+{
+	uint64_t port_offloads = 0;
+
+	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
+		port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+
+	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
+		port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+	if (adapter->offloads.tx_offloads &
+	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
+	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
+		port_offloads |=
+			DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+
+	return port_offloads;
+}
+
+static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter)
+{
+	RTE_SET_USED(adapter);
+
+	return 0;
+}
+
+static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter)
+{
+	RTE_SET_USED(adapter);
+
+	return 0;
+}
+
 static int ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
 	struct ena_adapter *adapter;
 	struct ena_com_dev *ena_dev;
-	uint64_t rx_feat = 0, tx_feat = 0;
 
 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
@@ -1983,33 +2038,11 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 			ETH_LINK_SPEED_50G  |
 			ETH_LINK_SPEED_100G;
 
-	/* Set Tx & Rx features available for device */
-	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
-		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
-
-	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
-		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM;
-	if (adapter->offloads.tx_offloads &
-	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
-	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
-		tx_feat |= DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
-
-	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
-		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM;
-	if (adapter->offloads.rx_offloads &
-	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
-		rx_feat |= DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
-
-	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-	tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
-
 	/* Inform framework about available features */
-	dev_info->rx_offload_capa = rx_feat;
-	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
-	dev_info->rx_queue_offload_capa = rx_feat;
-	dev_info->tx_offload_capa = tx_feat;
-	dev_info->tx_queue_offload_capa = tx_feat;
+	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
+	dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter);
+	dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter);
+	dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter);
 
 	dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF;
 	dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
@@ -2022,9 +2055,6 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = adapter->max_num_io_queues;
 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
 
-	adapter->tx_supported_offloads = tx_feat;
-	adapter->rx_supported_offloads = rx_feat;
-
 	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
-- 
2.25.1

