All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/2] net/ena: convert to new offloads API
@ 2018-01-16 11:52 Rafal Kozik
  2018-01-16 11:52 ` [PATCH 1/2] net/ena: convert to new Tx " Rafal Kozik
  2018-01-16 11:52 ` [PATCH 2/2] net/ena: convert to new Rx " Rafal Kozik
  0 siblings, 2 replies; 17+ messages in thread
From: Rafal Kozik @ 2018-01-16 11:52 UTC (permalink / raw)
  To: dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch, Rafal Kozik

Ethdev offloads API has changed since:

commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")

These patches add support for the new offloads API.

Rafal Kozik (2):
  net/ena: convert to new Tx offloads API
  net/ena: convert to new Rx offloads API

 drivers/net/ena/ena_ethdev.c | 109 ++++++++++++++++++++++++++++++++++++-------
 drivers/net/ena/ena_ethdev.h |   5 ++
 2 files changed, 97 insertions(+), 17 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [PATCH 1/2] net/ena: convert to new Tx offloads API
  2018-01-16 11:52 [PATCH 0/2] net/ena: convert to new offloads API Rafal Kozik
@ 2018-01-16 11:52 ` Rafal Kozik
  2018-01-17  6:56   ` Shahaf Shuler
  2018-01-17  8:23   ` [PATCH v2 " Rafal Kozik
  2018-01-16 11:52 ` [PATCH 2/2] net/ena: convert to new Rx " Rafal Kozik
  1 sibling, 2 replies; 17+ messages in thread
From: Rafal Kozik @ 2018-01-16 11:52 UTC (permalink / raw)
  To: dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch, Rafal Kozik

Ethdev Tx offloads API has changed since:

commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")

This commit adds support for the new Tx offloads API. The queue configuration
is stored in ena_ring.offloads. While preparing mbufs for Tx, offloads are
allowed only if the appropriate flags in this field are set.

Signed-off-by: Rafal Kozik <rk@semihalf.com>
---
 drivers/net/ena/ena_ethdev.c | 73 +++++++++++++++++++++++++++++++++++---------
 drivers/net/ena/ena_ethdev.h |  3 ++
 2 files changed, 61 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 22db895..6473776 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -164,6 +164,14 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
 #define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)
 
+#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
+			DEV_TX_OFFLOAD_UDP_CKSUM |\
+			DEV_TX_OFFLOAD_IPV4_CKSUM |\
+			DEV_TX_OFFLOAD_TCP_TSO)
+#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
+		       PKT_TX_IP_CKSUM |\
+		       PKT_TX_TCP_SEG)
+
 /** Vendor ID used by Amazon devices */
 #define PCI_VENDOR_ID_AMAZON 0x1D0F
 /** Amazon devices */
@@ -227,6 +235,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
+static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -280,21 +290,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
 }
 
 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
-				       struct ena_com_tx_ctx *ena_tx_ctx)
+				       struct ena_com_tx_ctx *ena_tx_ctx,
+				       uint64_t queue_offloads)
 {
 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
 
-	if (mbuf->ol_flags &
-	    (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
+	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
+	    (queue_offloads & QUEUE_OFFLOADS)) {
 		/* check if TSO is required */
-		if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
+		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
+		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
 			ena_tx_ctx->tso_enable = true;
 
 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
 		}
 
 		/* check if L3 checksum is needed */
-		if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
+		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
 			ena_tx_ctx->l3_csum_enable = true;
 
 		if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -310,19 +323,17 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 		}
 
 		/* check if L4 checksum is needed */
-		switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
-		case PKT_TX_TCP_CKSUM:
+		if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
+		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 			ena_tx_ctx->l4_csum_enable = true;
-			break;
-		case PKT_TX_UDP_CKSUM:
+		} else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
+			   (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
 			ena_tx_ctx->l4_csum_enable = true;
-			break;
-		default:
+		} else {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
 			ena_tx_ctx->l4_csum_enable = false;
-			break;
 		}
 
 		ena_meta->mss = mbuf->tso_segsz;
@@ -945,7 +956,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      __rte_unused const struct rte_eth_txconf *tx_conf)
+			      const struct rte_eth_txconf *tx_conf)
 {
 	struct ena_com_create_io_ctx ctx =
 		/* policy set to _HOST just to satisfy icc compiler */
@@ -982,6 +993,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	if (!ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
+		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
+		return -EINVAL;
+	}
+
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
 
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1036,6 +1052,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
+	txq->offloads = tx_conf->offloads;
+
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
 	dev->data->tx_queues[queue_idx] = txq;
@@ -1386,6 +1404,14 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
+	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
+		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
+		    "requested 0x%lx supported 0x%lx\n",
+		    tx_offloads, adapter->tx_supported_offloads);
+		return -ENOTSUP;
+	}
 
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1407,6 +1433,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		break;
 	}
 
+	adapter->tx_selected_offloads = tx_offloads;
 	return 0;
 }
 
@@ -1435,13 +1462,26 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
+static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads)
+{
+	uint64_t port_offloads = adapter->tx_selected_offloads;
+
+	/* Check if port supports all requested offloads.
+	 * True if all offloads selected for queue are set for port.
+	 */
+	if ((offloads & port_offloads) != offloads)
+		return false;
+	return true;
+}
+
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
 	struct ena_adapter *adapter;
 	struct ena_com_dev *ena_dev;
 	struct ena_com_dev_get_features_ctx feat;
-	uint32_t rx_feat = 0, tx_feat = 0;
+	uint64_t rx_feat = 0, tx_feat = 0;
 	int rc = 0;
 
 	ena_assert_msg(dev->data != NULL, "Uninitialized device");
@@ -1490,6 +1530,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = rx_feat;
 	dev_info->tx_offload_capa = tx_feat;
+	dev_info->tx_queue_offload_capa = tx_feat;
 
 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
 	dev_info->max_rx_pktlen  = adapter->max_mtu;
@@ -1498,6 +1539,8 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = adapter->num_queues;
 	dev_info->max_tx_queues = adapter->num_queues;
 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
+
+	adapter->tx_supported_offloads = tx_feat;
 }
 
 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1714,7 +1757,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		} /* there's no else as we take advantage of memset zeroing */
 
 		/* Set TX offloads flags, if applicable */
-		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx);
+		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);
 
 		if (unlikely(mbuf->ol_flags &
 			     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index be8bc9f..3e72777 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -91,6 +91,7 @@ struct ena_ring {
 	uint8_t tx_max_header_size;
 	int configured;
 	struct ena_adapter *adapter;
+	uint64_t offloads;
 } __rte_cache_aligned;
 
 enum ena_adapter_state {
@@ -175,6 +176,8 @@ struct ena_adapter {
 	struct ena_driver_stats *drv_stats;
 	enum ena_adapter_state state;
 
+	uint64_t tx_supported_offloads;
+	uint64_t tx_selected_offloads;
 };
 
 #endif /* _ENA_ETHDEV_H_ */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 2/2] net/ena: convert to new Rx offloads API
  2018-01-16 11:52 [PATCH 0/2] net/ena: convert to new offloads API Rafal Kozik
  2018-01-16 11:52 ` [PATCH 1/2] net/ena: convert to new Tx " Rafal Kozik
@ 2018-01-16 11:52 ` Rafal Kozik
  2018-01-17  6:57   ` Shahaf Shuler
  2018-01-17  8:26   ` [PATCH v2 " Rafal Kozik
  1 sibling, 2 replies; 17+ messages in thread
From: Rafal Kozik @ 2018-01-16 11:52 UTC (permalink / raw)
  To: dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch, Rafal Kozik

Ethdev Rx offloads API has changed since:

commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")

This commit adds support for the new Rx offloads API.

Signed-off-by: Rafal Kozik <rk@semihalf.com>
---
 drivers/net/ena/ena_ethdev.c | 36 ++++++++++++++++++++++++++++++++++--
 drivers/net/ena/ena_ethdev.h |  2 ++
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 6473776..f069ca8 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -237,6 +237,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
 static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
 					      uint64_t offloads);
+static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -766,7 +768,8 @@ static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
 {
 	uint32_t max_frame_len = adapter->max_mtu;
 
-	if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1)
+	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
+	    DEV_RX_OFFLOAD_JUMBO_FRAME)
 		max_frame_len =
 			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;
 
@@ -1065,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      __rte_unused const struct rte_eth_rxconf *rx_conf,
+			      const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
 	struct ena_com_create_io_ctx ctx =
@@ -1101,6 +1104,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
+		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
+		return -EINVAL;
+	}
+
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
 
 	ctx.qid = ena_qid;
@@ -1405,6 +1413,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
 	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
 		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
@@ -1413,6 +1422,13 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		return -ENOTSUP;
 	}
 
+	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
+		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
+		    "requested 0x%lx supported 0x%lx\n",
+		    rx_offloads, adapter->rx_supported_offloads);
+		return -ENOTSUP;
+	}
+
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
 		PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
@@ -1434,6 +1450,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	adapter->tx_selected_offloads = tx_offloads;
+	adapter->rx_selected_offloads = rx_offloads;
 	return 0;
 }
 
@@ -1475,6 +1492,19 @@ static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
 	return true;
 }
 
+static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads)
+{
+	uint64_t port_offloads = adapter->rx_selected_offloads;
+
+	/* Check if port supports all requested offloads.
+	 * True if all offloads selected for queue are set for port.
+	 */
+	if ((offloads & port_offloads) != offloads)
+		return false;
+	return true;
+}
+
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
@@ -1529,6 +1559,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = rx_feat;
+	dev_info->rx_queue_offload_capa = rx_feat;
 	dev_info->tx_offload_capa = tx_feat;
 	dev_info->tx_queue_offload_capa = tx_feat;
 
@@ -1541,6 +1572,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
 
 	adapter->tx_supported_offloads = tx_feat;
+	adapter->rx_supported_offloads = rx_feat;
 }
 
 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 3e72777..394d05e 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -178,6 +178,8 @@ struct ena_adapter {
 
 	uint64_t tx_supported_offloads;
 	uint64_t tx_selected_offloads;
+	uint64_t rx_supported_offloads;
+	uint64_t rx_selected_offloads;
 };
 
 #endif /* _ENA_ETHDEV_H_ */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [PATCH 1/2] net/ena: convert to new Tx offloads API
  2018-01-16 11:52 ` [PATCH 1/2] net/ena: convert to new Tx " Rafal Kozik
@ 2018-01-17  6:56   ` Shahaf Shuler
  2018-01-17 18:58     ` Ferruh Yigit
  2018-01-17  8:23   ` [PATCH v2 " Rafal Kozik
  1 sibling, 1 reply; 17+ messages in thread
From: Shahaf Shuler @ 2018-01-17  6:56 UTC (permalink / raw)
  To: Rafal Kozik, dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch

Tuesday, January 16, 2018 1:53 PM, Rafal Kozik:
> Subject: [dpdk-dev] [PATCH 1/2] net/ena: convert to new Tx offloads API
> 
> Ethdev Tx offloads API has changed since:
> 
> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
> 
> This commit support the new Tx offloads API. Queue configuration is stored
> in ena_ring.offloads. During preparing mbufs for tx, offloads are allowed only
> if appropriate flags in this field are set.
> 
> Signed-off-by: Rafal Kozik <rk@semihalf.com>
> ---
>  drivers/net/ena/ena_ethdev.c | 73
> +++++++++++++++++++++++++++++++++++---------
>  drivers/net/ena/ena_ethdev.h |  3 ++
>  2 files changed, 61 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
> index 22db895..6473776 100644
> --- a/drivers/net/ena/ena_ethdev.c
> +++ b/drivers/net/ena/ena_ethdev.c
> @@ -164,6 +164,14 @@ static const struct ena_stats
> ena_stats_ena_com_strings[] = {
>  #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
>  #define ENA_STATS_ARRAY_ENA_COM
> 	ARRAY_SIZE(ena_stats_ena_com_strings)
> 
> +#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
> +			DEV_TX_OFFLOAD_UDP_CKSUM |\
> +			DEV_TX_OFFLOAD_IPV4_CKSUM |\
> +			DEV_TX_OFFLOAD_TCP_TSO)
> +#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
> +		       PKT_TX_IP_CKSUM |\
> +		       PKT_TX_TCP_SEG)
> +
>  /** Vendor ID used by Amazon devices */  #define
> PCI_VENDOR_ID_AMAZON 0x1D0F
>  /** Amazon devices */
> @@ -227,6 +235,8 @@ static int ena_rss_reta_query(struct rte_eth_dev
> *dev,
>  			      struct rte_eth_rss_reta_entry64 *reta_conf,
>  			      uint16_t reta_size);
>  static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
> +static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter
> *adapter,
> +					      uint64_t offloads);
> 
>  static const struct eth_dev_ops ena_dev_ops = {
>  	.dev_configure        = ena_dev_configure,
> @@ -280,21 +290,24 @@ static inline void ena_rx_mbuf_prepare(struct
> rte_mbuf *mbuf,  }
> 
>  static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
> -				       struct ena_com_tx_ctx *ena_tx_ctx)
> +				       struct ena_com_tx_ctx *ena_tx_ctx,
> +				       uint64_t queue_offloads)
>  {
>  	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
> 
> -	if (mbuf->ol_flags &
> -	    (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
> +	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
> +	    (queue_offloads & QUEUE_OFFLOADS)) {
>  		/* check if TSO is required */
> -		if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
> +		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
> +		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
>  			ena_tx_ctx->tso_enable = true;
> 
>  			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
>  		}
> 
>  		/* check if L3 checksum is needed */
> -		if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
> +		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
> +		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
>  			ena_tx_ctx->l3_csum_enable = true;
> 
>  		if (mbuf->ol_flags & PKT_TX_IPV6) {
> @@ -310,19 +323,17 @@ static inline void ena_tx_mbuf_prepare(struct
> rte_mbuf *mbuf,
>  		}
> 
>  		/* check if L4 checksum is needed */
> -		switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
> -		case PKT_TX_TCP_CKSUM:
> +		if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
> +		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
>  			ena_tx_ctx->l4_proto =
> ENA_ETH_IO_L4_PROTO_TCP;
>  			ena_tx_ctx->l4_csum_enable = true;
> -			break;
> -		case PKT_TX_UDP_CKSUM:
> +		} else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
> +			   (queue_offloads &
> DEV_TX_OFFLOAD_UDP_CKSUM)) {
>  			ena_tx_ctx->l4_proto =
> ENA_ETH_IO_L4_PROTO_UDP;
>  			ena_tx_ctx->l4_csum_enable = true;
> -			break;
> -		default:
> +		} else {
>  			ena_tx_ctx->l4_proto =
> ENA_ETH_IO_L4_PROTO_UNKNOWN;
>  			ena_tx_ctx->l4_csum_enable = false;
> -			break;
>  		}
> 
>  		ena_meta->mss = mbuf->tso_segsz;
> @@ -945,7 +956,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev
> *dev,
>  			      uint16_t queue_idx,
>  			      uint16_t nb_desc,
>  			      __rte_unused unsigned int socket_id,
> -			      __rte_unused const struct rte_eth_txconf
> *tx_conf)
> +			      const struct rte_eth_txconf *tx_conf)
>  {
>  	struct ena_com_create_io_ctx ctx =
>  		/* policy set to _HOST just to satisfy icc compiler */ @@ -
> 982,6 +993,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
>  		return -EINVAL;
>  	}
> 
> +	if (!ena_are_tx_queue_offloads_allowed(adapter, tx_conf-
> >offloads)) {
> +		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
> +		return -EINVAL;
> +	}

Here it is better to also check the ETH_TXQ_FLAGS_IGNORE flag.
If the application has not yet moved to the new API, then it won't set any port Tx offloads. So for old applications, the ena_are_tx_queue_offloads_allowed check is not necessary. 


> +
>  	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
> 
>  	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; @@ -1036,6
> +1052,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
>  	for (i = 0; i < txq->ring_size; i++)
>  		txq->empty_tx_reqs[i] = i;
> 
> +	txq->offloads = tx_conf->offloads;
> +
>  	/* Store pointer to this queue in upper layer */
>  	txq->configured = 1;
>  	dev->data->tx_queues[queue_idx] = txq; @@ -1386,6 +1404,14 @@
> static int ena_dev_configure(struct rte_eth_dev *dev)  {
>  	struct ena_adapter *adapter =
>  		(struct ena_adapter *)(dev->data->dev_private);
> +	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
> +
> +	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
> +		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
> +		    "requested 0x%lx supported 0x%lx\n",
> +		    tx_offloads, adapter->tx_supported_offloads);
> +		return -ENOTSUP;
> +	}


32bit compilation will break with above print. Using PRIx64 is preferable (I also changed it on the mlx series).

> 
>  	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
>  	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) { @@ -
> 1407,6 +1433,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
>  		break;
>  	}
> 
> +	adapter->tx_selected_offloads = tx_offloads;
>  	return 0;
>  }
> 
> @@ -1435,13 +1462,26 @@ static void ena_init_rings(struct ena_adapter
> *adapter)
>  	}
>  }
> 
> +static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter
> *adapter,
> +					      uint64_t offloads)
> +{
> +	uint64_t port_offloads = adapter->tx_selected_offloads;
> +
> +	/* Check if port supports all requested offloads.
> +	 * True if all offloads selected for queue are set for port.
> +	 */
> +	if ((offloads & port_offloads) != offloads)
> +		return false;
> +	return true;
> +}
> +
>  static void ena_infos_get(struct rte_eth_dev *dev,
>  			  struct rte_eth_dev_info *dev_info)  {
>  	struct ena_adapter *adapter;
>  	struct ena_com_dev *ena_dev;
>  	struct ena_com_dev_get_features_ctx feat;
> -	uint32_t rx_feat = 0, tx_feat = 0;
> +	uint64_t rx_feat = 0, tx_feat = 0;
>  	int rc = 0;
> 
>  	ena_assert_msg(dev->data != NULL, "Uninitialized device"); @@ -
> 1490,6 +1530,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
>  	/* Inform framework about available features */
>  	dev_info->rx_offload_capa = rx_feat;
>  	dev_info->tx_offload_capa = tx_feat;
> +	dev_info->tx_queue_offload_capa = tx_feat;
> 
>  	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
>  	dev_info->max_rx_pktlen  = adapter->max_mtu; @@ -1498,6
> +1539,8 @@ static void ena_infos_get(struct rte_eth_dev *dev,
>  	dev_info->max_rx_queues = adapter->num_queues;
>  	dev_info->max_tx_queues = adapter->num_queues;
>  	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
> +
> +	adapter->tx_supported_offloads = tx_feat;
>  }
> 
>  static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf
> **rx_pkts, @@ -1714,7 +1757,7 @@ static uint16_t eth_ena_xmit_pkts(void
> *tx_queue, struct rte_mbuf **tx_pkts,
>  		} /* there's no else as we take advantage of memset zeroing
> */
> 
>  		/* Set TX offloads flags, if applicable */
> -		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx);
> +		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring-
> >offloads);
> 
>  		if (unlikely(mbuf->ol_flags &
>  			     (PKT_RX_L4_CKSUM_BAD |
> PKT_RX_IP_CKSUM_BAD))) diff --git a/drivers/net/ena/ena_ethdev.h
> b/drivers/net/ena/ena_ethdev.h index be8bc9f..3e72777 100644
> --- a/drivers/net/ena/ena_ethdev.h
> +++ b/drivers/net/ena/ena_ethdev.h
> @@ -91,6 +91,7 @@ struct ena_ring {
>  	uint8_t tx_max_header_size;
>  	int configured;
>  	struct ena_adapter *adapter;
> +	uint64_t offloads;
>  } __rte_cache_aligned;
> 
>  enum ena_adapter_state {
> @@ -175,6 +176,8 @@ struct ena_adapter {
>  	struct ena_driver_stats *drv_stats;
>  	enum ena_adapter_state state;
> 
> +	uint64_t tx_supported_offloads;
> +	uint64_t tx_selected_offloads;
>  };
> 
>  #endif /* _ENA_ETHDEV_H_ */

Rest looks OK. 

Reviewed-By: Shahaf Shuler <shahafs@mellanox.com>


> --
> 2.7.4

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/2] net/ena: convert to new Rx offloads API
  2018-01-16 11:52 ` [PATCH 2/2] net/ena: convert to new Rx " Rafal Kozik
@ 2018-01-17  6:57   ` Shahaf Shuler
  2018-01-17  8:26   ` [PATCH v2 " Rafal Kozik
  1 sibling, 0 replies; 17+ messages in thread
From: Shahaf Shuler @ 2018-01-17  6:57 UTC (permalink / raw)
  To: Rafal Kozik, dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch

Tuesday, January 16, 2018 1:53 PM, Rafal Kozik:
> Ethdev Rx offloads API has changed since:
> 
> commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
> 
> This commit support the new Rx offloads API.
> 
> Signed-off-by: Rafal Kozik <rk@semihalf.com>
> ---
>  drivers/net/ena/ena_ethdev.c | 36
> ++++++++++++++++++++++++++++++++++--
>  drivers/net/ena/ena_ethdev.h |  2 ++
>  2 files changed, 36 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
> index 6473776..f069ca8 100644
> --- a/drivers/net/ena/ena_ethdev.c
> +++ b/drivers/net/ena/ena_ethdev.c
> @@ -237,6 +237,8 @@ static int ena_rss_reta_query(struct rte_eth_dev
> *dev,  static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
> static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter
> *adapter,
>  					      uint64_t offloads);
> +static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter
> *adapter,
> +					      uint64_t offloads);
> 
>  static const struct eth_dev_ops ena_dev_ops = {
>  	.dev_configure        = ena_dev_configure,
> @@ -766,7 +768,8 @@ static uint32_t ena_get_mtu_conf(struct
> ena_adapter *adapter)  {
>  	uint32_t max_frame_len = adapter->max_mtu;
> 
> -	if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame ==
> 1)
> +	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
> +	    DEV_RX_OFFLOAD_JUMBO_FRAME)
>  		max_frame_len =
>  			adapter->rte_eth_dev_data-
> >dev_conf.rxmode.max_rx_pkt_len;
> 
> @@ -1065,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev
> *dev,
>  			      uint16_t queue_idx,
>  			      uint16_t nb_desc,
>  			      __rte_unused unsigned int socket_id,
> -			      __rte_unused const struct rte_eth_rxconf
> *rx_conf,
> +			      const struct rte_eth_rxconf *rx_conf,
>  			      struct rte_mempool *mp)
>  {
>  	struct ena_com_create_io_ctx ctx =
> @@ -1101,6 +1104,11 @@ static int ena_rx_queue_setup(struct
> rte_eth_dev *dev,
>  		return -EINVAL;
>  	}
> 
> +	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf-
> >offloads)) {
> +		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
> +		return -EINVAL;
> +	}
> +
>  	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
> 
>  	ctx.qid = ena_qid;
> @@ -1405,6 +1413,7 @@ static int ena_dev_configure(struct rte_eth_dev
> *dev)
>  	struct ena_adapter *adapter =
>  		(struct ena_adapter *)(dev->data->dev_private);
>  	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
> +	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
> 
>  	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
>  		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
> @@ -1413,6 +1422,13 @@ static int ena_dev_configure(struct rte_eth_dev
> *dev)
>  		return -ENOTSUP;
>  	}
> 
> +	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
> +		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
> +		    "requested 0x%lx supported 0x%lx\n",
> +		    rx_offloads, adapter->rx_supported_offloads);
> +		return -ENOTSUP;
> +	}
> +

Same comment about the 32b compilation. 

>  	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
>  	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
>  		PMD_INIT_LOG(ERR, "Illegal adapter state: %d", @@ -1434,6
> +1450,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
>  	}
> 
>  	adapter->tx_selected_offloads = tx_offloads;
> +	adapter->rx_selected_offloads = rx_offloads;
>  	return 0;
>  }
> 
> @@ -1475,6 +1492,19 @@ static bool
> ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
>  	return true;
>  }
> 
> +static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter
> *adapter,
> +					      uint64_t offloads)
> +{
> +	uint64_t port_offloads = adapter->rx_selected_offloads;
> +
> +	/* Check if port supports all requested offloads.
> +	 * True if all offloads selected for queue are set for port.
> +	 */
> +	if ((offloads & port_offloads) != offloads)
> +		return false;
> +	return true;
> +}
> +
>  static void ena_infos_get(struct rte_eth_dev *dev,
>  			  struct rte_eth_dev_info *dev_info)  { @@ -1529,6
> +1559,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
> 
>  	/* Inform framework about available features */
>  	dev_info->rx_offload_capa = rx_feat;
> +	dev_info->rx_queue_offload_capa = rx_feat;
>  	dev_info->tx_offload_capa = tx_feat;
>  	dev_info->tx_queue_offload_capa = tx_feat;
> 
> @@ -1541,6 +1572,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
>  	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
> 
>  	adapter->tx_supported_offloads = tx_feat;
> +	adapter->rx_supported_offloads = rx_feat;
>  }
> 
>  static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf
> **rx_pkts, diff --git a/drivers/net/ena/ena_ethdev.h
> b/drivers/net/ena/ena_ethdev.h index 3e72777..394d05e 100644
> --- a/drivers/net/ena/ena_ethdev.h
> +++ b/drivers/net/ena/ena_ethdev.h
> @@ -178,6 +178,8 @@ struct ena_adapter {
> 
>  	uint64_t tx_supported_offloads;
>  	uint64_t tx_selected_offloads;
> +	uint64_t rx_supported_offloads;
> +	uint64_t rx_selected_offloads;
>  };
> 
>  #endif /* _ENA_ETHDEV_H_ */

Reviewed-By: Shahaf Shuler <shahafs@mellanox.com>

> --
> 2.7.4

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [PATCH v2 1/2] net/ena: convert to new Tx offloads API
  2018-01-16 11:52 ` [PATCH 1/2] net/ena: convert to new Tx " Rafal Kozik
  2018-01-17  6:56   ` Shahaf Shuler
@ 2018-01-17  8:23   ` Rafal Kozik
  2018-01-17 18:58     ` Ferruh Yigit
  2018-01-18 14:53     ` Ferruh Yigit
  1 sibling, 2 replies; 17+ messages in thread
From: Rafal Kozik @ 2018-01-17  8:23 UTC (permalink / raw)
  To: dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch, Rafal Kozik

Ethdev Tx offloads API has changed since:

commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")

This commit adds support for the new Tx offloads API. The queue configuration
is stored in ena_ring.offloads. While preparing mbufs for Tx, offloads are
allowed only if the appropriate flags in this field are set.

Signed-off-by: Rafal Kozik <rk@semihalf.com>
---
v2:
 * Check ETH_TXQ_FLAGS_IGNORE flag.
 * Use PRIx64 in printf.

 drivers/net/ena/ena_ethdev.c | 74 +++++++++++++++++++++++++++++++++++---------
 drivers/net/ena/ena_ethdev.h |  3 ++
 2 files changed, 62 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 22db895..54ccc3d 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -164,6 +164,14 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
 #define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)
 
+#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
+			DEV_TX_OFFLOAD_UDP_CKSUM |\
+			DEV_TX_OFFLOAD_IPV4_CKSUM |\
+			DEV_TX_OFFLOAD_TCP_TSO)
+#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
+		       PKT_TX_IP_CKSUM |\
+		       PKT_TX_TCP_SEG)
+
 /** Vendor ID used by Amazon devices */
 #define PCI_VENDOR_ID_AMAZON 0x1D0F
 /** Amazon devices */
@@ -227,6 +235,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
+static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -280,21 +290,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
 }
 
 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
-				       struct ena_com_tx_ctx *ena_tx_ctx)
+				       struct ena_com_tx_ctx *ena_tx_ctx,
+				       uint64_t queue_offloads)
 {
 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
 
-	if (mbuf->ol_flags &
-	    (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
+	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
+	    (queue_offloads & QUEUE_OFFLOADS)) {
 		/* check if TSO is required */
-		if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
+		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
+		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
 			ena_tx_ctx->tso_enable = true;
 
 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
 		}
 
 		/* check if L3 checksum is needed */
-		if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
+		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
 			ena_tx_ctx->l3_csum_enable = true;
 
 		if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -310,19 +323,17 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 		}
 
 		/* check if L4 checksum is needed */
-		switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
-		case PKT_TX_TCP_CKSUM:
+		if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
+		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
 			ena_tx_ctx->l4_csum_enable = true;
-			break;
-		case PKT_TX_UDP_CKSUM:
+		} else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
+			   (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
 			ena_tx_ctx->l4_csum_enable = true;
-			break;
-		default:
+		} else {
 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
 			ena_tx_ctx->l4_csum_enable = false;
-			break;
 		}
 
 		ena_meta->mss = mbuf->tso_segsz;
@@ -945,7 +956,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      __rte_unused const struct rte_eth_txconf *tx_conf)
+			      const struct rte_eth_txconf *tx_conf)
 {
 	struct ena_com_create_io_ctx ctx =
 		/* policy set to _HOST just to satisfy icc compiler */
@@ -982,6 +993,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
+	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
+		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
+		return -EINVAL;
+	}
+
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
 
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1036,6 +1053,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
+	txq->offloads = tx_conf->offloads;
+
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
 	dev->data->tx_queues[queue_idx] = txq;
@@ -1386,6 +1405,14 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
+	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
+		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
+		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+		    tx_offloads, adapter->tx_supported_offloads);
+		return -ENOTSUP;
+	}
 
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1407,6 +1434,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		break;
 	}
 
+	adapter->tx_selected_offloads = tx_offloads;
 	return 0;
 }
 
@@ -1435,13 +1463,26 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
+static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads)
+{
+	uint64_t port_offloads = adapter->tx_selected_offloads;
+
+	/* Check if port supports all requested offloads.
+	 * True if all offloads selected for queue are set for port.
+	 */
+	if ((offloads & port_offloads) != offloads)
+		return false;
+	return true;
+}
+
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
 	struct ena_adapter *adapter;
 	struct ena_com_dev *ena_dev;
 	struct ena_com_dev_get_features_ctx feat;
-	uint32_t rx_feat = 0, tx_feat = 0;
+	uint64_t rx_feat = 0, tx_feat = 0;
 	int rc = 0;
 
 	ena_assert_msg(dev->data != NULL, "Uninitialized device");
@@ -1490,6 +1531,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = rx_feat;
 	dev_info->tx_offload_capa = tx_feat;
+	dev_info->tx_queue_offload_capa = tx_feat;
 
 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
 	dev_info->max_rx_pktlen  = adapter->max_mtu;
@@ -1498,6 +1540,8 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = adapter->num_queues;
 	dev_info->max_tx_queues = adapter->num_queues;
 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
+
+	adapter->tx_supported_offloads = tx_feat;
 }
 
 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1714,7 +1758,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		} /* there's no else as we take advantage of memset zeroing */
 
 		/* Set TX offloads flags, if applicable */
-		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx);
+		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);
 
 		if (unlikely(mbuf->ol_flags &
 			     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index be8bc9f..3e72777 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -91,6 +91,7 @@ struct ena_ring {
 	uint8_t tx_max_header_size;
 	int configured;
 	struct ena_adapter *adapter;
+	uint64_t offloads;
 } __rte_cache_aligned;
 
 enum ena_adapter_state {
@@ -175,6 +176,8 @@ struct ena_adapter {
 	struct ena_driver_stats *drv_stats;
 	enum ena_adapter_state state;
 
+	uint64_t tx_supported_offloads;
+	uint64_t tx_selected_offloads;
 };
 
 #endif /* _ENA_ETHDEV_H_ */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH v2 2/2] net/ena: convert to new Rx offloads API
  2018-01-16 11:52 ` [PATCH 2/2] net/ena: convert to new Rx " Rafal Kozik
  2018-01-17  6:57   ` Shahaf Shuler
@ 2018-01-17  8:26   ` Rafal Kozik
  2018-01-18 15:06     ` Michał Krawczyk
  1 sibling, 1 reply; 17+ messages in thread
From: Rafal Kozik @ 2018-01-17  8:26 UTC (permalink / raw)
  To: dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch, Rafal Kozik

Ethdev Rx offloads API has changed since:

commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")

This commit supports the new Rx offloads API.

Signed-off-by: Rafal Kozik <rk@semihalf.com>
---
v2:
 * Use PRIx64 in printf.

 drivers/net/ena/ena_ethdev.c | 36 ++++++++++++++++++++++++++++++++++--
 drivers/net/ena/ena_ethdev.h |  2 ++
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 54ccc3d..1dfbe39 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -237,6 +237,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
 static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
 					      uint64_t offloads);
+static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -766,7 +768,8 @@ static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
 {
 	uint32_t max_frame_len = adapter->max_mtu;
 
-	if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1)
+	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
+	    DEV_RX_OFFLOAD_JUMBO_FRAME)
 		max_frame_len =
 			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;
 
@@ -1066,7 +1069,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      __rte_unused const struct rte_eth_rxconf *rx_conf,
+			      const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
 	struct ena_com_create_io_ctx ctx =
@@ -1102,6 +1105,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
+		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
+		return -EINVAL;
+	}
+
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
 
 	ctx.qid = ena_qid;
@@ -1406,6 +1414,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
 	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
 		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
@@ -1414,6 +1423,13 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		return -ENOTSUP;
 	}
 
+	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
+		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
+		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+		    rx_offloads, adapter->rx_supported_offloads);
+		return -ENOTSUP;
+	}
+
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
 		PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
@@ -1435,6 +1451,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	adapter->tx_selected_offloads = tx_offloads;
+	adapter->rx_selected_offloads = rx_offloads;
 	return 0;
 }
 
@@ -1476,6 +1493,19 @@ static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
 	return true;
 }
 
+static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
+					      uint64_t offloads)
+{
+	uint64_t port_offloads = adapter->rx_selected_offloads;
+
+	/* Check if port supports all requested offloads.
+	 * True if all offloads selected for queue are set for port.
+	 */
+	if ((offloads & port_offloads) != offloads)
+		return false;
+	return true;
+}
+
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
@@ -1530,6 +1560,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = rx_feat;
+	dev_info->rx_queue_offload_capa = rx_feat;
 	dev_info->tx_offload_capa = tx_feat;
 	dev_info->tx_queue_offload_capa = tx_feat;
 
@@ -1542,6 +1573,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
 
 	adapter->tx_supported_offloads = tx_feat;
+	adapter->rx_supported_offloads = rx_feat;
 }
 
 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 3e72777..394d05e 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -178,6 +178,8 @@ struct ena_adapter {
 
 	uint64_t tx_supported_offloads;
 	uint64_t tx_selected_offloads;
+	uint64_t rx_supported_offloads;
+	uint64_t rx_selected_offloads;
 };
 
 #endif /* _ENA_ETHDEV_H_ */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 1/2] net/ena: convert to new Tx offloads API
  2018-01-17  8:23   ` [PATCH v2 " Rafal Kozik
@ 2018-01-17 18:58     ` Ferruh Yigit
  2018-01-18  9:18       ` Rafał Kozik
  2018-01-18 14:53     ` Ferruh Yigit
  1 sibling, 1 reply; 17+ messages in thread
From: Ferruh Yigit @ 2018-01-17 18:58 UTC (permalink / raw)
  To: Rafal Kozik, dev, Shahaf Shuler; +Cc: mw, mk, gtzalik, evgenys, matua, igorch

On 1/17/2018 8:23 AM, Rafal Kozik wrote:
> Ethdev Tx offloads API has changed since:
> 
> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
> 
> This commit support the new Tx offloads API. Queue configuration
> is stored in ena_ring.offloads. During preparing mbufs for tx, offloads are
> allowed only if appropriate flags in this field are set.
> 
> Signed-off-by: Rafal Kozik <rk@semihalf.com>

<...>

> @@ -280,21 +290,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
>  }
>  
>  static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
> -				       struct ena_com_tx_ctx *ena_tx_ctx)
> +				       struct ena_com_tx_ctx *ena_tx_ctx,
> +				       uint64_t queue_offloads)
>  {
>  	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
>  
> -	if (mbuf->ol_flags &
> -	    (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
> +	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
> +	    (queue_offloads & QUEUE_OFFLOADS)) {
>  		/* check if TSO is required */
> -		if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
> +		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
> +		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
>  			ena_tx_ctx->tso_enable = true;
>  
>  			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
>  		}
>  
>  		/* check if L3 checksum is needed */
> -		if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
> +		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
> +		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
>  			ena_tx_ctx->l3_csum_enable = true;

This function is in the fast path, right?
Do you really need the new extra check against queue_offloads? Isn't that
information for the setup phase?

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 1/2] net/ena: convert to new Tx offloads API
  2018-01-17  6:56   ` Shahaf Shuler
@ 2018-01-17 18:58     ` Ferruh Yigit
  2018-01-18  6:26       ` Shahaf Shuler
  0 siblings, 1 reply; 17+ messages in thread
From: Ferruh Yigit @ 2018-01-17 18:58 UTC (permalink / raw)
  To: Shahaf Shuler, Rafal Kozik, dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch

On 1/17/2018 6:56 AM, Shahaf Shuler wrote:
> Tuesday, January 16, 2018 1:53 PM, Rafal Kozik:
>> Subject: [dpdk-dev] [PATCH 1/2] net/ena: convert to new Tx offloads API
>>
>> Ethdev Tx offloads API has changed since:
>>
>> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
>>
>> This commit support the new Tx offloads API. Queue configuration is stored
>> in ena_ring.offloads. During preparing mbufs for tx, offloads are allowed only
>> if appropriate flags in this field are set.
>>
>> Signed-off-by: Rafal Kozik <rk@semihalf.com>
>> ---
>>  drivers/net/ena/ena_ethdev.c | 73
>> +++++++++++++++++++++++++++++++++++---------
>>  drivers/net/ena/ena_ethdev.h |  3 ++
>>  2 files changed, 61 insertions(+), 15 deletions(-)
>>
>> diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
>> index 22db895..6473776 100644
>> --- a/drivers/net/ena/ena_ethdev.c
>> +++ b/drivers/net/ena/ena_ethdev.c
>> @@ -164,6 +164,14 @@ static const struct ena_stats
>> ena_stats_ena_com_strings[] = {
>>  #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
>>  #define ENA_STATS_ARRAY_ENA_COM
>> 	ARRAY_SIZE(ena_stats_ena_com_strings)
>>
>> +#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
>> +			DEV_TX_OFFLOAD_UDP_CKSUM |\
>> +			DEV_TX_OFFLOAD_IPV4_CKSUM |\
>> +			DEV_TX_OFFLOAD_TCP_TSO)
>> +#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
>> +		       PKT_TX_IP_CKSUM |\
>> +		       PKT_TX_TCP_SEG)
>> +
>>  /** Vendor ID used by Amazon devices */  #define
>> PCI_VENDOR_ID_AMAZON 0x1D0F
>>  /** Amazon devices */
>> @@ -227,6 +235,8 @@ static int ena_rss_reta_query(struct rte_eth_dev
>> *dev,
>>  			      struct rte_eth_rss_reta_entry64 *reta_conf,
>>  			      uint16_t reta_size);
>>  static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
>> +static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter
>> *adapter,
>> +					      uint64_t offloads);
>>
>>  static const struct eth_dev_ops ena_dev_ops = {
>>  	.dev_configure        = ena_dev_configure,
>> @@ -280,21 +290,24 @@ static inline void ena_rx_mbuf_prepare(struct
>> rte_mbuf *mbuf,  }
>>
>>  static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
>> -				       struct ena_com_tx_ctx *ena_tx_ctx)
>> +				       struct ena_com_tx_ctx *ena_tx_ctx,
>> +				       uint64_t queue_offloads)
>>  {
>>  	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
>>
>> -	if (mbuf->ol_flags &
>> -	    (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
>> +	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
>> +	    (queue_offloads & QUEUE_OFFLOADS)) {
>>  		/* check if TSO is required */
>> -		if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
>> +		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
>> +		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
>>  			ena_tx_ctx->tso_enable = true;
>>
>>  			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
>>  		}
>>
>>  		/* check if L3 checksum is needed */
>> -		if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
>> +		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
>> +		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
>>  			ena_tx_ctx->l3_csum_enable = true;
>>
>>  		if (mbuf->ol_flags & PKT_TX_IPV6) {
>> @@ -310,19 +323,17 @@ static inline void ena_tx_mbuf_prepare(struct
>> rte_mbuf *mbuf,
>>  		}
>>
>>  		/* check if L4 checksum is needed */
>> -		switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
>> -		case PKT_TX_TCP_CKSUM:
>> +		if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
>> +		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
>>  			ena_tx_ctx->l4_proto =
>> ENA_ETH_IO_L4_PROTO_TCP;
>>  			ena_tx_ctx->l4_csum_enable = true;
>> -			break;
>> -		case PKT_TX_UDP_CKSUM:
>> +		} else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
>> +			   (queue_offloads &
>> DEV_TX_OFFLOAD_UDP_CKSUM)) {
>>  			ena_tx_ctx->l4_proto =
>> ENA_ETH_IO_L4_PROTO_UDP;
>>  			ena_tx_ctx->l4_csum_enable = true;
>> -			break;
>> -		default:
>> +		} else {
>>  			ena_tx_ctx->l4_proto =
>> ENA_ETH_IO_L4_PROTO_UNKNOWN;
>>  			ena_tx_ctx->l4_csum_enable = false;
>> -			break;
>>  		}
>>
>>  		ena_meta->mss = mbuf->tso_segsz;
>> @@ -945,7 +956,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev
>> *dev,
>>  			      uint16_t queue_idx,
>>  			      uint16_t nb_desc,
>>  			      __rte_unused unsigned int socket_id,
>> -			      __rte_unused const struct rte_eth_txconf
>> *tx_conf)
>> +			      const struct rte_eth_txconf *tx_conf)
>>  {
>>  	struct ena_com_create_io_ctx ctx =
>>  		/* policy set to _HOST just to satisfy icc compiler */ @@ -
>> 982,6 +993,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
>>  		return -EINVAL;
>>  	}
>>
>> +	if (!ena_are_tx_queue_offloads_allowed(adapter, tx_conf-
>>> offloads)) {
>> +		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
>> +		return -EINVAL;
>> +	}
> 
> Here is it better to check also the ETH_TXQ_FLAGS_IGNORE.
> If application has not yet moved to the new API, then it won't set any port Tx offloads. So for old applications, the ena_are_tx_queue_offloads_allowed is not necessary. 

But ethdev layer will set the offloads if ETH_TXQ_FLAGS_IGNORE is missing, can't
PMD always only rely on tx_conf->offloads ?

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 1/2] net/ena: convert to new Tx offloads API
  2018-01-17 18:58     ` Ferruh Yigit
@ 2018-01-18  6:26       ` Shahaf Shuler
  2018-01-18 14:49         ` Ferruh Yigit
  0 siblings, 1 reply; 17+ messages in thread
From: Shahaf Shuler @ 2018-01-18  6:26 UTC (permalink / raw)
  To: Ferruh Yigit, Rafal Kozik, dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch

Wednesday, January 17, 2018 8:58 PM, Ferruh Yigit :
> > Here is it better to check also the ETH_TXQ_FLAGS_IGNORE.
> > If application has not yet moved to the new API, then it won't set any port
> Tx offloads. So for old applications, the
> ena_are_tx_queue_offloads_allowed is not necessary.
> 
> But ethdev layer will set the offloads if ETH_TXQ_FLAGS_IGNORE is missing,
> can't PMD always only rely on tx_conf->offloads ?

This is to address a PMD which has moved to the new offloads API while the application still uses the old offloads API.

There are many Tx offloads which are per port (as there is a burst function per port, and not per queue). However the application will not set any offload on the device configuration rather only in the queue setup. The PMD can fail the setup function on such case, as per-port offload is requested on the queue setup without being requested first on the device configuration. 

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 1/2] net/ena: convert to new Tx offloads API
  2018-01-17 18:58     ` Ferruh Yigit
@ 2018-01-18  9:18       ` Rafał Kozik
  2018-01-18 14:49         ` Ferruh Yigit
  0 siblings, 1 reply; 17+ messages in thread
From: Rafał Kozik @ 2018-01-18  9:18 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: dev, Shahaf Shuler, Marcin Wojtas, Michał Krawczyk, Tzalik,
	Guy, evgenys, Matushevsky, Alexander, Chauskin, Igor

2018-01-17 19:58 GMT+01:00 Ferruh Yigit <ferruh.yigit@intel.com>:
> On 1/17/2018 8:23 AM, Rafal Kozik wrote:
>> Ethdev Tx offloads API has changed since:
>>
>> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
>>
>> This commit support the new Tx offloads API. Queue configuration
>> is stored in ena_ring.offloads. During preparing mbufs for tx, offloads are
>> allowed only if appropriate flags in this field are set.
>>
>> Signed-off-by: Rafal Kozik <rk@semihalf.com>
>
> <...>
>
>> @@ -280,21 +290,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
>>  }
>>
>>  static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
>> -                                    struct ena_com_tx_ctx *ena_tx_ctx)
>> +                                    struct ena_com_tx_ctx *ena_tx_ctx,
>> +                                    uint64_t queue_offloads)
>>  {
>>       struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
>>
>> -     if (mbuf->ol_flags &
>> -         (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
>> +     if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
>> +         (queue_offloads & QUEUE_OFFLOADS)) {
>>               /* check if TSO is required */
>> -             if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
>> +             if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
>> +                 (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
>>                       ena_tx_ctx->tso_enable = true;
>>
>>                       ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
>>               }
>>
>>               /* check if L3 checksum is needed */
>> -             if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
>> +             if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
>> +                 (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
>>                       ena_tx_ctx->l3_csum_enable = true;
>
> This function is fast path right?
> Do you really need new extra check to queue_offloads, isn't that information is
> for setup phase?
>

ENA does not have a switch for enabling/disabling offloads during configuration.
We must use an additional variable and track it; otherwise the driver could use
checksum offloads by enabling them in the mbuf although they were disabled in
the queue configuration.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 1/2] net/ena: convert to new Tx offloads API
  2018-01-18  6:26       ` Shahaf Shuler
@ 2018-01-18 14:49         ` Ferruh Yigit
  0 siblings, 0 replies; 17+ messages in thread
From: Ferruh Yigit @ 2018-01-18 14:49 UTC (permalink / raw)
  To: Shahaf Shuler, Rafal Kozik, dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch

On 1/18/2018 6:26 AM, Shahaf Shuler wrote:
> Wednesday, January 17, 2018 8:58 PM, Ferruh Yigit :
>>> Here is it better to check also the ETH_TXQ_FLAGS_IGNORE.
>>> If application has not yet moved to the new API, then it won't set any port
>> Tx offloads. So for old applications, the
>> ena_are_tx_queue_offloads_allowed is not necessary.
>>
>> But ethdev layer will set the offloads if ETH_TXQ_FLAGS_IGNORE is missing,
>> can't PMD always only rely on tx_conf->offloads ?
> 
> This is to address PMD which moved to the new offloads API but the application still uses the old offloads API.
> 
> There are many Tx offloads which are per port (as there is a burst function per port, and not per queue). However the application will not set any offload on the device configuration rather only in the queue setup. The PMD can fail the setup function on such case, as per-port offload is requested on the queue setup without being requested first on the device configuration. 

I see, dev->data->dev_conf.txmode.offloads will be empty for old applications
which ena_are_tx_queue_offloads_allowed() compares against txq->offload. So I
agree, thanks.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 1/2] net/ena: convert to new Tx offloads API
  2018-01-18  9:18       ` Rafał Kozik
@ 2018-01-18 14:49         ` Ferruh Yigit
  0 siblings, 0 replies; 17+ messages in thread
From: Ferruh Yigit @ 2018-01-18 14:49 UTC (permalink / raw)
  To: Rafał Kozik
  Cc: dev, Shahaf Shuler, Marcin Wojtas, Michał Krawczyk, Tzalik,
	Guy, evgenys, Matushevsky, Alexander, Chauskin, Igor

On 1/18/2018 9:18 AM, Rafał Kozik wrote:
> 2018-01-17 19:58 GMT+01:00 Ferruh Yigit <ferruh.yigit@intel.com>:
>> On 1/17/2018 8:23 AM, Rafal Kozik wrote:
>>> Ethdev Tx offloads API has changed since:
>>>
>>> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
>>>
>>> This commit support the new Tx offloads API. Queue configuration
>>> is stored in ena_ring.offloads. During preparing mbufs for tx, offloads are
>>> allowed only if appropriate flags in this field are set.
>>>
>>> Signed-off-by: Rafal Kozik <rk@semihalf.com>
>>
>> <...>
>>
>>> @@ -280,21 +290,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
>>>  }
>>>
>>>  static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
>>> -                                    struct ena_com_tx_ctx *ena_tx_ctx)
>>> +                                    struct ena_com_tx_ctx *ena_tx_ctx,
>>> +                                    uint64_t queue_offloads)
>>>  {
>>>       struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
>>>
>>> -     if (mbuf->ol_flags &
>>> -         (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
>>> +     if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
>>> +         (queue_offloads & QUEUE_OFFLOADS)) {
>>>               /* check if TSO is required */
>>> -             if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
>>> +             if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
>>> +                 (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
>>>                       ena_tx_ctx->tso_enable = true;
>>>
>>>                       ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
>>>               }
>>>
>>>               /* check if L3 checksum is needed */
>>> -             if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
>>> +             if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
>>> +                 (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
>>>                       ena_tx_ctx->l3_csum_enable = true;
>>
>> This function is fast path right?
>> Do you really need new extra check to queue_offloads, isn't that information is
>> for setup phase?
>>
> 
> ENA does not have a switch for enabling/disabling offloads during configuration.
> We must use additional variable and track it, otherwise the driver could use
> checksum offloads by enabling it in mbuf although it was disabled in
> queue configuration.

OK, thanks for clarification.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 1/2] net/ena: convert to new Tx offloads API
  2018-01-17  8:23   ` [PATCH v2 " Rafal Kozik
  2018-01-17 18:58     ` Ferruh Yigit
@ 2018-01-18 14:53     ` Ferruh Yigit
  2018-01-18 14:59       ` Michał Krawczyk
  1 sibling, 1 reply; 17+ messages in thread
From: Ferruh Yigit @ 2018-01-18 14:53 UTC (permalink / raw)
  To: Rafal Kozik, dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch

On 1/17/2018 8:23 AM, Rafal Kozik wrote:
> Ethdev Tx offloads API has changed since:
> 
> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
> 
> This commit support the new Tx offloads API. Queue configuration
> is stored in ena_ring.offloads. During preparing mbufs for tx, offloads are
> allowed only if appropriate flags in this field are set.
> 
> Signed-off-by: Rafal Kozik <rk@semihalf.com>

Series
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>

Marcin, Michal, Guy, Evgeny,

Any comment on patch?

Since this is the first contribution from Rafal, I believe it would be good to get
an explicit ack for the patches. I will wait for your comment.

Thanks,
ferruh

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 1/2] net/ena: convert to new Tx offloads API
  2018-01-18 14:53     ` Ferruh Yigit
@ 2018-01-18 14:59       ` Michał Krawczyk
  2018-01-18 15:12         ` Ferruh Yigit
  0 siblings, 1 reply; 17+ messages in thread
From: Michał Krawczyk @ 2018-01-18 14:59 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: Rafal Kozik, dev, Marcin Wojtas, Tzalik, Guy, Schmeilin, Evgeny,
	Matushevsky, Alexander, Chauskin, Igor

Acked-by: Michal Krawczyk <mk@semihalf.com>

2018-01-18 15:53 GMT+01:00 Ferruh Yigit <ferruh.yigit@intel.com>:

> On 1/17/2018 8:23 AM, Rafal Kozik wrote:
> > Ethdev Tx offloads API has changed since:
> >
> > commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
> >
> > This commit support the new Tx offloads API. Queue configuration
> > is stored in ena_ring.offloads. During preparing mbufs for tx, offloads
> are
> > allowed only if appropriate flags in this field are set.
> >
> > Signed-off-by: Rafal Kozik <rk@semihalf.com>
>
> Series
> Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
>
> Marcin, Michal, Guy, Evgeny,
>
> Any comment on patch?
>
> Since this is first contribution form Rafal, I believe it would be good to
> get
> explicit ack for patches. I will wait for your comment.
>
> Thanks,
> ferruh
>

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 2/2] net/ena: convert to new Rx offloads API
  2018-01-17  8:26   ` [PATCH v2 " Rafal Kozik
@ 2018-01-18 15:06     ` Michał Krawczyk
  0 siblings, 0 replies; 17+ messages in thread
From: Michał Krawczyk @ 2018-01-18 15:06 UTC (permalink / raw)
  To: Rafal Kozik
  Cc: dev, Marcin Wojtas, Tzalik, Guy, Schmeilin, Evgeny, Matushevsky,
	Alexander, Chauskin, Igor

Acked-by: Michal Krawczyk <mk@semihalf.com>

2018-01-17 9:26 GMT+01:00 Rafal Kozik <rk@semihalf.com>:
> Ethdev Rx offloads API has changed since:
>
> commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
>
> This commit supports the new Rx offloads API.
>
> Signed-off-by: Rafal Kozik <rk@semihalf.com>
> ---
> v2:
>  * Use PRIx64 in printf.
>
>  drivers/net/ena/ena_ethdev.c | 36 ++++++++++++++++++++++++++++++++++--
>  drivers/net/ena/ena_ethdev.h |  2 ++
>  2 files changed, 36 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
> index 54ccc3d..1dfbe39 100644
> --- a/drivers/net/ena/ena_ethdev.c
> +++ b/drivers/net/ena/ena_ethdev.c
> @@ -237,6 +237,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
>  static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
>  static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
>                                               uint64_t offloads);
> +static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
> +                                             uint64_t offloads);
>
>  static const struct eth_dev_ops ena_dev_ops = {
>         .dev_configure        = ena_dev_configure,
> @@ -766,7 +768,8 @@ static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
>  {
>         uint32_t max_frame_len = adapter->max_mtu;
>
> -       if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1)
> +       if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
> +           DEV_RX_OFFLOAD_JUMBO_FRAME)
>                 max_frame_len =
>                         adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;
>
> @@ -1066,7 +1069,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
>                               uint16_t queue_idx,
>                               uint16_t nb_desc,
>                               __rte_unused unsigned int socket_id,
> -                             __rte_unused const struct rte_eth_rxconf *rx_conf,
> +                             const struct rte_eth_rxconf *rx_conf,
>                               struct rte_mempool *mp)
>  {
>         struct ena_com_create_io_ctx ctx =
> @@ -1102,6 +1105,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
>                 return -EINVAL;
>         }
>
> +       if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
> +               RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
> +               return -EINVAL;
> +       }
> +
>         ena_qid = ENA_IO_RXQ_IDX(queue_idx);
>
>         ctx.qid = ena_qid;
> @@ -1406,6 +1414,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
>         struct ena_adapter *adapter =
>                 (struct ena_adapter *)(dev->data->dev_private);
>         uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
> +       uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
>
>         if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
>                 RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
> @@ -1414,6 +1423,13 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
>                 return -ENOTSUP;
>         }
>
> +       if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
> +               RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
> +                   "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
> +                   rx_offloads, adapter->rx_supported_offloads);
> +               return -ENOTSUP;
> +       }
> +
>         if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
>               adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
>                 PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
> @@ -1435,6 +1451,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
>         }
>
>         adapter->tx_selected_offloads = tx_offloads;
> +       adapter->rx_selected_offloads = rx_offloads;
>         return 0;
>  }
>
> @@ -1476,6 +1493,19 @@ static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
>         return true;
>  }
>
> +static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
> +                                             uint64_t offloads)
> +{
> +       uint64_t port_offloads = adapter->rx_selected_offloads;
> +
> +       /* Check if port supports all requested offloads.
> +        * True if all offloads selected for queue are set for port.
> +        */
> +       if ((offloads & port_offloads) != offloads)
> +               return false;
> +       return true;
> +}
> +
>  static void ena_infos_get(struct rte_eth_dev *dev,
>                           struct rte_eth_dev_info *dev_info)
>  {
> @@ -1530,6 +1560,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
>
>         /* Inform framework about available features */
>         dev_info->rx_offload_capa = rx_feat;
> +       dev_info->rx_queue_offload_capa = rx_feat;
>         dev_info->tx_offload_capa = tx_feat;
>         dev_info->tx_queue_offload_capa = tx_feat;
>
> @@ -1542,6 +1573,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
>         dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
>
>         adapter->tx_supported_offloads = tx_feat;
> +       adapter->rx_supported_offloads = rx_feat;
>  }
>
>  static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
> index 3e72777..394d05e 100644
> --- a/drivers/net/ena/ena_ethdev.h
> +++ b/drivers/net/ena/ena_ethdev.h
> @@ -178,6 +178,8 @@ struct ena_adapter {
>
>         uint64_t tx_supported_offloads;
>         uint64_t tx_selected_offloads;
> +       uint64_t rx_supported_offloads;
> +       uint64_t rx_selected_offloads;
>  };
>
>  #endif /* _ENA_ETHDEV_H_ */
> --
> 2.7.4
>

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH v2 1/2] net/ena: convert to new Tx offloads API
  2018-01-18 14:59       ` Michał Krawczyk
@ 2018-01-18 15:12         ` Ferruh Yigit
  0 siblings, 0 replies; 17+ messages in thread
From: Ferruh Yigit @ 2018-01-18 15:12 UTC (permalink / raw)
  To: Michał Krawczyk
  Cc: Rafal Kozik, dev, Marcin Wojtas, Tzalik, Guy, Schmeilin, Evgeny,
	Matushevsky, Alexander, Chauskin, Igor

On 1/18/2018 2:59 PM, Michał Krawczyk wrote:

> 2018-01-18 15:53 GMT+01:00 Ferruh Yigit <ferruh.yigit@intel.com
> <mailto:ferruh.yigit@intel.com>>:
> 
>     On 1/17/2018 8:23 AM, Rafal Kozik wrote:
>     > Ethdev Tx offloads API has changed since:
>     >
>     > commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
>     >
>     > This commit supports the new Tx offloads API. Queue configuration
>     > is stored in ena_ring.offloads. During preparing mbufs for tx, offloads are
>     > allowed only if appropriate flags in this field are set.
>     >
>     > Signed-off-by: Rafal Kozik <rk@semihalf.com <mailto:rk@semihalf.com>>
> 
>     Series
>     Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com

> Acked-by: Michal Krawczyk <mk@semihalf.com>

Series applied to dpdk-next-net/master, thanks.

(Welcome Rafal!)

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2018-01-18 15:12 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-01-16 11:52 [PATCH 0/2] net/ena: convert to new offloads API Rafal Kozik
2018-01-16 11:52 ` [PATCH 1/2] net/ena: convert to new Tx " Rafal Kozik
2018-01-17  6:56   ` Shahaf Shuler
2018-01-17 18:58     ` Ferruh Yigit
2018-01-18  6:26       ` Shahaf Shuler
2018-01-18 14:49         ` Ferruh Yigit
2018-01-17  8:23   ` [PATCH v2 " Rafal Kozik
2018-01-17 18:58     ` Ferruh Yigit
2018-01-18  9:18       ` Rafał Kozik
2018-01-18 14:49         ` Ferruh Yigit
2018-01-18 14:53     ` Ferruh Yigit
2018-01-18 14:59       ` Michał Krawczyk
2018-01-18 15:12         ` Ferruh Yigit
2018-01-16 11:52 ` [PATCH 2/2] net/ena: convert to new Rx " Rafal Kozik
2018-01-17  6:57   ` Shahaf Shuler
2018-01-17  8:26   ` [PATCH v2 " Rafal Kozik
2018-01-18 15:06     ` Michał Krawczyk

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.