* [PATCH 0/2] Support for new Ethdev offload APIs
@ 2018-04-09 10:26 Sunil Kumar Kori
  2018-04-09 10:26 ` [PATCH 1/2] net/dpaa: Changes to support ethdev " Sunil Kumar Kori
                   ` (2 more replies)
  0 siblings, 3 replies; 17+ messages in thread
From: Sunil Kumar Kori @ 2018-04-09 10:26 UTC (permalink / raw)
  To: dev; +Cc: sunil.kori, hemant.agrawal

This patchset contains changes to support the ethdev offload APIs for the DPAA
and DPAA2 drivers.

Offloading support is categorized into the following logical parts:
1. If a requested offload feature is not supported, an error is returned.
2. If a requested offload feature is supported but cannot be disabled, the
   request to disable it is ignored and a message is logged.
3. Otherwise, the configuration is successfully offloaded (a minimal sketch of
   this logic follows below).
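
A minimal sketch of the intended check, for illustration only (the mask names
below are placeholders, not the actual driver symbols):

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the configure-time validation: "supported" and "always_on"
 * stand in for the per-driver capability masks, "requested" for
 * eth_conf->rxmode.offloads or eth_conf->txmode.offloads. */
int
validate_offloads(uint64_t requested, uint64_t supported, uint64_t always_on)
{
	/* 1. Any offload outside the device capability -> error */
	if (requested & ~(supported | always_on))
		return -ENOTSUP;

	/* 2. Always-on offloads cannot be disabled -> keep them, log a note */
	if (always_on & ~requested)
		printf("offloads 0x%" PRIx64 " cannot be disabled, ignoring\n",
		       always_on & ~requested);

	/* 3. Everything else is applied as requested */
	return 0;
}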

Sunil Kumar Kori (2):
  net/dpaa: Changes to support ethdev offload APIs
  net/dpaa2: Changes to support ethdev offload APIs

 drivers/net/dpaa/dpaa_ethdev.c   | 46 ++++++++++++++++++++++++++---
 drivers/net/dpaa2/dpaa2_ethdev.c | 63 +++++++++++++++++++++++++++++++++-------
 drivers/net/dpaa2/dpaa2_rxtx.c   | 32 +++++++-------------
 3 files changed, 105 insertions(+), 36 deletions(-)

-- 
2.9.3


* [PATCH 1/2] net/dpaa: Changes to support ethdev offload APIs
  2018-04-09 10:26 [PATCH 0/2] Support for new Ethdev offload APIs Sunil Kumar Kori
@ 2018-04-09 10:26 ` Sunil Kumar Kori
  2018-04-09 13:19   ` Sunil Kumar Kori
  2018-04-09 10:26 ` [PATCH 2/2] net/dpaa2: " Sunil Kumar Kori
  2018-04-11 11:05 ` [PATCH v2 0/2] Support for new Ethdev " Sunil Kumar Kori
  2 siblings, 1 reply; 17+ messages in thread
From: Sunil Kumar Kori @ 2018-04-09 10:26 UTC (permalink / raw)
  To: dev; +Cc: sunil.kori, hemant.agrawal

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
---
 drivers/net/dpaa/dpaa_ethdev.c | 46 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index db49364..22eb070 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -95,6 +95,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
 
+static void
+dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+
 static inline void
 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
 {
@@ -134,13 +137,42 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 }
 
 static int
-dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint64_t tx_offloads = eth_conf->txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	dpaa_eth_dev_info(dev, &dev_info);
+	if (dev_info.rx_offload_capa != rx_offloads) {
+		DPAA_PMD_ERR("Some Rx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (dev_info.tx_offload_capa != tx_offloads) {
+		DPAA_PMD_ERR("Some Tx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (((rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) == 0) ||
+		((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) == 0) ||
+		((rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0)) {
+			DPAA_PMD_ERR("Checksum offloading is enabled by default"
+			"Cannot be disabled. So ignoring this configuration");
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 		    DPAA_MAX_RX_PKT_LEN) {
 			fman_if_set_maxfrm(dpaa_intf->fif,
@@ -259,11 +291,17 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->rx_offload_capa =
 		(DEV_RX_OFFLOAD_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM);
+		DEV_RX_OFFLOAD_TCP_CKSUM)  |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa =
 		(DEV_TX_OFFLOAD_IPV4_CKSUM  |
 		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM);
+		DEV_TX_OFFLOAD_TCP_CKSUM)  |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+		DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	dev_info->default_rxconf.rx_drop_en = true;
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
-- 
2.9.3


* [PATCH 2/2] net/dpaa2: Changes to support ethdev offload APIs
  2018-04-09 10:26 [PATCH 0/2] Support for new Ethdev offload APIs Sunil Kumar Kori
  2018-04-09 10:26 ` [PATCH 1/2] net/dpaa: Changes to support ethdev " Sunil Kumar Kori
@ 2018-04-09 10:26 ` Sunil Kumar Kori
  2018-04-11 11:05 ` [PATCH v2 0/2] Support for new Ethdev " Sunil Kumar Kori
  2 siblings, 0 replies; 17+ messages in thread
From: Sunil Kumar Kori @ 2018-04-09 10:26 UTC (permalink / raw)
  To: dev; +Cc: sunil.kori, hemant.agrawal

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.c | 63 +++++++++++++++++++++++++++++++++-------
 drivers/net/dpaa2/dpaa2_rxtx.c   | 32 +++++++-------------
 2 files changed, 63 insertions(+), 32 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 281483d..acf5f1a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -172,16 +172,24 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_RX_OFFLOAD_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_IPV4_CKSUM |
 		DEV_TX_OFFLOAD_UDP_CKSUM |
 		DEV_TX_OFFLOAD_TCP_CKSUM |
 		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+		DEV_TX_OFFLOAD_MULTI_SEGS;
 	dev_info->speed_capa = ETH_LINK_SPEED_1G |
 			ETH_LINK_SPEED_2_5G |
 			ETH_LINK_SPEED_10G;
+	dev_info->default_rxconf.rx_drop_en = true;
 }
 
 static int
@@ -268,12 +276,33 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	struct fsl_mc_io *dpni = priv->hw;
 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
-	int rx_ip_csum_offload = false;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint64_t tx_offloads = eth_conf->txmode.offloads;
+	int rx_l3_csum_offload = false;
+	int rx_l4_csum_offload = false;
+	int tx_l3_csum_offload = false;
+	int tx_l4_csum_offload = false;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (eth_conf->rxmode.jumbo_frame == 1) {
+	dpaa2_dev_info_get(dev, &dev_info);
+	if (dev_info.rx_offload_capa != rx_offloads) {
+		DPAA2_PMD_ERR("Some Rx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (dev_info.tx_offload_capa != tx_offloads) {
+		DPAA2_PMD_ERR("Some Tx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
 			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
 				priv->token, eth_conf->rxmode.max_rx_pkt_len);
@@ -297,32 +326,44 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (eth_conf->rxmode.hw_ip_checksum)
-		rx_ip_csum_offload = true;
+	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+		rx_l3_csum_offload = true;
+
+	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
+		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
+		rx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
+			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
 		return ret;
 	}
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
+			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
 		return ret;
 	}
 
+	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		tx_l3_csum_offload = true;
+
+	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
+		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+		tx_l4_csum_offload = true;
+
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_TX_L3_CSUM, true);
+			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
 		return ret;
 	}
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_TX_L4_CSUM, true);
+			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
 		return ret;
@@ -343,7 +384,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (eth_conf->rxmode.hw_vlan_filter)
+	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
 
 	/* update the current status */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 532de94..deadf1a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -317,12 +317,6 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 	struct qbman_sge *sgt, *sge = NULL;
 	int i;
 
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		int ret = rte_vlan_insert(&mbuf);
-		if (ret)
-			return ret;
-	}
-
 	temp = rte_pktmbuf_alloc(mbuf->pool);
 	if (temp == NULL) {
 		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
@@ -389,13 +383,6 @@ static void __attribute__ ((noinline)) __attribute__((hot))
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 	       struct qbman_fd *fd, uint16_t bpid)
 {
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		if (rte_vlan_insert(&mbuf)) {
-			rte_pktmbuf_free(mbuf);
-			return;
-		}
-	}
-
 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
@@ -428,12 +415,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 	struct rte_mbuf *m;
 	void *mb = NULL;
 
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		int ret = rte_vlan_insert(&mbuf);
-		if (ret)
-			return ret;
-	}
-
 	if (rte_dpaa2_mbuf_alloc_bulk(
 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
 		DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n");
@@ -734,8 +715,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				    priv->bp_list->dpaa2_ops_index &&
 				    (*bufs)->nb_segs == 1 &&
 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
-					if (unlikely((*bufs)->ol_flags
-						& PKT_TX_VLAN_PKT)) {
+					if (unlikely(((*bufs)->ol_flags
+						& PKT_TX_VLAN_PKT) ||
+						(dev->data->dev_conf.txmode.offloads
+						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
 						ret = rte_vlan_insert(bufs);
 						if (ret)
 							goto send_n_return;
@@ -755,6 +738,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				goto send_n_return;
 			}
 
+			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+				(dev->data->dev_conf.txmode.offloads
+				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+				int ret = rte_vlan_insert(bufs);
+				if (ret)
+					goto send_n_return;
+			}
 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
 				/* alloc should be from the default buffer pool
-- 
2.9.3


* [PATCH 1/2] net/dpaa: Changes to support ethdev offload APIs
  2018-04-09 10:26 ` [PATCH 1/2] net/dpaa: Changes to support ethdev " Sunil Kumar Kori
@ 2018-04-09 13:19   ` Sunil Kumar Kori
  2018-04-09 13:19     ` [PATCH 2/2] net/dpaa2: " Sunil Kumar Kori
  2018-04-10 16:40     ` [PATCH 1/2] net/dpaa: " Ferruh Yigit
  0 siblings, 2 replies; 17+ messages in thread
From: Sunil Kumar Kori @ 2018-04-09 13:19 UTC (permalink / raw)
  To: dev; +Cc: sunil.kori, hemant.agrawal

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
---
 drivers/net/dpaa/dpaa_ethdev.c | 46 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index db49364..efef62c 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -95,6 +95,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
 
+static void
+dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+
 static inline void
 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
 {
@@ -134,13 +137,42 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 }
 
 static int
-dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint64_t tx_offloads = eth_conf->txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	dpaa_eth_dev_info(dev, &dev_info);
+	if (dev_info.rx_offload_capa != rx_offloads) {
+		DPAA_PMD_ERR("Some Rx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (dev_info.tx_offload_capa != tx_offloads) {
+		DPAA_PMD_ERR("Some Tx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (((rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) == 0) ||
+		((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) == 0) ||
+		((rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0)) {
+			DPAA_PMD_ERR(" Cksum offloading is enabled by default "
+			" Cannot be disabled. So ignoring this configuration ");
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 		    DPAA_MAX_RX_PKT_LEN) {
 			fman_if_set_maxfrm(dpaa_intf->fif,
@@ -259,11 +291,17 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->rx_offload_capa =
 		(DEV_RX_OFFLOAD_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM);
+		DEV_RX_OFFLOAD_TCP_CKSUM)  |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa =
 		(DEV_TX_OFFLOAD_IPV4_CKSUM  |
 		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM);
+		DEV_TX_OFFLOAD_TCP_CKSUM)  |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+		DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	dev_info->default_rxconf.rx_drop_en = true;
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
-- 
2.9.3


* [PATCH 2/2] net/dpaa2: Changes to support ethdev offload APIs
  2018-04-09 13:19   ` Sunil Kumar Kori
@ 2018-04-09 13:19     ` Sunil Kumar Kori
  2018-04-10 16:40     ` [PATCH 1/2] net/dpaa: " Ferruh Yigit
  1 sibling, 0 replies; 17+ messages in thread
From: Sunil Kumar Kori @ 2018-04-09 13:19 UTC (permalink / raw)
  To: dev; +Cc: sunil.kori, hemant.agrawal

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.c | 63 +++++++++++++++++++++++++++++++++-------
 drivers/net/dpaa2/dpaa2_rxtx.c   | 32 +++++++-------------
 2 files changed, 63 insertions(+), 32 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 281483d..acf5f1a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -172,16 +172,24 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_RX_OFFLOAD_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_IPV4_CKSUM |
 		DEV_TX_OFFLOAD_UDP_CKSUM |
 		DEV_TX_OFFLOAD_TCP_CKSUM |
 		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+		DEV_TX_OFFLOAD_MULTI_SEGS;
 	dev_info->speed_capa = ETH_LINK_SPEED_1G |
 			ETH_LINK_SPEED_2_5G |
 			ETH_LINK_SPEED_10G;
+	dev_info->default_rxconf.rx_drop_en = true;
 }
 
 static int
@@ -268,12 +276,33 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	struct fsl_mc_io *dpni = priv->hw;
 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
-	int rx_ip_csum_offload = false;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint64_t tx_offloads = eth_conf->txmode.offloads;
+	int rx_l3_csum_offload = false;
+	int rx_l4_csum_offload = false;
+	int tx_l3_csum_offload = false;
+	int tx_l4_csum_offload = false;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (eth_conf->rxmode.jumbo_frame == 1) {
+	dpaa2_dev_info_get(dev, &dev_info);
+	if (dev_info.rx_offload_capa != rx_offloads) {
+		DPAA2_PMD_ERR("Some Rx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (dev_info.tx_offload_capa != tx_offloads) {
+		DPAA2_PMD_ERR("Some Tx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
 			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
 				priv->token, eth_conf->rxmode.max_rx_pkt_len);
@@ -297,32 +326,44 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (eth_conf->rxmode.hw_ip_checksum)
-		rx_ip_csum_offload = true;
+	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+		rx_l3_csum_offload = true;
+
+	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
+		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
+		rx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
+			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
 		return ret;
 	}
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
+			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
 		return ret;
 	}
 
+	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		tx_l3_csum_offload = true;
+
+	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
+		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+		tx_l4_csum_offload = true;
+
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_TX_L3_CSUM, true);
+			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
 		return ret;
 	}
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_TX_L4_CSUM, true);
+			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
 		return ret;
@@ -343,7 +384,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (eth_conf->rxmode.hw_vlan_filter)
+	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
 
 	/* update the current status */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 532de94..deadf1a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -317,12 +317,6 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 	struct qbman_sge *sgt, *sge = NULL;
 	int i;
 
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		int ret = rte_vlan_insert(&mbuf);
-		if (ret)
-			return ret;
-	}
-
 	temp = rte_pktmbuf_alloc(mbuf->pool);
 	if (temp == NULL) {
 		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
@@ -389,13 +383,6 @@ static void __attribute__ ((noinline)) __attribute__((hot))
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 	       struct qbman_fd *fd, uint16_t bpid)
 {
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		if (rte_vlan_insert(&mbuf)) {
-			rte_pktmbuf_free(mbuf);
-			return;
-		}
-	}
-
 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
@@ -428,12 +415,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 	struct rte_mbuf *m;
 	void *mb = NULL;
 
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		int ret = rte_vlan_insert(&mbuf);
-		if (ret)
-			return ret;
-	}
-
 	if (rte_dpaa2_mbuf_alloc_bulk(
 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
 		DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n");
@@ -734,8 +715,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				    priv->bp_list->dpaa2_ops_index &&
 				    (*bufs)->nb_segs == 1 &&
 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
-					if (unlikely((*bufs)->ol_flags
-						& PKT_TX_VLAN_PKT)) {
+					if (unlikely(((*bufs)->ol_flags
+						& PKT_TX_VLAN_PKT) ||
+						(dev->data->dev_conf.txmode.offloads
+						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
 						ret = rte_vlan_insert(bufs);
 						if (ret)
 							goto send_n_return;
@@ -755,6 +738,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				goto send_n_return;
 			}
 
+			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+				(dev->data->dev_conf.txmode.offloads
+				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+				int ret = rte_vlan_insert(bufs);
+				if (ret)
+					goto send_n_return;
+			}
 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
 				/* alloc should be from the default buffer pool
-- 
2.9.3


* Re: [PATCH 1/2] net/dpaa: Changes to support ethdev offload APIs
  2018-04-09 13:19   ` Sunil Kumar Kori
  2018-04-09 13:19     ` [PATCH 2/2] net/dpaa2: " Sunil Kumar Kori
@ 2018-04-10 16:40     ` Ferruh Yigit
  1 sibling, 0 replies; 17+ messages in thread
From: Ferruh Yigit @ 2018-04-10 16:40 UTC (permalink / raw)
  To: Sunil Kumar Kori, dev; +Cc: hemant.agrawal, Shahaf Shuler

On 4/9/2018 2:19 PM, Sunil Kumar Kori wrote:
> Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
> ---
>  drivers/net/dpaa/dpaa_ethdev.c | 46 ++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 42 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
> index db49364..efef62c 100644
> --- a/drivers/net/dpaa/dpaa_ethdev.c
> +++ b/drivers/net/dpaa/dpaa_ethdev.c
> @@ -95,6 +95,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
>  
>  static struct rte_dpaa_driver rte_dpaa_pmd;
>  
> +static void
> +dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
> +
>  static inline void
>  dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
>  {
> @@ -134,13 +137,42 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>  }
>  
>  static int
> -dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
> +dpaa_eth_dev_configure(struct rte_eth_dev *dev)
>  {
>  	struct dpaa_if *dpaa_intf = dev->data->dev_private;
> +	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
> +	struct rte_eth_dev_info dev_info;
> +	uint64_t rx_offloads = eth_conf->rxmode.offloads;
> +	uint64_t tx_offloads = eth_conf->txmode.offloads;
>  
>  	PMD_INIT_FUNC_TRACE();
>  
> -	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
> +	dpaa_eth_dev_info(dev, &dev_info);

It is up to you, but you may prefer to keep [rt]x_offload_capa in a variable or
macro so that it can be used here directly without needing to call dev_info;
the current approach is also fine if you prefer it.
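
For illustration, that suggestion amounts to something like the following
sketch (the variable name is a placeholder, not an existing driver symbol):

/* Sketch only: keep the Rx capability mask in one static constant so that
 * both dpaa_eth_dev_info() and dpaa_eth_dev_configure() can refer to it
 * without an extra dev_info call. */
static const uint64_t dpaa_rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER;

dev_info() would then simply report this constant, and configure() could test
the requested bits against it directly.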

> +	if (dev_info.rx_offload_capa != rx_offloads) {
> +		DPAA_PMD_ERR("Some Rx offloads are not supported "
> +			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
> +			rx_offloads, dev_info.rx_offload_capa);
> +		return -ENOTSUP;
> +	}
> +
> +	if (dev_info.tx_offload_capa != tx_offloads) {
> +		DPAA_PMD_ERR("Some Tx offloads are not supported "
> +			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
> +			tx_offloads, dev_info.tx_offload_capa);
> +		return -ENOTSUP;
> +	}


dev_info.rx_offload_capa is your device's offload capability. The user may
choose to use any subset of these offloads, so you can't return an error just
because the requested offloads are not equal to the capability; this part is
wrong.

You only need to be sure that the user is not asking for more than what is
supported.
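
In other words, something along these lines (a sketch only, reusing the
variables already in dpaa_eth_dev_configure()):

	/* Sketch: error out only when the request includes an offload the
	 * device cannot provide at all. */
	if (rx_offloads & ~dev_info.rx_offload_capa) {
		DPAA_PMD_ERR("Some Rx offloads are not supported "
			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
			rx_offloads, dev_info.rx_offload_capa);
		return -ENOTSUP;
	}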

<...>


* [PATCH v2 0/2] Support for new Ethdev offload APIs
  2018-04-09 10:26 [PATCH 0/2] Support for new Ethdev offload APIs Sunil Kumar Kori
  2018-04-09 10:26 ` [PATCH 1/2] net/dpaa: Changes to support ethdev " Sunil Kumar Kori
  2018-04-09 10:26 ` [PATCH 2/2] net/dpaa2: " Sunil Kumar Kori
@ 2018-04-11 11:05 ` Sunil Kumar Kori
  2018-04-11 11:05   ` [PATCH v2 1/2] net/dpaa: Changes to support ethdev " Sunil Kumar Kori
                     ` (3 more replies)
  2 siblings, 4 replies; 17+ messages in thread
From: Sunil Kumar Kori @ 2018-04-11 11:05 UTC (permalink / raw)
  To: dev; +Cc: sunil.kori, hemant.agrawal

This patchset contains changes to support the ethdev offload APIs for the DPAA
and DPAA2 drivers.

Offloading support is categorized into the following logical parts:
1. If a requested offload feature is not supported, an error is returned.
2. If a requested offload feature is supported but cannot be disabled, the
   request to disable it is ignored and a message is logged.
3. Otherwise, the configuration is successfully offloaded.

[Changes in v2]
1. Incorporated review comments.

Sunil Kumar Kori (2):
  net/dpaa: Changes to support ethdev offload APIs
  net/dpaa2: Changes to support ethdev offload APIs

 drivers/net/dpaa/dpaa_ethdev.c   | 50 +++++++++++++++++++++++----
 drivers/net/dpaa2/dpaa2_ethdev.c | 75 +++++++++++++++++++++++++++++++---------
 drivers/net/dpaa2/dpaa2_rxtx.c   | 32 ++++++-----------
 3 files changed, 114 insertions(+), 43 deletions(-)

-- 
2.9.3


* [PATCH v2 1/2] net/dpaa: Changes to support ethdev offload APIs
  2018-04-11 11:05 ` [PATCH v2 0/2] Support for new Ethdev " Sunil Kumar Kori
@ 2018-04-11 11:05   ` Sunil Kumar Kori
  2018-04-11 11:05   ` [PATCH v2 2/2] net/dpaa2: " Sunil Kumar Kori
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 17+ messages in thread
From: Sunil Kumar Kori @ 2018-04-11 11:05 UTC (permalink / raw)
  To: dev; +Cc: sunil.kori, hemant.agrawal

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
---
 drivers/net/dpaa/dpaa_ethdev.c | 50 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 44 insertions(+), 6 deletions(-)

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index db49364..1c632a2 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -95,6 +95,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
 
+static void
+dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+
 static inline void
 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
 {
@@ -122,9 +125,11 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
 		return -EINVAL;
 	if (frame_size > ETHER_MAX_LEN)
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads &=
+						DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+						~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -134,13 +139,42 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 }
 
 static int
-dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint64_t tx_offloads = eth_conf->txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	dpaa_eth_dev_info(dev, &dev_info);
+	if (((~(dev_info.rx_offload_capa) & rx_offloads) != 0)) {
+		DPAA_PMD_ERR("Some Rx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (((~(dev_info.tx_offload_capa) & tx_offloads) != 0)) {
+		DPAA_PMD_ERR("Some Tx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (((rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) == 0) ||
+		((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) == 0) ||
+		((rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) ||
+		((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0)) {
+			DPAA_PMD_ERR(" Cksum offloading is enabled by default "
+			" Cannot be disabled. So ignoring this configuration ");
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 		    DPAA_MAX_RX_PKT_LEN) {
 			fman_if_set_maxfrm(dpaa_intf->fif,
@@ -259,11 +293,15 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->rx_offload_capa =
 		(DEV_RX_OFFLOAD_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM);
+		DEV_RX_OFFLOAD_TCP_CKSUM)  |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa =
 		(DEV_TX_OFFLOAD_IPV4_CKSUM  |
 		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM);
+		DEV_TX_OFFLOAD_TCP_CKSUM)  |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+		DEV_TX_OFFLOAD_MULTI_SEGS;
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
-- 
2.9.3


* [PATCH v2 2/2] net/dpaa2: Changes to support ethdev offload APIs
  2018-04-11 11:05 ` [PATCH v2 0/2] Support for new Ethdev " Sunil Kumar Kori
  2018-04-11 11:05   ` [PATCH v2 1/2] net/dpaa: Changes to support ethdev " Sunil Kumar Kori
@ 2018-04-11 11:05   ` Sunil Kumar Kori
  2018-04-12 18:17   ` [PATCH v2 0/2] Support for new Ethdev " Ferruh Yigit
  2018-04-24 15:06   ` [PATCH v3 1/2] net/dpaa: fix the ethdev offload checks Hemant Agrawal
  3 siblings, 0 replies; 17+ messages in thread
From: Sunil Kumar Kori @ 2018-04-11 11:05 UTC (permalink / raw)
  To: dev; +Cc: sunil.kori, hemant.agrawal

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.c | 75 +++++++++++++++++++++++++++++++---------
 drivers/net/dpaa2/dpaa2_rxtx.c   | 32 ++++++-----------
 2 files changed, 70 insertions(+), 37 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 281483d..538ac1a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -103,7 +103,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			goto next_mask;
 		}
 
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_VLAN_FILTER)
 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
 						      priv->token, true);
 		else
@@ -114,7 +115,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 next_mask:
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_VLAN_EXTEND)
 			DPAA2_PMD_INFO("VLAN extend offload not supported");
 	}
 
@@ -172,13 +174,20 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_RX_OFFLOAD_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_IPV4_CKSUM |
 		DEV_TX_OFFLOAD_UDP_CKSUM |
 		DEV_TX_OFFLOAD_TCP_CKSUM |
 		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+		DEV_TX_OFFLOAD_MULTI_SEGS;
 	dev_info->speed_capa = ETH_LINK_SPEED_1G |
 			ETH_LINK_SPEED_2_5G |
 			ETH_LINK_SPEED_10G;
@@ -268,12 +277,33 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	struct fsl_mc_io *dpni = priv->hw;
 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
-	int rx_ip_csum_offload = false;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint64_t tx_offloads = eth_conf->txmode.offloads;
+	int rx_l3_csum_offload = false;
+	int rx_l4_csum_offload = false;
+	int tx_l3_csum_offload = false;
+	int tx_l4_csum_offload = false;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (eth_conf->rxmode.jumbo_frame == 1) {
+	dpaa2_dev_info_get(dev, &dev_info);
+	if ((~(dev_info.rx_offload_capa) & rx_offloads) != 0) {
+		DPAA2_PMD_ERR("Some Rx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if ((~(dev_info.tx_offload_capa) & tx_offloads) != 0) {
+		DPAA2_PMD_ERR("Some Tx offloads are not supported "
+			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
 			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
 				priv->token, eth_conf->rxmode.max_rx_pkt_len);
@@ -297,32 +327,44 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (eth_conf->rxmode.hw_ip_checksum)
-		rx_ip_csum_offload = true;
+	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+		rx_l3_csum_offload = true;
+
+	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
+		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
+		rx_l4_csum_offload = true;
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
+			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
 		return ret;
 	}
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
+			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
 		return ret;
 	}
 
+	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+		tx_l3_csum_offload = true;
+
+	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
+		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+		tx_l4_csum_offload = true;
+
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_TX_L3_CSUM, true);
+			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
 		return ret;
 	}
 
 	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-			       DPNI_OFF_TX_L4_CSUM, true);
+			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
 	if (ret) {
 		DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
 		return ret;
@@ -343,8 +385,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (eth_conf->rxmode.hw_vlan_filter)
-		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+	dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
 
 	/* update the current status */
 	dpaa2_dev_link_update(dev, 0);
@@ -949,9 +990,11 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EINVAL;
 
 	if (frame_size > ETHER_MAX_LEN)
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads &=
+						DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+						~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 532de94..deadf1a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -317,12 +317,6 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 	struct qbman_sge *sgt, *sge = NULL;
 	int i;
 
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		int ret = rte_vlan_insert(&mbuf);
-		if (ret)
-			return ret;
-	}
-
 	temp = rte_pktmbuf_alloc(mbuf->pool);
 	if (temp == NULL) {
 		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
@@ -389,13 +383,6 @@ static void __attribute__ ((noinline)) __attribute__((hot))
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 	       struct qbman_fd *fd, uint16_t bpid)
 {
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		if (rte_vlan_insert(&mbuf)) {
-			rte_pktmbuf_free(mbuf);
-			return;
-		}
-	}
-
 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
@@ -428,12 +415,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 	struct rte_mbuf *m;
 	void *mb = NULL;
 
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		int ret = rte_vlan_insert(&mbuf);
-		if (ret)
-			return ret;
-	}
-
 	if (rte_dpaa2_mbuf_alloc_bulk(
 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
 		DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n");
@@ -734,8 +715,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				    priv->bp_list->dpaa2_ops_index &&
 				    (*bufs)->nb_segs == 1 &&
 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
-					if (unlikely((*bufs)->ol_flags
-						& PKT_TX_VLAN_PKT)) {
+					if (unlikely(((*bufs)->ol_flags
+						& PKT_TX_VLAN_PKT) ||
+						(dev->data->dev_conf.txmode.offloads
+						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
 						ret = rte_vlan_insert(bufs);
 						if (ret)
 							goto send_n_return;
@@ -755,6 +738,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				goto send_n_return;
 			}
 
+			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+				(dev->data->dev_conf.txmode.offloads
+				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+				int ret = rte_vlan_insert(bufs);
+				if (ret)
+					goto send_n_return;
+			}
 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
 				/* alloc should be from the default buffer pool
-- 
2.9.3


* Re: [PATCH v2 0/2] Support for new Ethdev offload APIs
  2018-04-11 11:05 ` [PATCH v2 0/2] Support for new Ethdev " Sunil Kumar Kori
  2018-04-11 11:05   ` [PATCH v2 1/2] net/dpaa: Changes to support ethdev " Sunil Kumar Kori
  2018-04-11 11:05   ` [PATCH v2 2/2] net/dpaa2: " Sunil Kumar Kori
@ 2018-04-12 18:17   ` Ferruh Yigit
  2018-04-24 15:06   ` [PATCH v3 1/2] net/dpaa: fix the ethdev offload checks Hemant Agrawal
  3 siblings, 0 replies; 17+ messages in thread
From: Ferruh Yigit @ 2018-04-12 18:17 UTC (permalink / raw)
  To: Sunil Kumar Kori, dev; +Cc: hemant.agrawal

On 4/11/2018 12:05 PM, Sunil Kumar Kori wrote:
> This patchset contains changes to support the ethdev offload APIs for the DPAA
> and DPAA2 drivers.
>
> Offloading support is categorized into the following logical parts:
> 1. If a requested offload feature is not supported, an error is returned.
> 2. If a requested offload feature is supported but cannot be disabled, the
>    request to disable it is ignored and a message is logged.
> 3. Otherwise, the configuration is successfully offloaded.
> 
> [Changes in v2]
> 1. Incorporated review comments.
> 
> Sunil Kumar Kori (2):
>   net/dpaa: Changes to support ethdev offload APIs
>   net/dpaa2: Changes to support ethdev offload APIs

Series applied to dpdk-next-net/master, thanks.


* [PATCH v3 1/2] net/dpaa: fix the ethdev offload checks
  2018-04-11 11:05 ` [PATCH v2 0/2] Support for new Ethdev " Sunil Kumar Kori
                     ` (2 preceding siblings ...)
  2018-04-12 18:17   ` [PATCH v2 0/2] Support for new Ethdev " Ferruh Yigit
@ 2018-04-24 15:06   ` Hemant Agrawal
  2018-04-24 15:06     ` [PATCH v3 2/2] net/dpaa2: " Hemant Agrawal
                       ` (2 more replies)
  3 siblings, 3 replies; 17+ messages in thread
From: Hemant Agrawal @ 2018-04-24 15:06 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, shreyansh.jain, Sunil Kumar Kori

From: Sunil Kumar Kori <sunil.kori@nxp.com>

Fixes: 16e2c27f4fc7 ("net/dpaa: support new ethdev offload APIs")

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
---
 drivers/net/dpaa/dpaa_ethdev.c | 89 +++++++++++++++++++++++++++---------------
 1 file changed, 57 insertions(+), 32 deletions(-)

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index b2740b4..32d36f2 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -45,6 +45,33 @@
 #include <fsl_bman.h>
 #include <fsl_fman.h>
 
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_MULTI_SEGS |
+		DEV_TX_OFFLOAD_MT_LOCKFREE |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
 /* At present we only allow up to 4 push mode queues - as each of this queue
@@ -143,35 +170,41 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
-	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
 	uint64_t tx_offloads = eth_conf->txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	dpaa_eth_dev_info(dev, &dev_info);
-	if (((~(dev_info.rx_offload_capa) & rx_offloads) != 0)) {
-		DPAA_PMD_ERR("Some Rx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
+	/* Rx offloads validation */
+	if (dev_rx_offloads_nodis & ~rx_offloads) {
+		DPAA_PMD_WARN(
+		"Rx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			rx_offloads, dev_rx_offloads_nodis);
 	}
-
-	if (((~(dev_info.tx_offload_capa) & tx_offloads) != 0)) {
-		DPAA_PMD_ERR("Some Tx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			tx_offloads, dev_info.tx_offload_capa);
+	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
+		DPAA_PMD_ERR(
+		"Rx offloads non supported - requested 0x%" PRIx64
+		" supported 0x%" PRIx64,
+			rx_offloads,
+			dev_rx_offloads_sup | dev_rx_offloads_nodis);
 		return -ENOTSUP;
 	}
 
-	if (((rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) == 0) ||
-		((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) == 0) ||
-		((rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) == 0) ||
-		((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) ||
-		((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) ||
-		((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0)) {
-			DPAA_PMD_ERR(" Cksum offloading is enabled by default "
-			" Cannot be disabled. So ignoring this configuration ");
+	/* Tx offloads validation */
+	if (dev_tx_offloads_nodis & ~tx_offloads) {
+		DPAA_PMD_WARN(
+		"Tx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			tx_offloads, dev_tx_offloads_nodis);
+	}
+	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
+		DPAA_PMD_ERR(
+		"Tx offloads non supported - requested 0x%" PRIx64
+		" supported 0x%" PRIx64,
+			tx_offloads,
+			dev_tx_offloads_sup | dev_tx_offloads_nodis);
+		return -ENOTSUP;
 	}
 
 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
@@ -290,18 +323,10 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
 				ETH_LINK_SPEED_10G);
-	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM)  |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER;
-	dev_info->tx_offload_capa =
-		(DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM)  |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup |
+					dev_rx_offloads_nodis;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup |
+					dev_tx_offloads_nodis;
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
-- 
2.7.4


* [PATCH v3 2/2] net/dpaa2: fix the ethdev offload checks
  2018-04-24 15:06   ` [PATCH v3 1/2] net/dpaa: fix the ethdev offload checks Hemant Agrawal
@ 2018-04-24 15:06     ` Hemant Agrawal
  2018-04-24 16:43     ` [PATCH v3 1/2] net/dpaa: " Ferruh Yigit
  2018-04-24 17:16     ` [PATCH v4 " Hemant Agrawal
  2 siblings, 0 replies; 17+ messages in thread
From: Hemant Agrawal @ 2018-04-24 15:06 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, shreyansh.jain, Sunil Kumar Kori

From: Sunil Kumar Kori <sunil.kori@nxp.com>

Fixes: 0ebce6129bc6 ("net/dpaa2: support new ethdev offload APIs")

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.c | 88 +++++++++++++++++++++++++++-------------
 1 file changed, 60 insertions(+), 28 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 54ab9eb..96a1cc4 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -27,6 +27,36 @@
 #include "dpaa2_ethdev.h"
 #include <fsl_qbman_debug.h>
 
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+		DEV_TX_OFFLOAD_MULTI_SEGS |
+		DEV_TX_OFFLOAD_MT_LOCKFREE |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
 struct rte_dpaa2_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
 	uint8_t page_id; /* dpni statistics page id */
@@ -170,24 +200,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
 	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup |
+					dev_rx_offloads_nodis;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup |
+					dev_tx_offloads_nodis;
 	dev_info->speed_capa = ETH_LINK_SPEED_1G |
 			ETH_LINK_SPEED_2_5G |
 			ETH_LINK_SPEED_10G;
@@ -277,7 +293,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	struct fsl_mc_io *dpni = priv->hw;
 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
-	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
 	uint64_t tx_offloads = eth_conf->txmode.offloads;
 	int rx_l3_csum_offload = false;
@@ -288,18 +303,35 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	dpaa2_dev_info_get(dev, &dev_info);
-	if ((~(dev_info.rx_offload_capa) & rx_offloads) != 0) {
-		DPAA2_PMD_ERR("Some Rx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			rx_offloads, dev_info.rx_offload_capa);
+	/* Rx offloads validation */
+	if (dev_rx_offloads_nodis & ~rx_offloads) {
+		DPAA2_PMD_WARN(
+		"Rx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			rx_offloads, dev_rx_offloads_nodis);
+	}
+	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
+		DPAA2_PMD_ERR(
+		"Rx offloads non supported - requested 0x%" PRIx64
+		" supported 0x%" PRIx64,
+			rx_offloads,
+			dev_rx_offloads_sup | dev_rx_offloads_nodis);
 		return -ENOTSUP;
 	}
 
-	if ((~(dev_info.tx_offload_capa) & tx_offloads) != 0) {
-		DPAA2_PMD_ERR("Some Tx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			tx_offloads, dev_info.tx_offload_capa);
+	/* Tx offloads validation */
+	if (dev_tx_offloads_nodis & ~tx_offloads) {
+		DPAA2_PMD_WARN(
+		"Tx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			tx_offloads, dev_tx_offloads_nodis);
+	}
+	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
+		DPAA2_PMD_ERR(
+		"Tx offloads non supported - requested 0x%" PRIx64
+		" supported 0x%" PRIx64,
+			tx_offloads,
+			dev_tx_offloads_sup | dev_tx_offloads_nodis);
 		return -ENOTSUP;
 	}
 
-- 
2.7.4


* Re: [PATCH v3 1/2] net/dpaa: fix the ethdev offload checks
  2018-04-24 15:06   ` [PATCH v3 1/2] net/dpaa: fix the ethdev offload checks Hemant Agrawal
  2018-04-24 15:06     ` [PATCH v3 2/2] net/dpaa2: " Hemant Agrawal
@ 2018-04-24 16:43     ` Ferruh Yigit
  2018-04-24 17:23       ` Hemant Agrawal
  2018-04-24 17:16     ` [PATCH v4 " Hemant Agrawal
  2 siblings, 1 reply; 17+ messages in thread
From: Ferruh Yigit @ 2018-04-24 16:43 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: shreyansh.jain, Sunil Kumar Kori

On 4/24/2018 4:06 PM, Hemant Agrawal wrote:
> From: Sunil Kumar Kori <sunil.kori@nxp.com>
> 
> Fixes: 16e2c27f4fc7 ("net/dpaa: support new ethdev offload APIs")
> 
> Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
> ---
>  drivers/net/dpaa/dpaa_ethdev.c | 89 +++++++++++++++++++++++++++---------------
>  1 file changed, 57 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
> index b2740b4..32d36f2 100644
> --- a/drivers/net/dpaa/dpaa_ethdev.c
> +++ b/drivers/net/dpaa/dpaa_ethdev.c
> @@ -45,6 +45,33 @@
>  #include <fsl_bman.h>
>  #include <fsl_fman.h>
>  
> +/* Supported Rx offloads */
> +static uint64_t dev_rx_offloads_sup =
> +		DEV_RX_OFFLOAD_JUMBO_FRAME;
> +
> +/* Rx offloads which cannot be disabled */
> +static uint64_t dev_rx_offloads_nodis =
> +		DEV_RX_OFFLOAD_IPV4_CKSUM |
> +		DEV_RX_OFFLOAD_UDP_CKSUM |
> +		DEV_RX_OFFLOAD_TCP_CKSUM |
> +		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
> +		DEV_RX_OFFLOAD_CRC_STRIP |
> +		DEV_RX_OFFLOAD_SCATTER;
> +
> +/* Supported Tx offloads */
> +static uint64_t dev_tx_offloads_sup;
> +
> +/* Tx offloads which cannot be disabled */
> +static uint64_t dev_tx_offloads_nodis =
> +		DEV_TX_OFFLOAD_IPV4_CKSUM |
> +		DEV_TX_OFFLOAD_UDP_CKSUM |
> +		DEV_TX_OFFLOAD_TCP_CKSUM |
> +		DEV_TX_OFFLOAD_SCTP_CKSUM |
> +		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
> +		DEV_TX_OFFLOAD_MULTI_SEGS |
> +		DEV_TX_OFFLOAD_MT_LOCKFREE |
> +		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
> +
>  /* Keep track of whether QMAN and BMAN have been globally initialized */
>  static int is_global_init;
>  /* At present we only allow up to 4 push mode queues - as each of this queue
> @@ -143,35 +170,41 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
>  {
>  	struct dpaa_if *dpaa_intf = dev->data->dev_private;
>  	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
> -	struct rte_eth_dev_info dev_info;
>  	uint64_t rx_offloads = eth_conf->rxmode.offloads;
>  	uint64_t tx_offloads = eth_conf->txmode.offloads;
>  
>  	PMD_INIT_FUNC_TRACE();
>  
> -	dpaa_eth_dev_info(dev, &dev_info);
> -	if (((~(dev_info.rx_offload_capa) & rx_offloads) != 0)) {
> -		DPAA_PMD_ERR("Some Rx offloads are not supported "
> -			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
> -			rx_offloads, dev_info.rx_offload_capa);
> -		return -ENOTSUP;
> +	/* Rx offloads validation */
> +	if (dev_rx_offloads_nodis & ~rx_offloads) {
> +		DPAA_PMD_WARN(
> +		"Rx offloads non configurable - requested 0x%" PRIx64
> +		" ignored 0x%" PRIx64,
> +			rx_offloads, dev_rx_offloads_nodis);
>  	}
> -
> -	if (((~(dev_info.tx_offload_capa) & tx_offloads) != 0)) {
> -		DPAA_PMD_ERR("Some Tx offloads are not supported "
> -			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
> -			tx_offloads, dev_info.tx_offload_capa);
> +	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
> +		DPAA_PMD_ERR(
> +		"Rx offloads non supported - requested 0x%" PRIx64
> +		" supported 0x%" PRIx64,
> +			rx_offloads,
> +			dev_rx_offloads_sup | dev_rx_offloads_nodis);
>  		return -ENOTSUP;
>  	}

Hi Hemant,

Overall this looks good to me, thanks.

I would only like to ask whether you prefer to swap the nodis and
not-supported checks.

With the current order, if a request both contains an unsupported offload and
does not enable all of the nodis offloads, both logs will be printed before
the error is returned. Since an error is returned anyway, do you really need
the "non configurable" log?

If you swap the checks, then when any unsupported offload is requested only
that log is printed and the error returned, without checking the nodis
offloads.
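
For the Rx side that would look roughly like this (sketch only):

	/* Sketch: reject unsupported offloads first; only then warn about
	 * the always-enabled (nodis) ones. */
	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
		DPAA_PMD_ERR(
		"Rx offloads non supported - requested 0x%" PRIx64
		" supported 0x%" PRIx64,
			rx_offloads,
			dev_rx_offloads_sup | dev_rx_offloads_nodis);
		return -ENOTSUP;
	}
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA_PMD_WARN(
		"Rx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			rx_offloads, dev_rx_offloads_nodis);
	}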

It is up to you; please let me know if you want to go with the existing set.

Thanks,
ferruh


* [PATCH v4 1/2] net/dpaa: fix the ethdev offload checks
  2018-04-24 15:06   ` [PATCH v3 1/2] net/dpaa: fix the ethdev offload checks Hemant Agrawal
  2018-04-24 15:06     ` [PATCH v3 2/2] net/dpaa2: " Hemant Agrawal
  2018-04-24 16:43     ` [PATCH v3 1/2] net/dpaa: " Ferruh Yigit
@ 2018-04-24 17:16     ` Hemant Agrawal
  2018-04-24 17:16       ` [PATCH v4 2/2] net/dpaa2: " Hemant Agrawal
  2018-04-24 18:04       ` [PATCH v4 1/2] net/dpaa: " Ferruh Yigit
  2 siblings, 2 replies; 17+ messages in thread
From: Hemant Agrawal @ 2018-04-24 17:16 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, shreyansh.jain, Sunil Kumar Kori

From: Sunil Kumar Kori <sunil.kori@nxp.com>

Fixes: 16e2c27f4fc7 ("net/dpaa: support new ethdev offload APIs")

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
---
 drivers/net/dpaa/dpaa_ethdev.c | 87 +++++++++++++++++++++++++++---------------
 1 file changed, 56 insertions(+), 31 deletions(-)

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index b2740b4..6bf8c15 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -45,6 +45,33 @@
 #include <fsl_bman.h>
 #include <fsl_fman.h>
 
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_MULTI_SEGS |
+		DEV_TX_OFFLOAD_MT_LOCKFREE |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
 /* At present we only allow up to 4 push mode queues - as each of this queue
@@ -143,35 +170,41 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
-	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
 	uint64_t tx_offloads = eth_conf->txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	dpaa_eth_dev_info(dev, &dev_info);
-	if (((~(dev_info.rx_offload_capa) & rx_offloads) != 0)) {
-		DPAA_PMD_ERR("Some Rx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			rx_offloads, dev_info.rx_offload_capa);
+	/* Rx offloads validation */
+	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
+		DPAA_PMD_ERR(
+		"Rx offloads non supported - requested 0x%" PRIx64
+		" supported 0x%" PRIx64,
+			rx_offloads,
+			dev_rx_offloads_sup | dev_rx_offloads_nodis);
 		return -ENOTSUP;
 	}
+	if (dev_rx_offloads_nodis & ~rx_offloads) {
+		DPAA_PMD_WARN(
+		"Rx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			rx_offloads, dev_rx_offloads_nodis);
+	}
 
-	if (((~(dev_info.tx_offload_capa) & tx_offloads) != 0)) {
-		DPAA_PMD_ERR("Some Tx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			tx_offloads, dev_info.tx_offload_capa);
+	/* Tx offloads validation */
+	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
+		DPAA_PMD_ERR(
+		"Tx offloads non supported - requested 0x%" PRIx64
+		" supported 0x%" PRIx64,
+			tx_offloads,
+			dev_tx_offloads_sup | dev_tx_offloads_nodis);
 		return -ENOTSUP;
 	}
-
-	if (((rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) == 0) ||
-		((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) == 0) ||
-		((rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) == 0) ||
-		((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) ||
-		((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) ||
-		((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0)) {
-			DPAA_PMD_ERR(" Cksum offloading is enabled by default "
-			" Cannot be disabled. So ignoring this configuration ");
+	if (dev_tx_offloads_nodis & ~tx_offloads) {
+		DPAA_PMD_WARN(
+		"Tx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			tx_offloads, dev_tx_offloads_nodis);
 	}
 
 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
@@ -290,18 +323,10 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
 				ETH_LINK_SPEED_10G);
-	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM   |
-		DEV_RX_OFFLOAD_TCP_CKSUM)  |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER;
-	dev_info->tx_offload_capa =
-		(DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM)  |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup |
+					dev_rx_offloads_nodis;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup |
+					dev_tx_offloads_nodis;
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
-- 
2.7.4

* [PATCH v4 2/2] net/dpaa2: fix the ethdev offload checks
  2018-04-24 17:16     ` [PATCH v4 " Hemant Agrawal
@ 2018-04-24 17:16       ` Hemant Agrawal
  2018-04-24 18:04       ` [PATCH v4 1/2] net/dpaa: " Ferruh Yigit
  1 sibling, 0 replies; 17+ messages in thread
From: Hemant Agrawal @ 2018-04-24 17:16 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, shreyansh.jain, Sunil Kumar Kori

From: Sunil Kumar Kori <sunil.kori@nxp.com>

Fixes: 0ebce6129bc6 ("net/dpaa2: support new ethdev offload APIs")

Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.c | 88 +++++++++++++++++++++++++++-------------
 1 file changed, 60 insertions(+), 28 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 54ab9eb..2f34022 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -27,6 +27,36 @@
 #include "dpaa2_ethdev.h"
 #include <fsl_qbman_debug.h>
 
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+		DEV_TX_OFFLOAD_MULTI_SEGS |
+		DEV_TX_OFFLOAD_MT_LOCKFREE |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
 struct rte_dpaa2_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
 	uint8_t page_id; /* dpni statistics page id */
@@ -170,24 +200,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
 	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_VLAN_FILTER |
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_JUMBO_FRAME |
-		DEV_RX_OFFLOAD_SCATTER;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
-		DEV_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup |
+					dev_rx_offloads_nodis;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup |
+					dev_tx_offloads_nodis;
 	dev_info->speed_capa = ETH_LINK_SPEED_1G |
 			ETH_LINK_SPEED_2_5G |
 			ETH_LINK_SPEED_10G;
@@ -277,7 +293,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	struct fsl_mc_io *dpni = priv->hw;
 	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
-	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads = eth_conf->rxmode.offloads;
 	uint64_t tx_offloads = eth_conf->txmode.offloads;
 	int rx_l3_csum_offload = false;
@@ -288,20 +303,37 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	dpaa2_dev_info_get(dev, &dev_info);
-	if ((~(dev_info.rx_offload_capa) & rx_offloads) != 0) {
-		DPAA2_PMD_ERR("Some Rx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			rx_offloads, dev_info.rx_offload_capa);
+	/* Rx offloads validation */
+	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
+		DPAA2_PMD_ERR(
+		"Rx offloads non supported - requested 0x%" PRIx64
+		" supported 0x%" PRIx64,
+			rx_offloads,
+			dev_rx_offloads_sup | dev_rx_offloads_nodis);
 		return -ENOTSUP;
 	}
+	if (dev_rx_offloads_nodis & ~rx_offloads) {
+		DPAA2_PMD_WARN(
+		"Rx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			rx_offloads, dev_rx_offloads_nodis);
+	}
 
-	if ((~(dev_info.tx_offload_capa) & tx_offloads) != 0) {
-		DPAA2_PMD_ERR("Some Tx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			tx_offloads, dev_info.tx_offload_capa);
+	/* Tx offloads validation */
+	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
+		DPAA2_PMD_ERR(
+		"Tx offloads non supported - requested 0x%" PRIx64
+		" supported 0x%" PRIx64,
+			tx_offloads,
+			dev_tx_offloads_sup | dev_tx_offloads_nodis);
 		return -ENOTSUP;
 	}
+	if (dev_tx_offloads_nodis & ~tx_offloads) {
+		DPAA2_PMD_WARN(
+		"Tx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			tx_offloads, dev_tx_offloads_nodis);
+	}
 
 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
-- 
2.7.4

* Re: [PATCH v3 1/2] net/dpaa: fix the ethdev offload checks
  2018-04-24 16:43     ` [PATCH v3 1/2] net/dpaa: " Ferruh Yigit
@ 2018-04-24 17:23       ` Hemant Agrawal
  0 siblings, 0 replies; 17+ messages in thread
From: Hemant Agrawal @ 2018-04-24 17:23 UTC (permalink / raw)
  To: Ferruh Yigit, dev; +Cc: Shreyansh Jain, Sunil Kumar Kori

Hi Ferruh,


> Hi Hemant,
> 
> Overall this looks good to me, thanks.
> 
> I would only like to ask whether you prefer to swap the nodis and not-supported checks.
> 
> With the current order, a request that contains an unsupported offload and also
> leaves some nodis offloads disabled will print both logs and return an error.
> Since it returns an error anyway, do you really need the "non configurable" log?
> 
> If you swap the checks, a request with an unsupported offload will only print the
> log for that and return the error, without checking/caring about the nodis offloads.
> 
> It is up to you, please let me know if you want to go with the existing set.

[Hemant]  Thanks for the review. In v4, I have reversed the order of the checks.

Regards,
Hemant

> 
> Thanks,
> ferruh


* Re: [PATCH v4 1/2] net/dpaa: fix the ethdev offload checks
  2018-04-24 17:16     ` [PATCH v4 " Hemant Agrawal
  2018-04-24 17:16       ` [PATCH v4 2/2] net/dpaa2: " Hemant Agrawal
@ 2018-04-24 18:04       ` Ferruh Yigit
  1 sibling, 0 replies; 17+ messages in thread
From: Ferruh Yigit @ 2018-04-24 18:04 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: shreyansh.jain, Sunil Kumar Kori

On 4/24/2018 6:16 PM, Hemant Agrawal wrote:
> From: Sunil Kumar Kori <sunil.kori@nxp.com>
> 
> Fixes: 16e2c27f4fc7 ("net/dpaa: support new ethdev offload APIs")
> 
> Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>

Series applied to dpdk-next-net/master, thanks.

Thread overview: 17+ messages
2018-04-09 10:26 [PATCH 0/2] Support for new Ethdev offload APIs Sunil Kumar Kori
2018-04-09 10:26 ` [PATCH 1/2] net/dpaa: Changes to support ethdev " Sunil Kumar Kori
2018-04-09 13:19   ` Sunil Kumar Kori
2018-04-09 13:19     ` [PATCH 2/2] net/dpaa2: " Sunil Kumar Kori
2018-04-10 16:40     ` [PATCH 1/2] net/dpaa: " Ferruh Yigit
2018-04-09 10:26 ` [PATCH 2/2] net/dpaa2: " Sunil Kumar Kori
2018-04-11 11:05 ` [PATCH v2 0/2] Support for new Ethdev " Sunil Kumar Kori
2018-04-11 11:05   ` [PATCH v2 1/2] net/dpaa: Changes to support ethdev " Sunil Kumar Kori
2018-04-11 11:05   ` [PATCH v2 2/2] net/dpaa2: " Sunil Kumar Kori
2018-04-12 18:17   ` [PATCH v2 0/2] Support for new Ethdev " Ferruh Yigit
2018-04-24 15:06   ` [PATCH v3 1/2] net/dpaa: fix the ethdev offload checks Hemant Agrawal
2018-04-24 15:06     ` [PATCH v3 2/2] net/dpaa2: " Hemant Agrawal
2018-04-24 16:43     ` [PATCH v3 1/2] net/dpaa: " Ferruh Yigit
2018-04-24 17:23       ` Hemant Agrawal
2018-04-24 17:16     ` [PATCH v4 " Hemant Agrawal
2018-04-24 17:16       ` [PATCH v4 2/2] net/dpaa2: " Hemant Agrawal
2018-04-24 18:04       ` [PATCH v4 1/2] net/dpaa: " Ferruh Yigit
