* [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver
@ 2019-09-06 12:00 Yahui Cao
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 01/12] net/ice: initialize and set up flow director Yahui Cao
                   ` (11 more replies)
  0 siblings, 12 replies; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

This patch series adds Intel Columbiaville 800 Series Ethernet Flow
Director support for the RTE_FLOW API.

- Patches 01-03 cover FDIR init, teardown and configuration
- The remaining patches enable FDIR RTE_FLOW support

The patchset depends on:
http://patchwork.dpdk.org/project/dpdk/list/?series=6228
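
Once applied, an FDIR rule is requested through the standard rte_flow
API. Below is a minimal sketch of a rule that steers IPv4/UDP traffic
with a given destination port to an RX queue (the helper name, port id,
queue index and UDP port are illustrative, not part of the series):

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Sketch: steer IPv4/UDP packets with dst port 53 to RX queue 3. */
static struct rte_flow *
fdir_udp_to_queue(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_udp udp_spec = {
		.hdr = { .dst_port = rte_cpu_to_be_16(53) },
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .dst_port = rte_cpu_to_be_16(0xffff) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
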
---
Beilei Xing (3):
  net/ice: initialize and set up flow director
  net/ice: tear down flow director
  net/ice: enable input set configuration

Yahui Cao (9):
  net/ice: add FDIR create and destroy
  net/ice: add FDIR mark action support
  net/ice: add hash table for FDIR
  net/ice: enable FDIR queue group
  net/ice: add FDIR dst mac support
  net/ice: add FDIR counter resource init/release
  net/ice: add FDIR counter support for flow id
  net/ice: add FDIR counter support for flow shared
  net/ice: add FDIR non-word aligned field support

 drivers/net/ice/Makefile          |    1 +
 drivers/net/ice/ice_ethdev.c      |  107 +-
 drivers/net/ice/ice_ethdev.h      |   85 ++
 drivers/net/ice/ice_fdir_filter.c | 1514 +++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.c        |  505 ++++++++++
 drivers/net/ice/ice_rxtx.h        |    9 +
 drivers/net/ice/meson.build       |    3 +-
 7 files changed, 2204 insertions(+), 20 deletions(-)
 create mode 100644 drivers/net/ice/ice_fdir_filter.c

-- 
2.17.1



* [dpdk-dev] [dpdk-dev 01/12] net/ice: initialize and set up flow director
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-07 11:01   ` Ye Xiaolong
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 02/12] net/ice: tear down " Yahui Cao
                   ` (10 subsequent siblings)
  11 siblings, 1 reply; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

From: Beilei Xing <beilei.xing@intel.com>

Enable flow director, including:
 - Create the control VSI
 - Allocate and set up the queue pair
 - Reserve memory for the programming packet

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
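The programming cycle added in ice_fdir_programming() below is driven
from the filter create/destroy path introduced later in the series. A
minimal sketch of the intended call sequence, with `filter` standing in
for a parsed rule and error handling condensed:

	struct ice_fltr_desc desc;
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;

	memset(&desc, 0, sizeof(desc));
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, true /* add */);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	if (ice_fdir_get_prgm_pkt(&filter->input, pkt, false))
		return -EINVAL;

	/* Post the filter descriptor plus a dummy-packet descriptor on
	 * the control TX queue, then poll the control RX queue for the
	 * programming status write-back.
	 */
	return ice_fdir_programming(pf, &desc);
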
 drivers/net/ice/Makefile          |   1 +
 drivers/net/ice/ice_ethdev.c      | 107 +++++--
 drivers/net/ice/ice_ethdev.h      |  19 ++
 drivers/net/ice/ice_fdir_filter.c | 139 +++++++++
 drivers/net/ice/ice_rxtx.c        | 448 ++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.h        |   7 +
 drivers/net/ice/meson.build       |   3 +-
 7 files changed, 704 insertions(+), 20 deletions(-)
 create mode 100644 drivers/net/ice/ice_fdir_filter.c

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index ae53c2646..cbbd03fcf 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -62,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_fdir_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 647aca3ed..cb32f08df 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1097,11 +1097,20 @@ ice_pf_sw_init(struct rte_eth_dev *dev)
 				  hw->func_caps.common_cap.num_rxq);
 
 	pf->lan_nb_qps = pf->lan_nb_qp_max;
+	if (hw->func_caps.fd_fltr_guar > 0 ||
+	    hw->func_caps.fd_fltr_best_effort > 0) {
+		pf->flags |= ICE_FLAG_FDIR;
+		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
+		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
+	} else {
+		pf->fdir_nb_qps = 0;
+	}
+	pf->fdir_qp_offset = 0;
 
 	return 0;
 }
 
-static struct ice_vsi *
+struct ice_vsi *
 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 {
 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
@@ -1113,6 +1122,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 	struct rte_ether_addr mac_addr;
 	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	uint8_t tc_bitmap = 0x1;
+	uint16_t cfg;
 
 	/* hw->num_lports = 1 in NIC mode */
 	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
@@ -1136,14 +1146,10 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
-	/* base_queue in used in queue mapping of VSI add/update command.
-	 * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ
-	 * cases in the first stage. Only Main VSI.
-	 */
-	vsi->base_queue = 0;
 	switch (type) {
 	case ICE_VSI_PF:
 		vsi->nb_qps = pf->lan_nb_qps;
+		vsi->base_queue = 1;
 		ice_vsi_config_default_rss(&vsi_ctx.info);
 		vsi_ctx.alloc_from_pool = true;
 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
@@ -1157,6 +1163,18 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
 		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
 					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+
+		/* FDIR */
+		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
+			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
+		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
+		cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
+		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
+		vsi_ctx.info.max_fd_fltr_dedicated =
+			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
+		vsi_ctx.info.max_fd_fltr_shared =
+			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
+
 		/* Enable VLAN/UP trip */
 		ret = ice_vsi_config_tc_queue_mapping(vsi,
 						      &vsi_ctx.info,
@@ -1169,6 +1187,28 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 			goto fail_mem;
 		}
 
+		break;
+	case ICE_VSI_CTRL:
+		vsi->nb_qps = pf->fdir_nb_qps;
+		vsi->base_queue = ICE_FDIR_QUEUE_ID;
+		vsi_ctx.alloc_from_pool = true;
+		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
+
+		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
+		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
+		cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
+		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
+		vsi_ctx.info.sw_id = hw->port_info->sw_id;
+		ret = ice_vsi_config_tc_queue_mapping(vsi,
+						      &vsi_ctx.info,
+						      ICE_DEFAULT_TCMAP);
+		if (ret) {
+			PMD_INIT_LOG(ERR,
+				     "tc queue mapping with vsi failed, "
+				     "err = %d",
+				     ret);
+			goto fail_mem;
+		}
 		break;
 	default:
 		/* for other types of VSI */
@@ -1187,10 +1227,19 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 		}
 		vsi->msix_intr = ret;
 		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
+	} else if (type == ICE_VSI_CTRL) {
+		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
+				    vsi->vsi_id, ret);
+		}
+		vsi->msix_intr = ret;
+		vsi->nb_msix = 1;
 	} else {
 		vsi->msix_intr = 0;
 		vsi->nb_msix = 0;
 	}
+
 	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
 	if (ret != ICE_SUCCESS) {
 		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
@@ -1202,20 +1251,22 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 	pf->vsis_allocated = vsi_ctx.vsis_allocd;
 	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
 
-	/* MAC configuration */
-	rte_memcpy(pf->dev_addr.addr_bytes,
-		   hw->port_info->mac.perm_addr,
-		   ETH_ADDR_LEN);
+	if (type == ICE_VSI_PF) {
+		/* MAC configuration */
+		rte_memcpy(pf->dev_addr.addr_bytes,
+			   hw->port_info->mac.perm_addr,
+			   ETH_ADDR_LEN);
 
-	rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
-	ret = ice_add_mac_filter(vsi, &mac_addr);
-	if (ret != ICE_SUCCESS)
-		PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
+		rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
+		ret = ice_add_mac_filter(vsi, &mac_addr);
+		if (ret != ICE_SUCCESS)
+			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
 
-	rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
-	ret = ice_add_mac_filter(vsi, &mac_addr);
-	if (ret != ICE_SUCCESS)
-		PMD_INIT_LOG(ERR, "Failed to add MAC filter");
+		rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
+		ret = ice_add_mac_filter(vsi, &mac_addr);
+		if (ret != ICE_SUCCESS)
+			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
+	}
 
 	/* At the beginning, only TC0. */
 	/* What we need here is the maximam number of the TX queues.
@@ -1253,7 +1304,9 @@ ice_send_driver_ver(struct ice_hw *hw)
 static int
 ice_pf_setup(struct ice_pf *pf)
 {
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
 	struct ice_vsi *vsi;
+	uint16_t unused;
 
 	/* Clear all stats counters */
 	pf->offset_loaded = FALSE;
@@ -1262,6 +1315,13 @@ ice_pf_setup(struct ice_pf *pf)
 	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
 	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
 
+	/* force guaranteed filter pool for PF */
+	ice_alloc_fd_guar_item(hw, &unused,
+			       hw->func_caps.fd_fltr_guar);
+	/* force shared filter pool for PF */
+	ice_alloc_fd_shrd_item(hw, &unused,
+			       hw->func_caps.fd_fltr_best_effort);
+
 	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
 	if (!vsi) {
 		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
@@ -1698,7 +1758,7 @@ ice_dev_init(struct rte_eth_dev *dev)
 	return ret;
 }
 
-static int
+int
 ice_release_vsi(struct ice_vsi *vsi)
 {
 	struct ice_hw *hw;
@@ -1780,6 +1840,9 @@ ice_dev_stop(struct rte_eth_dev *dev)
 	/* disable all queue interrupts */
 	ice_vsi_disable_queues_intr(main_vsi);
 
+	if (pf->fdir.fdir_vsi)
+		ice_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
+
 	/* Clear all queues and release mbufs */
 	ice_clear_queues(dev);
 
@@ -2117,6 +2180,12 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
 	/* Enable interrupts for all the queues */
 	ice_vsi_enable_queues_intr(vsi);
 
+	/* Enable FDIR MSIX interrupt */
+	if (pf->fdir.fdir_vsi) {
+		ice_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+		ice_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+	}
+
 	rte_intr_enable(intr_handle);
 
 	return 0;
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index d1d07641d..c43242b63 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -249,6 +249,17 @@ TAILQ_HEAD(ice_flow_list, rte_flow);
 struct ice_flow_parser;
 TAILQ_HEAD(ice_parser_list, ice_flow_parser);
 
+/**
+ *  A structure used to define fields of FDIR related info.
+ */
+struct ice_fdir_info {
+	struct ice_vsi *fdir_vsi;     /* pointer to fdir VSI structure */
+	struct ice_tx_queue *txq;
+	struct ice_rx_queue *rxq;
+	void *prg_pkt;                 /* memory for fdir program packet */
+	uint64_t dma_addr;             /* physical address of packet memory */
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -268,6 +279,9 @@ struct ice_pf {
 	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
 	uint16_t base_queue; /* The base queue pairs index  in the device */
+	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+	uint16_t fdir_qp_offset;
+	struct ice_fdir_info fdir; /* flow director info */
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
 	/* internal packet statistics, it should be excluded from the total */
@@ -348,6 +362,11 @@ struct ice_vsi_vlan_pvid_info {
 #define ICE_PF_TO_ETH_DEV(pf) \
 	(((struct ice_pf *)pf)->adapter->eth_dev)
 
+struct ice_vsi *
+ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
+int
+ice_release_vsi(struct ice_vsi *vsi);
+
 static inline int
 ice_align_floor(int n)
 {
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
new file mode 100644
index 000000000..03d143058
--- /dev/null
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -0,0 +1,139 @@
+#include <stdio.h>
+#include <rte_flow.h>
+#include "base/ice_fdir.h"
+#include "base/ice_flow.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+#include "ice_rxtx.h"
+#include "ice_generic_flow.h"
+
+static const struct rte_memzone *
+ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
+{
+	const struct rte_memzone *mz;
+
+	mz = rte_memzone_lookup(name);
+	if (mz)
+		return mz;
+
+	mz = rte_memzone_reserve_aligned(name, len, socket_id,
+					 RTE_MEMZONE_IOVA_CONTIG,
+					 ICE_RING_BASE_ALIGN);
+	return mz;
+}
+
+#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"
+
+/*
+ * ice_fdir_setup - reserve and initialize the Flow Director resources
+ * @pf: board private structure
+ */
+static int
+ice_fdir_setup(struct ice_pf *pf)
+{
+	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	const struct rte_memzone *mz = NULL;
+	char z_name[RTE_MEMZONE_NAMESIZE];
+	struct ice_vsi *vsi;
+	int err = ICE_SUCCESS;
+
+	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
+		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
+		return -ENOTSUP;
+	}
+
+	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
+		    " fd_fltr_best_effort = %u.",
+		    hw->func_caps.fd_fltr_guar,
+		    hw->func_caps.fd_fltr_best_effort);
+
+	if (pf->fdir.fdir_vsi) {
+		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
+		return ICE_SUCCESS;
+	}
+
+	/* make new FDIR VSI */
+	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
+		return -EINVAL;
+	}
+	pf->fdir.fdir_vsi = vsi;
+
+	/* FDIR TX queue setup */
+	err = ice_fdir_setup_tx_resources(pf);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
+		goto fail_setup_tx;
+	}
+
+	/* FDIR RX queue setup */
+	err = ice_fdir_setup_rx_resources(pf);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
+		goto fail_setup_rx;
+	}
+
+	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
+		goto fail_mem;
+	}
+
+	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
+		goto fail_mem;
+	}
+
+	/* reserve memory for the fdir programming packet */
+	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
+		 ICE_FDIR_MZ_NAME,
+		 eth_dev->data->port_id);
+	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
+	if (!mz) {
+		PMD_DRV_LOG(ERR, "Cannot init memzone for "
+			    "flow director program packet.");
+		err = -ENOMEM;
+		goto fail_mem;
+	}
+	pf->fdir.prg_pkt = mz->addr;
+	pf->fdir.dma_addr = mz->iova;
+
+	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
+		    vsi->base_queue);
+	return ICE_SUCCESS;
+
+fail_mem:
+	ice_rx_queue_release(pf->fdir.rxq);
+	pf->fdir.rxq = NULL;
+fail_setup_rx:
+	ice_tx_queue_release(pf->fdir.txq);
+	pf->fdir.txq = NULL;
+fail_setup_tx:
+	ice_release_vsi(vsi);
+	pf->fdir.fdir_vsi = NULL;
+	return err;
+}
+
+static int
+ice_init_fdir_filter(struct ice_adapter *ad)
+{
+	struct ice_pf *pf = &ad->pf;
+	int ret;
+
+	ret = ice_fdir_setup(pf);
+
+	return ret;
+}
+
+static struct ice_flow_engine ice_fdir_engine = {
+	.init = ice_init_fdir_filter,
+	.type = ICE_FLOW_ENGINE_FDIR,
+};
+
+RTE_INIT(ice_fdir_init_log)
+{
+	ice_register_flow_engine(&ice_fdir_engine);
+}
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 0282b5375..bd802e350 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -474,6 +474,175 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	return 0;
 }
 
+static enum ice_status
+ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
+{
+	struct ice_vsi *vsi = rxq->vsi;
+	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+	struct ice_rlan_ctx rx_ctx;
+	enum ice_status err;
+	uint32_t regval;
+
+	/**
+	 * The kernel driver uses the flex descriptor and sets the
+	 * register to flex descriptor mode.
+	 * DPDK uses the legacy descriptor, so set the register back
+	 * to the default value and then use legacy descriptor mode.
+	 */
+	regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+		 QRXFLXP_CNTXT_RXDID_PRIO_M;
+	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
+
+	rxq->rx_hdr_len = 0;
+	rxq->rx_buf_len = 1024;
+
+	memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
+	rx_ctx.qlen = rxq->nb_rx_desc;
+	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
+	rx_ctx.dtype = 0; /* No Header Split mode */
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	rx_ctx.dsize = 1; /* 32B descriptors */
+#endif
+	rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
+	/* TPH: Transaction Layer Packet (TLP) processing hints */
+	rx_ctx.tphrdesc_ena = 1;
+	rx_ctx.tphwdesc_ena = 1;
+	rx_ctx.tphdata_ena = 1;
+	rx_ctx.tphhead_ena = 1;
+	/* Low Receive Queue Threshold defined in 64 descriptors units.
+	 * When the number of free descriptors goes below the lrxqthresh,
+	 * an immediate interrupt is triggered.
+	 */
+	rx_ctx.lrxqthresh = 2;
+	/* default to 32 byte descriptor; VLAN tag extracted to L2TAG2 (1st) */
+	rx_ctx.l2tsel = 1;
+	rx_ctx.showiv = 0;
+	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
+
+	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
+			    rxq->queue_id);
+		return -EINVAL;
+	}
+	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
+			    rxq->queue_id);
+		return -EINVAL;
+	}
+
+	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
+
+	/* Init the Rx tail register*/
+	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+	return 0;
+}
+
+int
+ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ice_rx_queue *rxq;
+	int err;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = pf->fdir.rxq;
+	if (!rxq || !rxq->q_set) {
+		PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
+			    rx_queue_id);
+		return -EINVAL;
+	}
+
+	err = ice_fdir_program_hw_rx_queue(rxq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
+			    rx_queue_id);
+		return -EIO;
+	}
+
+	rte_wmb();
+
+	/* Init the RX tail register. */
+	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
+			    rx_queue_id);
+
+		ice_reset_rx_queue(rxq);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int
+ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_tx_queue *txq;
+	int err;
+	struct ice_vsi *vsi;
+	struct ice_hw *hw;
+	struct ice_aqc_add_tx_qgrp txq_elem;
+	struct ice_tlan_ctx tx_ctx;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = pf->fdir.txq;
+	if (!txq || !txq->q_set) {
+		PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
+			    tx_queue_id);
+		return -EINVAL;
+	}
+
+	vsi = txq->vsi;
+	hw = ICE_VSI_TO_HW(vsi);
+
+	memset(&txq_elem, 0, sizeof(txq_elem));
+	memset(&tx_ctx, 0, sizeof(tx_ctx));
+	txq_elem.num_txqs = 1;
+	txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+
+	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
+	tx_ctx.qlen = txq->nb_tx_desc;
+	tx_ctx.pf_num = hw->pf_id;
+	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+	tx_ctx.src_vsi = vsi->vsi_id;
+	tx_ctx.port_num = hw->port_info->lport;
+	tx_ctx.tso_ena = 1; /* tso enable */
+	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
+	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+
+	ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+		    ice_tlan_ctx_info);
+
+	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
+
+	/* Init the Tx tail register*/
+	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
+
+	/* FIXME: we assume TC is always 0 here */
+	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
+			      &txq_elem, sizeof(txq_elem), NULL);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
+		return -EIO;
+	}
+	/* store the schedule node id */
+	txq->q_teid = txq_elem.txqs[0].q_teid;
+
+	return 0;
+}
+
 /* Free all mbufs for descriptors in tx queue */
 static void
 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
@@ -997,6 +1166,10 @@ ice_rxd_status_to_pkt_flags(uint64_t qword)
 		  ICE_RX_DESC_FLTSTAT_RSS_HASH) ==
 		 ICE_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
 
+	/* Check if FDIR Match */
+	flags |= (qword & (1 << ICE_RX_DESC_STATUS_FLM_S) ?
+		  PKT_RX_FDIR : 0);
+
 	return flags;
 }
 
@@ -1060,6 +1233,33 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)
 		   mb->vlan_tci, mb->vlan_tci_outer);
 }
 
+#define ICE_RX_DESC_EXT_STATUS_FLEXBH_M   0x03
+#define ICE_RX_DESC_EXT_STATUS_FLEXBH_FD_ID  0x01
+
+static inline uint64_t
+ice_rxd_build_fdir(volatile union ice_rx_desc *rxdp, struct rte_mbuf *mb)
+{
+	uint64_t flags = 0;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	uint16_t flexbh;
+
+	flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+		ICE_RX_DESC_EXT_STATUS_FLEXBH_S) &
+		ICE_RX_DESC_EXT_STATUS_FLEXBH_M;
+
+	if (flexbh == ICE_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
+		mb->hash.fdir.hi =
+			rte_le_to_cpu_32(rxdp->wb.qword3.fd_id);
+		flags |= PKT_RX_FDIR_ID;
+	}
+#else
+	mb->hash.fdir.hi =
+		rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
+	flags |= PKT_RX_FDIR_ID;
+#endif
+	return flags;
+}
+
 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
 #define ICE_LOOK_AHEAD 8
 #if (ICE_LOOK_AHEAD != 8)
@@ -1127,6 +1327,8 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 				mb->hash.rss =
 					rte_le_to_cpu_32(
 						rxdp[j].wb.qword0.hi_dword.rss);
+			if (pkt_flags & PKT_RX_FDIR)
+				pkt_flags |= ice_rxd_build_fdir(&rxdp[j], mb);
 			mb->packet_type = ptype_tbl[(uint8_t)(
 						(qword1 &
 						 ICE_RXD_QW1_PTYPE_M) >>
@@ -1448,6 +1650,8 @@ ice_recv_scattered_pkts(void *rx_queue,
 		if (pkt_flags & PKT_RX_RSS_HASH)
 			first_seg->hash.rss =
 				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+		if (pkt_flags & PKT_RX_FDIR)
+			pkt_flags |= ice_rxd_build_fdir(&rxd, first_seg);
 
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
@@ -1635,6 +1839,127 @@ ice_free_queues(struct rte_eth_dev *dev)
 	dev->data->nb_tx_queues = 0;
 }
 
+#define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC
+#define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC
+
+int
+ice_fdir_setup_tx_resources(struct ice_pf *pf)
+{
+	struct ice_tx_queue *txq;
+	const struct rte_memzone *tz = NULL;
+	uint32_t ring_size;
+	struct rte_eth_dev *dev;
+
+	if (!pf) {
+		PMD_DRV_LOG(ERR, "PF is not available");
+		return -EINVAL;
+	}
+
+	dev = pf->adapter->eth_dev;
+
+	/* Allocate the TX queue data structure. */
+	txq = rte_zmalloc_socket("ice fdir tx queue",
+				 sizeof(struct ice_tx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 SOCKET_ID_ANY);
+	if (!txq) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+			    "tx queue structure.");
+		return -ENOMEM;
+	}
+
+	/* Allocate TX hardware ring descriptors. */
+	ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
+	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
+
+	tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+				      ICE_FDIR_QUEUE_ID, ring_size,
+				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
+	if (!tz) {
+		ice_tx_queue_release(txq);
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
+		return -ENOMEM;
+	}
+
+	txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
+	txq->queue_id = ICE_FDIR_QUEUE_ID;
+	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+	txq->vsi = pf->fdir.fdir_vsi;
+
+	txq->tx_ring_dma = tz->iova;
+	txq->tx_ring = (struct ice_tx_desc *)tz->addr;
+	/*
+	 * No need to allocate a software ring or to reset for the FDIR
+	 * programming queue; just mark the queue as configured.
+	 */
+	txq->q_set = TRUE;
+	pf->fdir.txq = txq;
+
+	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
+
+	return ICE_SUCCESS;
+}
+
+int
+ice_fdir_setup_rx_resources(struct ice_pf *pf)
+{
+	struct ice_rx_queue *rxq;
+	const struct rte_memzone *rz = NULL;
+	uint32_t ring_size;
+	struct rte_eth_dev *dev;
+
+	if (!pf) {
+		PMD_DRV_LOG(ERR, "PF is not available");
+		return -EINVAL;
+	}
+
+	dev = pf->adapter->eth_dev;
+
+	/* Allocate the RX queue data structure. */
+	rxq = rte_zmalloc_socket("ice fdir rx queue",
+				 sizeof(struct ice_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 SOCKET_ID_ANY);
+	if (!rxq) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+			    "rx queue structure.");
+		return -ENOMEM;
+	}
+
+	/* Allocate RX hardware ring descriptors. */
+	ring_size = sizeof(union ice_rx_desc) * ICE_FDIR_NUM_RX_DESC;
+	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
+
+	rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+				      ICE_FDIR_QUEUE_ID, ring_size,
+				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
+	if (!rz) {
+		ice_rx_queue_release(rxq);
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
+		return -ENOMEM;
+	}
+
+	rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
+	rxq->queue_id = ICE_FDIR_QUEUE_ID;
+	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+	rxq->vsi = pf->fdir.fdir_vsi;
+
+	rxq->rx_ring_dma = rz->iova;
+	memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC * sizeof(union ice_rx_desc));
+	rxq->rx_ring = (union ice_rx_desc *)rz->addr;
+
+	/*
+	 * No need to allocate a software ring or to reset for the FDIR
+	 * RX queue; just mark the queue as configured.
+	 */
+	rxq->q_set = TRUE;
+	pf->fdir.rxq = rxq;
+
+	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
+
+	return ICE_SUCCESS;
+}
+
 uint16_t
 ice_recv_pkts(void *rx_queue,
 	      struct rte_mbuf **rx_pkts,
@@ -1716,6 +2041,8 @@ ice_recv_pkts(void *rx_queue,
 		if (pkt_flags & PKT_RX_RSS_HASH)
 			rxm->hash.rss =
 				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+		if (pkt_flags & PKT_RX_FDIR)
+			pkt_flags |= ice_rxd_build_fdir(&rxd, rxm);
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
 		rx_pkts[nb_rx++] = rxm;
@@ -3061,3 +3388,124 @@ ice_set_default_ptype_table(struct rte_eth_dev *dev)
 	for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
 		ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
 }
+
+/*
+ * Check the programming status descriptor in the RX queue.
+ * Done after the Flow Director filter has been programmed on
+ * the TX queue.
+ */
+static inline int
+ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
+{
+	volatile union ice_rx_desc *rxdp;
+	uint64_t qword1;
+	uint32_t rx_status;
+	uint32_t len, id;
+	uint32_t error;
+	int ret = 0;
+
+	rxdp = &rxq->rx_ring[rxq->rx_tail];
+	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+	rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
+			>> ICE_RXD_QW1_STATUS_S;
+
+	if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
+		len = qword1 >> ICE_RX_PROG_STATUS_DESC_LEN_S;
+		id = (qword1 & ICE_RX_PROG_STATUS_DESC_QW1_PROGID_M) >>
+			    ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S;
+
+		if (len  == ICE_RX_PROG_STATUS_DESC_LEN &&
+		    id == ICE_RX_PROG_STATUS_DESC_FD_FLTR_STATUS) {
+			error = (qword1 &
+				ICE_RX_PROG_STATUS_DESC_QW1_ERROR_M) >>
+				ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S;
+			if (error == (0x1 <<
+				ICE_RX_PROG_STATUS_DESC_FD_TBL_FULL_S)) {
+				PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
+					    " (FD_ID %u): programming status"
+					    " reported.",
+					    rxdp->wb.qword0.hi_dword.fd_id);
+				ret = -1;
+			} else if (error == (0x1 <<
+				ICE_RX_PROG_STATUS_DESC_NO_FD_ENTRY_S)) {
+				PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
+					    " (FD_ID %u): programming status"
+					    " reported.",
+					    rxdp->wb.qword0.hi_dword.fd_id);
+				ret = -1;
+			} else {
+				PMD_DRV_LOG(ERR, "invalid programming status"
+					    " reported, error = %u.", error);
+			}
+		} else {
+			PMD_DRV_LOG(INFO, "unknown programming status"
+				    " reported, len = %d, id = %u.", len, id);
+		}
+		rxdp->wb.qword1.status_error_len = 0;
+		rxq->rx_tail++;
+		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
+			rxq->rx_tail = 0;
+		if (rxq->rx_tail == 0)
+			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+		else
+			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
+	}
+
+	return ret;
+}
+
+#define ICE_FDIR_MAX_WAIT_US 10000
+
+int
+ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
+{
+	struct ice_tx_queue *txq = pf->fdir.txq;
+	struct ice_rx_queue *rxq = pf->fdir.rxq;
+	volatile struct ice_fltr_desc *fdirdp;
+	volatile struct ice_tx_desc *txdp;
+	uint32_t td_cmd;
+	uint16_t i;
+
+	fdirdp = (volatile struct ice_fltr_desc *)
+		(&txq->tx_ring[txq->tx_tail]);
+	fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
+	fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
+
+	txdp = &txq->tx_ring[txq->tx_tail + 1];
+	txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+	td_cmd = ICE_TX_DESC_CMD_EOP |
+		ICE_TX_DESC_CMD_RS  |
+		ICE_TX_DESC_CMD_DUMMY;
+
+	txdp->cmd_type_offset_bsz =
+		ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
+
+	txq->tx_tail += 2;
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+	/* Update the tx tail register */
+	rte_wmb();
+	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
+		if ((txdp->cmd_type_offset_bsz &
+		     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
+		    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
+			break;
+		rte_delay_us(1);
+	}
+	if (i >= ICE_FDIR_MAX_WAIT_US) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to program FDIR filter: time out to get DD on tx queue.");
+		return -ETIMEDOUT;
+	}
+
+	for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
+		if (ice_check_fdir_programming_status(rxq) >= 0)
+			return 0;
+		rte_delay_us(1);
+	}
+
+	PMD_DRV_LOG(ERR,
+		    "Failed to program FDIR filter: programming status reported.");
+	return -ETIMEDOUT;
+}
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e9214110c..450db0244 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -36,6 +36,8 @@
 #define ICE_TX_MAX_FREE_BUF_SZ      64
 #define ICE_DESCS_PER_LOOP          4
 
+#define ICE_FDIR_PKT_LEN	512
+
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
 
@@ -147,10 +149,14 @@ int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void ice_rx_queue_release(void *rxq);
 void ice_tx_queue_release(void *txq);
 void ice_clear_queues(struct rte_eth_dev *dev);
 void ice_free_queues(struct rte_eth_dev *dev);
+int ice_fdir_setup_tx_resources(struct ice_pf *pf);
+int ice_fdir_setup_rx_resources(struct ice_pf *pf);
 uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		       uint16_t nb_pkts);
 uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -188,4 +194,5 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
 					  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts);
+int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 #endif /* _ICE_RXTX_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 36b4b3c85..53846442a 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -10,7 +10,8 @@ sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c',
 	'ice_switch_filter.c',
-	'ice_generic_flow.c'
+	'ice_generic_flow.c',
+	'ice_fdir_filter.c'
 	)
 
 deps += ['hash']
-- 
2.17.1



* [dpdk-dev] [dpdk-dev 02/12] net/ice: tear down flow director
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 01/12] net/ice: initialize and set up flow director Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-07 11:21   ` Ye Xiaolong
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 03/12] net/ice: enable input set configuration Yahui Cao
                   ` (9 subsequent siblings)
  11 siblings, 1 reply; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

From: Beilei Xing <beilei.xing@intel.com>

Release flow director resources, including:
 - Release the queues.
 - Release the VSI.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
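The release path mirrors setup in reverse. A condensed sketch of the
sequence implemented by ice_fdir_teardown() below, with error handling
elided:

	ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_release_vsi(pf->fdir.fdir_vsi);
	pf->fdir.fdir_vsi = NULL;
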
 drivers/net/ice/ice_fdir_filter.c | 40 ++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.c        | 57 +++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.h        |  2 ++
 3 files changed, 99 insertions(+)

diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 03d143058..451ef92b2 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -117,6 +117,37 @@ ice_fdir_setup(struct ice_pf *pf)
 	return err;
 }
 
+/*
+ * ice_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ */
+static void
+ice_fdir_teardown(struct ice_pf *pf)
+{
+	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+	struct ice_vsi *vsi;
+	int err;
+
+	vsi = pf->fdir.fdir_vsi;
+	if (!vsi)
+		return;
+
+	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
+
+	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
+
+	ice_tx_queue_release(pf->fdir.txq);
+	pf->fdir.txq = NULL;
+	ice_rx_queue_release(pf->fdir.rxq);
+	pf->fdir.rxq = NULL;
+	ice_release_vsi(vsi);
+	pf->fdir.fdir_vsi = NULL;
+}
+
 static int
 ice_init_fdir_filter(struct ice_adapter *ad)
 {
@@ -128,8 +159,17 @@ ice_init_fdir_filter(struct ice_adapter *ad)
 	return ret;
 }
 
+static void
+ice_uninit_fdir_filter(struct ice_adapter *ad)
+{
+	struct ice_pf *pf = &ad->pf;
+
+	ice_fdir_teardown(pf);
+}
+
 static struct ice_flow_engine ice_fdir_engine = {
 	.init = ice_init_fdir_filter,
+	.uninit = ice_uninit_fdir_filter,
 	.type = ICE_FLOW_ENGINE_FDIR,
 };
 
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index bd802e350..e41fcb194 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -748,6 +748,63 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	return 0;
 }
 
+int
+ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ice_rx_queue *rxq;
+	int err;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	rxq = pf->fdir.rxq;
+
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
+			    rx_queue_id);
+		return -EINVAL;
+	}
+	ice_rx_queue_release_mbufs(rxq);
+
+	return 0;
+}
+
+int
+ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ice_tx_queue *txq;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_vsi *vsi = pf->main_vsi;
+	enum ice_status status;
+	uint16_t q_ids[1];
+	uint32_t q_teids[1];
+	uint16_t q_handle = tx_queue_id;
+
+	txq = pf->fdir.txq;
+	if (!txq) {
+		PMD_DRV_LOG(ERR, "TX queue %u is not available",
+			    tx_queue_id);
+		return -EINVAL;
+	}
+	vsi = txq->vsi;
+
+	q_ids[0] = txq->reg_idx;
+	q_teids[0] = txq->q_teid;
+
+	/* FIXME: we assume TC is always 0 here */
+	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
+				 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
+	if (status != ICE_SUCCESS) {
+		PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
+		return -EINVAL;
+	}
+
+	ice_tx_queue_release_mbufs(txq);
+
+	return 0;
+}
+
 int
 ice_rx_queue_setup(struct rte_eth_dev *dev,
 		   uint16_t queue_idx,
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 450db0244..24376c0d5 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -151,6 +151,8 @@ int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void ice_rx_queue_release(void *rxq);
 void ice_tx_queue_release(void *txq);
 void ice_clear_queues(struct rte_eth_dev *dev);
-- 
2.17.1



* [dpdk-dev] [dpdk-dev 03/12] net/ice: enable input set configuration
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 01/12] net/ice: initialize and set up flow director Yahui Cao
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 02/12] net/ice: tear down " Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-07 12:32   ` Ye Xiaolong
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 04/12] net/ice: add FDIR create and destroy Yahui Cao
                   ` (8 subsequent siblings)
  11 siblings, 1 reply; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

From: Beilei Xing <beilei.xing@intel.com>

Configure the input set, including:
 - Parse the input set.
 - Check the segment.
 - Create the profile.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
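A minimal sketch of how the new helper is expected to be driven once
rule parsing lands later in the series (the flow type and input-set
bits below are illustrative):

	uint64_t input_set = ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST |
			     ICE_INSET_UDP_SRC_PORT |
			     ICE_INSET_UDP_DST_PORT;

	if (ice_fdir_input_set_conf(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
				    input_set))
		PMD_DRV_LOG(ERR, "FDIR input set configuration failed");
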
 drivers/net/ice/ice_ethdev.h      |   3 +
 drivers/net/ice/ice_fdir_filter.c | 245 ++++++++++++++++++++++++++++++
 2 files changed, 248 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index c43242b63..ea68858d1 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -366,6 +366,9 @@ struct ice_vsi *
 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
 int
 ice_release_vsi(struct ice_vsi *vsi);
+int
+ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
+			uint64_t input_set);
 
 static inline int
 ice_align_floor(int n)
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 451ef92b2..0840c3b4b 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -148,6 +148,251 @@ ice_fdir_teardown(struct ice_pf *pf)
 	pf->fdir.fdir_vsi = NULL;
 }
 
+static void
+ice_fdir_rm_prof(struct ice_hw *hw, enum ice_fltr_ptype ptype)
+{
+	struct ice_fd_hw_prof *hw_prof = hw->fdir_prof[ptype];
+	uint64_t prof_id;
+	uint16_t vsi_num;
+	int tun;
+	int i;
+
+	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
+		return;
+
+	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+		if (!hw_prof->fdir_seg[tun])
+			break;
+		prof_id = ptype + tun * ICE_FLTR_PTYPE_MAX;
+		for (i = 0; i < hw_prof->cnt; i++) {
+			if (hw_prof->entry_h[i][tun]) {
+				vsi_num = ice_get_hw_vsi_num(hw,
+							     hw_prof->vsi_h[i]);
+				ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+						     vsi_num, ptype);
+				ice_flow_rem_entry(hw,
+						   hw_prof->entry_h[i][tun]);
+				hw_prof->entry_h[i][tun] = 0;
+			}
+		}
+		ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+		rte_free(hw_prof->fdir_seg[tun]);
+		hw_prof->fdir_seg[tun] = NULL;
+	}
+	for (i = 0; i < hw_prof->cnt; i++)
+		hw_prof->vsi_h[i] = 0;
+	hw_prof->cnt = 0;
+}
+
+static int
+ice_fdir_cfg_hw_tbl(struct ice_pf *pf, struct ice_vsi *vsi,
+		    struct ice_vsi *ctrl_vsi,
+		    struct ice_flow_seg_info *seg,
+		    enum ice_fltr_ptype ptype,
+		    bool is_tunnel)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	enum ice_flow_dir dir = ICE_FLOW_RX;
+	struct ice_flow_seg_info *ori_seg;
+	struct ice_fd_hw_prof *hw_prof;
+	struct ice_flow_prof *prof;
+	uint64_t entry_1 = 0;
+	uint64_t entry_2 = 0;
+	uint16_t vsi_num;
+	int ret;
+	uint64_t prof_id;
+
+	if (!hw->fdir_prof) {
+		hw->fdir_prof = (struct ice_fd_hw_prof **)
+			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
+				   sizeof(*hw->fdir_prof));
+		if (!hw->fdir_prof)
+			return -ENOMEM;
+	}
+	if (!hw->fdir_prof[ptype]) {
+		hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
+			ice_malloc(hw, sizeof(**hw->fdir_prof));
+		if (!hw->fdir_prof[ptype])
+			return -ENOMEM;
+	}
+
+	hw_prof = hw->fdir_prof[ptype];
+	ori_seg = hw_prof->fdir_seg[is_tunnel];
+	if (ori_seg) {
+		if (!memcmp(ori_seg, seg, sizeof(*seg)))
+			return -EAGAIN;
+		if (hw->fdir_fltr_cnt[ptype])
+			return -EINVAL;
+
+		ice_fdir_rm_prof(hw, ptype);
+	}
+
+	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
+	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
+				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
+	if (ret)
+		return ret;
+	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
+				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
+				 seg, NULL, 0, &entry_1);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
+			    ptype);
+		goto err_add_prof;
+	}
+	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
+				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+				 seg, NULL, 0, &entry_2);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
+			    ptype);
+		goto err_add_entry;
+	}
+
+	hw_prof->cnt = 0;
+	hw_prof->fdir_seg[is_tunnel] = seg;
+	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
+	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
+	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
+	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
+
+	return ret;
+
+err_add_entry:
+	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+	ice_flow_rem_entry(hw, entry_1);
+err_add_prof:
+	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+
+	return ret;
+}
+
+static void
+ice_parse_input_set(uint64_t inset, enum ice_flow_field *field)
+{
+	uint32_t i, j;
+
+	struct ice_inset_map {
+		uint64_t inset;
+		enum ice_flow_field fld;
+	};
+	static const struct ice_inset_map ice_inset_map[] = {
+		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
+		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
+		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
+		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
+		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
+		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
+		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
+		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
+		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
+		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
+	};
+
+	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
+		if (inset & ice_inset_map[i].inset)
+			field[j++] = ice_inset_map[i].fld;
+	}
+}
+
+int
+ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
+			uint64_t input_set)
+{
+	struct ice_flow_seg_info *seg, *seg_tun;
+	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
+	int i, ret;
+
+	if (!input_set)
+		return -EINVAL;
+
+	seg = (struct ice_flow_seg_info *)
+		ice_malloc(hw, sizeof(*seg));
+	if (!seg) {
+		PMD_DRV_LOG(ERR, "No memory can be allocated");
+		return -ENOMEM;
+	}
+
+	seg_tun = (struct ice_flow_seg_info *)
+		ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
+	if (!seg_tun) {
+		PMD_DRV_LOG(ERR, "No memory can be allocated");
+		rte_free(seg);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
+		field[i] = ICE_FLOW_FIELD_IDX_MAX;
+	ice_parse_input_set(input_set, field);
+
+	switch (flow) {
+	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
+				  ICE_FLOW_SEG_HDR_IPV4);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
+				  ICE_FLOW_SEG_HDR_IPV4);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
+				  ICE_FLOW_SEG_HDR_IPV4);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
+				  ICE_FLOW_SEG_HDR_IPV6);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
+				  ICE_FLOW_SEG_HDR_IPV6);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
+				  ICE_FLOW_SEG_HDR_IPV6);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "not supported filter type.");
+		break;
+	}
+
+	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
+		ice_flow_set_fld(seg, field[i],
+				 ICE_FLOW_FLD_OFF_INVAL,
+				 ICE_FLOW_FLD_OFF_INVAL,
+				 ICE_FLOW_FLD_OFF_INVAL, false);
+	}
+
+	ret = ice_fdir_cfg_hw_tbl(pf, pf->main_vsi, pf->fdir.fdir_vsi,
+				  seg, flow, 0);
+	if (ret < 0)
+		goto FREE_SEG;
+
+	rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
+	ret = ice_fdir_cfg_hw_tbl(pf, pf->main_vsi, pf->fdir.fdir_vsi,
+				  seg_tun, flow, 1);
+
+	if (!ret)
+		return ret;
+	else if (ret < 0)
+		goto FREE_SEG;
+
+FREE_SEG:
+	rte_free(seg);
+	rte_free(seg_tun);
+
+	if (ret == -EAGAIN)
+		return 0;
+	else
+		return ret;
+}
+
 static int
 ice_init_fdir_filter(struct ice_adapter *ad)
 {
-- 
2.17.1



* [dpdk-dev] [dpdk-dev 04/12] net/ice: add FDIR create and destroy
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
                   ` (2 preceding siblings ...)
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 03/12] net/ice: enable input set configuration Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-07 12:50   ` Ye Xiaolong
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 05/12] net/ice: add FDIR mark action support Yahui Cao
                   ` (7 subsequent siblings)
  11 siblings, 1 reply; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC (permalink / raw)
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

Add ice_create_fdir_filter to create a rule. If a flow is matched by
the flow director filter, the filter rule will be programmed to HW.
Only basic patterns and the queue/passthru/drop actions are supported.

Add ice_destroy_fdir_filter to destroy a rule. If the flow was created
before, the filter rule will be removed from HW.

Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
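With this patch, a rule such as the following sketch can be created
through rte_flow (the matched source address, the DROP action and
port_id are illustrative; see ice_fdir_parse_pattern and
ice_fdir_parse_action below for what is accepted):

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = rte_cpu_to_be_32(UINT32_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &err);
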
 drivers/net/ice/ice_ethdev.h      |   6 +
 drivers/net/ice/ice_fdir_filter.c | 508 ++++++++++++++++++++++++++++++
 2 files changed, 514 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index ea68858d1..bb821bc41 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -249,6 +249,11 @@ TAILQ_HEAD(ice_flow_list, rte_flow);
 struct ice_flow_parser;
 TAILQ_HEAD(ice_parser_list, ice_flow_parser);
 
+struct ice_fdir_filter_conf {
+	struct ice_fdir_fltr input;
+	uint64_t input_set;
+};
+
 /**
  *  A structure used to define fields of a FDIR related info.
  */
@@ -258,6 +263,7 @@ struct ice_fdir_info {
 	struct ice_rx_queue *rxq;
 	void *prg_pkt;                 /* memory for fdir program packet */
 	uint64_t dma_addr;             /* physic address of packet memory*/
+	struct ice_fdir_filter_conf conf;
 };
 
 struct ice_pf {
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 0840c3b4b..98bc1be49 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -7,6 +7,51 @@
 #include "ice_rxtx.h"
 #include "ice_generic_flow.h"
 
+#define ICE_FDIR_INSET_ETH_IPV4 (\
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
+	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
+
+#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
+	ICE_FDIR_INSET_ETH_IPV4 | \
+	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
+	ICE_FDIR_INSET_ETH_IPV4 | \
+	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
+	ICE_FDIR_INSET_ETH_IPV4 | \
+	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV6 (\
+	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
+
+#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
+	ICE_FDIR_INSET_ETH_IPV6 | \
+	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
+	ICE_FDIR_INSET_ETH_IPV6 | \
+	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
+	ICE_FDIR_INSET_ETH_IPV6 | \
+	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+static struct ice_pattern_match_item ice_fdir_pattern[] = {
+	{pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
+	{pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
+	{pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
+	{pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
+	{pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
+	{pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
+	{pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
+};
+
+static struct ice_flow_parser ice_fdir_parser;
+
 static const struct rte_memzone *
 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
 {
@@ -400,6 +445,10 @@ ice_init_fdir_filter(struct ice_adapter *ad)
 	int ret;
 
 	ret = ice_fdir_setup(pf);
+	if (ret)
+		return ret;
+
+	ret = ice_register_parser(&ice_fdir_parser, ad);
 
 	return ret;
 }
@@ -409,15 +458,474 @@ ice_uninit_fdir_filter(struct ice_adapter *ad)
 {
 	struct ice_pf *pf = &ad->pf;
 
+	ice_unregister_parser(&ice_fdir_parser, ad);
+
 	ice_fdir_teardown(pf);
 }
 
+static int
+ice_add_del_fdir_filter(struct ice_pf *pf,
+			struct ice_fdir_filter_conf *filter,
+			bool add)
+{
+	struct ice_fltr_desc desc;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	int ret = 0;
+
+	memset(&desc, 0, sizeof(desc));
+	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
+
+	memset(pkt, 0, ICE_FDIR_PKT_LEN);
+	ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
+		return -EINVAL;
+	}
+
+	ret = ice_fdir_programming(pf, &desc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+ice_create_fdir_filter(struct ice_adapter *ad,
+		       struct rte_flow *flow,
+		       void *meta,
+		       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_fdir_filter_conf *filter = meta;
+	struct ice_fdir_filter_conf *rule;
+	int ret = 0;
+
+	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return -rte_errno;
+	}
+
+	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
+			filter->input_set);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Profile configure failed.");
+		goto free_entry;
+	}
+
+	ret = ice_add_del_fdir_filter(pf, filter, true);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Add filter rule failed.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+	ice_fdir_update_cntrs(hw, filter->input.flow_type, true);
+	return 0;
+
+free_entry:
+	rte_free(rule);
+	return -rte_errno;
+}
+
+static int
+ice_destroy_fdir_filter(struct ice_adapter *ad,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_fdir_filter_conf *filter;
+	int ret;
+
+	filter = (struct ice_fdir_filter_conf *)flow->rule;
+
+	ret = ice_add_del_fdir_filter(pf, filter, false);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Del filter rule failed.");
+		return -rte_errno;
+	}
+
+	rte_free(filter);
+
+	return 0;
+}
+
 static struct ice_flow_engine ice_fdir_engine = {
 	.init = ice_init_fdir_filter,
 	.uninit = ice_uninit_fdir_filter,
+	.create = ice_create_fdir_filter,
+	.destroy = ice_destroy_fdir_filter,
 	.type = ICE_FLOW_ENGINE_FDIR,
 };
 
+static int
+ice_fdir_parse_action(struct ice_adapter *ad,
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error,
+		      struct ice_fdir_filter_conf *filter)
+{
+	struct ice_pf *pf = &ad->pf;
+	const struct rte_flow_action_queue *act_q;
+	uint32_t dest_num = 0;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+
+			act_q = actions->conf;
+			filter->input.q_index = act_q->index;
+			if (filter->input.q_index >=
+					pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "Invalid queue for FDIR.");
+				return -rte_errno;
+			}
+			filter->input.dest_ctl =
+				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			dest_num++;
+
+			filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+			break;
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+			dest_num++;
+
+			filter->input.dest_ctl =
+				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+			filter->input.q_index = 0;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	if (dest_num == 0 || dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+		       const struct rte_flow_item pattern[],
+		       struct rte_flow_error *error,
+		       struct ice_fdir_filter_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	uint64_t input_set = ICE_INSET_NONE;
+	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
+	uint8_t ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+	};
+
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec || eth_mask) {
+				rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"eth mac not support");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (ipv4_spec && ipv4_mask) {
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+				    ipv4_mask->hdr.total_length ||
+				    ipv4_mask->hdr.packet_id ||
+				    ipv4_mask->hdr.fragment_offset ||
+				    ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+
+				filter->input.ip.v4.dst_ip =
+					ipv4_spec->hdr.src_addr;
+				filter->input.ip.v4.src_ip =
+					ipv4_spec->hdr.dst_addr;
+			}
+
+			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (ipv6_spec && ipv6_mask) {
+				/* Check IPv6 mask and update input set */
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+
+				rte_memcpy(filter->input.ip.v6.dst_ip,
+					   ipv6_spec->hdr.src_addr, 16);
+				rte_memcpy(filter->input.ip.v6.src_ip,
+					   ipv6_spec->hdr.dst_addr, 16);
+			}
+
+			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (tcp_spec && tcp_mask) {
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+				    tcp_mask->hdr.recv_ack ||
+				    tcp_mask->hdr.data_off ||
+				    tcp_mask->hdr.tcp_flags ||
+				    tcp_mask->hdr.rx_win ||
+				    tcp_mask->hdr.cksum ||
+				    tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TCP_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TCP_DST_PORT;
+
+				/* Get filter info */
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+					filter->input.ip.v4.dst_port =
+						tcp_spec->hdr.src_port;
+					filter->input.ip.v4.src_port =
+						tcp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+					filter->input.ip.v6.dst_port =
+						tcp_spec->hdr.src_port;
+					filter->input.ip.v6.src_port =
+						tcp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+				}
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (udp_spec && udp_mask) {
+				/* Check UDP mask and update input set*/
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_UDP_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_UDP_DST_PORT;
+
+				/* Get filter info */
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+					filter->input.ip.v4.dst_port =
+						udp_spec->hdr.src_port;
+					filter->input.ip.v4.src_port =
+						udp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+					filter->input.ip.v6.src_port =
+						udp_spec->hdr.src_port;
+					filter->input.ip.v6.dst_port =
+						udp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+				}
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (sctp_spec && sctp_mask) {
+				/* Check SCTP mask and update input set */
+				if (sctp_mask->hdr.cksum) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid SCTP mask");
+					return -rte_errno;
+				}
+
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SCTP_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_SCTP_DST_PORT;
+
+				/* Get filter info */
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+					filter->input.ip.v4.dst_port =
+						sctp_spec->hdr.src_port;
+					filter->input.ip.v4.src_port =
+						sctp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+					filter->input.ip.v6.dst_port =
+						sctp_spec->hdr.src_port;
+					filter->input.ip.v6.src_port =
+						sctp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+				}
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   item,
+				   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	filter->input.flow_type = flow_type;
+	filter->input_set = input_set;
+
+	return 0;
+}
+
+static int
+ice_fdir_parse(struct ice_adapter *ad,
+	       struct ice_pattern_match_item *array,
+	       uint32_t array_len,
+	       const struct rte_flow_item pattern[],
+	       const struct rte_flow_action actions[],
+	       void **meta,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
+	struct ice_pattern_match_item *item = NULL;
+	uint64_t input_set;
+	int ret;
+
+	memset(filter, 0, sizeof(*filter));
+	item = ice_search_pattern_match_item(pattern, array, array_len, error);
+	if (!item)
+		return -rte_errno;
+
+	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
+	if (ret)
+		return ret;
+	input_set = filter->input_set;
+	if (!input_set || input_set & (~item->input_set_mask)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	ret = ice_fdir_parse_action(ad, actions, error, filter);
+	if (ret)
+		return ret;
+
+	*meta = filter;
+
+	return 0;
+}
+
+static struct ice_flow_parser ice_fdir_parser = {
+	.engine = &ice_fdir_engine,
+	.array = ice_fdir_pattern,
+	.array_len = RTE_DIM(ice_fdir_pattern),
+	.parse_pattern_action = ice_fdir_parse,
+	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
+};
+
 RTE_INIT(ice_fdir_init_log)
 {
 	ice_register_flow_engine(&ice_fdir_engine);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [dpdk-dev] [dpdk-dev 05/12] net/ice: add FDIR mark action support
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
                   ` (3 preceding siblings ...)
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 04/12] net/ice: add FDIR create and destroy Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 06/12] net/ice: add hash table for FDIR Yahui Cao
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC (permalink / raw)
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

FDIR will add the mark id to the packet mbuf when a flow rule hits.
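
For illustration, a rule carrying the mark action might be built as in
the hedged sketch below (application-side usage, not part of this
patch; the port id, address and ids are made up, and the mark action
must be paired with a fate action such as queue):

	#include <stdint.h>
	#include <rte_flow.h>
	#include <rte_ip.h>

	/* Match IPv4 src 192.168.0.1 exactly, mark it with id 0x1234
	 * and deliver it to queue 1; the PMD then reports the id in
	 * mbuf->hash.fdir.hi with PKT_RX_FDIR_ID set.
	 */
	static struct rte_flow *
	add_mark_rule(uint16_t port_id, struct rte_flow_error *err)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item_ipv4 spec = {
			.hdr.src_addr =
				rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
		};
		struct rte_flow_item_ipv4 mask = {
			.hdr.src_addr = rte_cpu_to_be_32(UINT32_MAX),
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
			  .spec = &spec, .mask = &mask },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_mark mark = { .id = 0x1234 };
		struct rte_flow_action_queue queue = { .index = 1 };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		return rte_flow_create(port_id, &attr, pattern, actions, err);
	}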

Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
 drivers/net/ice/ice_fdir_filter.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 98bc1be49..294678075 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -577,7 +577,9 @@ ice_fdir_parse_action(struct ice_adapter *ad,
 {
 	struct ice_pf *pf = &ad->pf;
 	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark_spec = NULL;
 	uint32_t dest_num = 0;
+	uint32_t mark_num = 0;
 
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
@@ -611,6 +613,12 @@ ice_fdir_parse_action(struct ice_adapter *ad,
 				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
 			filter->input.q_index = 0;
 			break;
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			mark_num++;
+
+			mark_spec = actions->conf;
+			filter->input.fltr_id = mark_spec->id;
+			break;
 		default:
 			rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -626,6 +634,13 @@ ice_fdir_parse_action(struct ice_adapter *ad,
 		return -rte_errno;
 	}
 
+	if (mark_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Too many mark actions");
+		return -rte_errno;
+	}
+
 	return 0;
 }
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [dpdk-dev] [dpdk-dev 06/12] net/ice: add hash table for FDIR
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
                   ` (4 preceding siblings ...)
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 05/12] net/ice: add FDIR mark action support Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 07/12] net/ice: enable FDIR queue group Yahui Cao
                   ` (5 subsequent siblings)
  11 siblings, 0 replies; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC (permalink / raw)
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

Enable quick lookup for existing flow director rule entries.
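
The visible effect at the API level is duplicate detection: creating
the same rule twice now fails fast from the software table instead of
being programmed into hardware again. A hedged sketch (attr, pattern
and actions are placeholders for any valid FDIR rule definition):

	struct rte_flow_error err;
	struct rte_flow *f1, *f2;

	f1 = rte_flow_create(port_id, &attr, pattern, actions, &err);
	f2 = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (f1 && !f2 && rte_errno == EEXIST)
		printf("duplicate FDIR rule rejected: %s\n", err.message);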

Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
 drivers/net/ice/ice_ethdev.h      |  17 +++
 drivers/net/ice/ice_fdir_filter.c | 186 ++++++++++++++++++++++++++++--
 2 files changed, 196 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index bb821bc41..30ab518cd 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -254,6 +254,20 @@ struct ice_fdir_filter_conf {
 	uint64_t input_set;
 };
 
+#define ICE_MAX_FDIR_FILTER_NUM		(1024 * 16)
+
+struct ice_fdir_fltr_pattern {
+	enum ice_fltr_ptype flow_type;
+
+	union {
+		struct ice_fdir_v4 v4;
+		struct ice_fdir_v6 v6;
+	} ip, mask;
+
+	struct ice_fdir_extra ext_data;
+	struct ice_fdir_extra ext_mask;
+};
+
 /**
  *  A structure used to define fields of a FDIR related info.
  */
@@ -264,6 +278,9 @@ struct ice_fdir_info {
 	void *prg_pkt;                 /* memory for fdir program packet */
 	uint64_t dma_addr;             /* physical address of packet memory */
 	struct ice_fdir_filter_conf conf;
+
+	struct ice_fdir_filter_conf **hash_map;
+	struct rte_hash *hash_table;
 };
 
 struct ice_pf {
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 294678075..df4d0329c 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1,5 +1,7 @@
 #include <stdio.h>
 #include <rte_flow.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
 #include "base/ice_fdir.h"
 #include "base/ice_flow.h"
 #include "base/ice_type.h"
@@ -69,6 +71,60 @@ ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
 
 #define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"
 
+static int
+ice_init_fdir_filter_list(struct ice_pf *pf)
+{
+	struct rte_eth_dev *dev = pf->adapter->eth_dev;
+	struct ice_fdir_info *fdir_info = &pf->fdir;
+	char fdir_hash_name[RTE_HASH_NAMESIZE];
+	int ret;
+
+	struct rte_hash_parameters fdir_hash_params = {
+		.name = fdir_hash_name,
+		.entries = ICE_MAX_FDIR_FILTER_NUM,
+		.key_len = sizeof(struct ice_fdir_fltr_pattern),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+		.socket_id = rte_socket_id(),
+	};
+
+	/* Initialize hash */
+	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+		 "fdir_%s", dev->device->name);
+	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+	if (!fdir_info->hash_table) {
+		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+		return -EINVAL;
+	}
+	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
+					  sizeof(*fdir_info->hash_map) *
+					  ICE_MAX_FDIR_FILTER_NUM,
+					  0);
+	if (!fdir_info->hash_map) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for fdir hash map!");
+		ret = -ENOMEM;
+		goto err_fdir_hash_map_alloc;
+	}
+	return 0;
+
+err_fdir_hash_map_alloc:
+	rte_hash_free(fdir_info->hash_table);
+
+	return ret;
+}
+
+static void
+ice_release_fdir_filter_list(struct ice_pf *pf)
+{
+	struct ice_fdir_info *fdir_info = &pf->fdir;
+
+	if (fdir_info->hash_map)
+		rte_free(fdir_info->hash_map);
+	if (fdir_info->hash_table)
+		rte_hash_free(fdir_info->hash_table);
+}
+
 /*
  * ice_fdir_setup - reserve and initialize the Flow Director resources
  * @pf: board private structure
@@ -106,6 +162,12 @@ ice_fdir_setup(struct ice_pf *pf)
 	}
 	pf->fdir.fdir_vsi = vsi;
 
+	err = ice_init_fdir_filter_list(pf);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
+		return -EINVAL;
+	}
+
 	/*Fdir tx queue setup*/
 	err = ice_fdir_setup_tx_resources(pf);
 	if (err) {
@@ -177,6 +239,8 @@ ice_fdir_teardown(struct ice_pf *pf)
 	if (!vsi)
 		return;
 
+	ice_release_fdir_filter_list(pf);
+
 	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
@@ -490,6 +554,74 @@ ice_add_del_fdir_filter(struct ice_pf *pf,
 	return 0;
 }
 
+static void
+ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
+			  struct ice_fdir_filter_conf *filter)
+{
+	struct ice_fdir_fltr *input = &filter->input;
+	memset(key, 0, sizeof(*key));
+
+	key->flow_type = input->flow_type;
+	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
+	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
+	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
+	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
+}
+
+/* Check if there exists the flow director filter */
+static struct ice_fdir_filter_conf *
+ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
+			const struct ice_fdir_fltr_pattern *key)
+{
+	int ret;
+
+	ret = rte_hash_lookup(fdir_info->hash_table, key);
+	if (ret < 0)
+		return NULL;
+
+	return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director entry into the SW list */
+static int
+ice_fdir_entry_insert(struct ice_pf *pf,
+		      struct ice_fdir_filter_conf *entry,
+		      struct ice_fdir_fltr_pattern *key)
+{
+	struct ice_fdir_info *fdir_info = &pf->fdir;
+	int ret;
+
+	ret = rte_hash_add_key(fdir_info->hash_table, key);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to insert fdir entry to hash table %d!",
+			    ret);
+		return ret;
+	}
+	fdir_info->hash_map[ret] = entry;
+
+	return 0;
+}
+
+/* Delete a flow director entry from the SW list */
+static int
+ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
+{
+	struct ice_fdir_info *fdir_info = &pf->fdir;
+	int ret;
+
+	ret = rte_hash_del_key(fdir_info->hash_table, key);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to delete fdir filter from hash table %d!",
+			    ret);
+		return ret;
+	}
+	fdir_info->hash_map[ret] = NULL;
+
+	return 0;
+}
+
 static int
 ice_create_fdir_filter(struct ice_adapter *ad,
 		       struct rte_flow *flow,
@@ -499,11 +631,22 @@ ice_create_fdir_filter(struct ice_adapter *ad,
 	struct ice_pf *pf = &ad->pf;
 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
 	struct ice_fdir_filter_conf *filter = meta;
-	struct ice_fdir_filter_conf *rule;
+	struct ice_fdir_info *fdir_info = &pf->fdir;
+	struct ice_fdir_filter_conf *entry, *node;
+	struct ice_fdir_fltr_pattern key;
 	int ret = 0;
 
-	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
-	if (!rule) {
+	ice_fdir_extract_fltr_key(&key, filter);
+	node = ice_fdir_entry_lookup(fdir_info, &key);
+	if (node) {
+		rte_flow_error_set(error, EEXIST,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Rule already exists!");
+		return -rte_errno;
+	}
+
+	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
+	if (!entry) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "Failed to allocate memory");
@@ -527,13 +670,22 @@ ice_create_fdir_filter(struct ice_adapter *ad,
 		goto free_entry;
 	}
 
-	rte_memcpy(rule, filter, sizeof(*rule));
-	flow->rule = rule;
+	rte_memcpy(entry, filter, sizeof(*entry));
+	ret = ice_fdir_entry_insert(pf, entry, &key);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Insert entry to table failed.");
+		goto free_entry;
+	}
+
+	flow->rule = entry;
 	ice_fdir_update_cntrs(hw, filter->input.flow_type, true);
+
 	return 0;
 
 free_entry:
-	rte_free(rule);
+	rte_free(entry);
 	return -rte_errno;
 }
 
@@ -543,11 +695,22 @@ ice_destroy_fdir_filter(struct ice_adapter *ad,
 			struct rte_flow_error *error)
 {
 	struct ice_pf *pf = &ad->pf;
-	struct ice_fdir_filter_conf *filter;
+	struct ice_fdir_info *fdir_info = &pf->fdir;
+	struct ice_fdir_filter_conf *filter, *entry;
+	struct ice_fdir_fltr_pattern key;
 	int ret;
 
 	filter = (struct ice_fdir_filter_conf *)flow->rule;
 
+	ice_fdir_extract_fltr_key(&key, filter);
+	entry = ice_fdir_entry_lookup(fdir_info, &key);
+	if (!entry) {
+		rte_flow_error_set(error, ENOENT,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Can't find entry.");
+		return -rte_errno;
+	}
+
 	ret = ice_add_del_fdir_filter(pf, filter, false);
 	if (ret) {
 		rte_flow_error_set(error, -ret,
@@ -556,7 +719,16 @@ ice_destroy_fdir_filter(struct ice_adapter *ad,
 		return -rte_errno;
 	}
 
+	ret = ice_fdir_entry_del(pf, &key);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Remove entry from table failed.");
+		return -rte_errno;
+	}
+
 	rte_free(filter);
+	flow->rule = NULL;
 
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [dpdk-dev] [dpdk-dev 07/12] net/ice: enable FDIR queue group
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
                   ` (5 preceding siblings ...)
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 06/12] net/ice: add hash table for FDIR Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-07 18:22   ` Ye Xiaolong
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 08/12] net/ice: add FDIR dst mac support Yahui Cao
                   ` (4 subsequent siblings)
  11 siblings, 1 reply; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC (permalink / raw)
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

FDIR can send matched packets to a group of queues and distribute them
by RSS.
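
For illustration, the action side of such a rule might look like the
hedged sketch below (queue indexes are made up; the region must be
contiguous and its size a power of two no larger than 128):

	/* Spread matched packets over queues 4..7 by RSS. */
	static const uint16_t region[] = { 4, 5, 6, 7 };
	struct rte_flow_action_rss rss = {
		.queue_num = RTE_DIM(region),
		.queue = region,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};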

Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
 drivers/net/ice/ice_fdir_filter.c | 65 +++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)

diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index df4d0329c..ebbe1bd6c 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -741,6 +741,62 @@ static struct ice_flow_engine ice_fdir_engine = {
 	.type = ICE_FLOW_ENGINE_FDIR,
 };
 
+static int
+ice_fdir_parse_action_qregion(struct ice_pf *pf,
+			      struct rte_flow_error *error,
+			      const struct rte_flow_action *act,
+			      struct ice_fdir_filter_conf *filter)
+{
+	const struct rte_flow_action_rss *rss = act->conf;
+	uint32_t i;
+
+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, act,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	if (rss->queue_num <= 1) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, act,
+				   "Queue region size can't be 0 or 1.");
+		return -rte_errno;
+	}
+
+	/* check if the queue indexes for the queue region are continuous */
+	for (i = 0; i < rss->queue_num - 1; i++) {
+		if (rss->queue[i + 1] != rss->queue[i] + 1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, act,
+					   "Invalid queue region indexes.");
+			return -rte_errno;
+		}
+	}
+
+	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, act,
+				   "Invalid queue region indexes.");
+		return -rte_errno;
+	}
+
+	if (!(rte_is_power_of_2(rss->queue_num) && (rss->queue_num <= 128))) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, act,
+				   "The region size should be any of the following values: "
+				   "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
+				   "of queues does not exceed the VSI allocation.");
+		return -rte_errno;
+	}
+
+	filter->input.q_index = rss->queue[0];
+	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
+	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
+
+	return 0;
+}
+
 static int
 ice_fdir_parse_action(struct ice_adapter *ad,
 		      const struct rte_flow_action actions[],
@@ -752,6 +808,7 @@ ice_fdir_parse_action(struct ice_adapter *ad,
 	const struct rte_flow_action_mark *mark_spec = NULL;
 	uint32_t dest_num = 0;
 	uint32_t mark_num = 0;
+	int ret;
 
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
@@ -785,6 +842,14 @@ ice_fdir_parse_action(struct ice_adapter *ad,
 				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
 			filter->input.q_index = 0;
 			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+
+			ret = ice_fdir_parse_action_qregion(pf,
+						error, actions, filter);
+			if (ret)
+				return ret;
+			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			mark_num++;
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [dpdk-dev] [dpdk-dev 08/12] net/ice: add FDIR dst mac support
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
                   ` (6 preceding siblings ...)
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 07/12] net/ice: enable FDIR queue group Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-07 18:25   ` Ye Xiaolong
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 09/12] net/ice: add FDIR counter resource init/release Yahui Cao
                   ` (3 subsequent siblings)
  11 siblings, 1 reply; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC (permalink / raw)
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

Enable FDIR Ethernet destination address field matching support.
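
For illustration, the pattern side of such a rule might start as in
the hedged sketch below (the address is made up; the parser only
accepts an all-ones destination mask and a zero source address):

	struct rte_flow_item_eth eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	struct rte_flow_item_eth eth_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		/* followed by an IPv4/IPv6 (and optional L4) item ... */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};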

Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
 drivers/net/ice/ice_fdir_filter.c | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index ebbe1bd6c..1893aa0ee 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -10,6 +10,7 @@
 #include "ice_generic_flow.h"
 
 #define ICE_FDIR_INSET_ETH_IPV4 (\
+	ICE_INSET_DMAC | \
 	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
 	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
 
@@ -387,6 +388,7 @@ ice_parse_input_set(uint64_t inset, enum ice_flow_field *field)
 		enum ice_flow_field fld;
 	};
 	static const struct ice_inset_map ice_inset_map[] = {
+		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
 		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
 		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
 		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
@@ -918,13 +920,30 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			eth_spec = item->spec;
 			eth_mask = item->mask;
-			if (eth_spec || eth_mask) {
-				rte_flow_error_set(error, EINVAL,
+			if (eth_spec && eth_mask) {
+				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
+				    !rte_is_zero_ether_addr(&eth_mask->src)) {
+					rte_flow_error_set(error, EINVAL,
 						RTE_FLOW_ERROR_TYPE_ITEM,
 						item,
-						"eth mac not support");
-				return -rte_errno;
+						"Src mac not support");
+					return -rte_errno;
+				}
+
+				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid mac addr mask");
+					return -rte_errno;
+				}
+
+				input_set |= ICE_INSET_DMAC;
+				rte_memcpy(&filter->input.ext_data.dst_mac,
+					   &eth_spec->dst,
+					   RTE_ETHER_ADDR_LEN);
 			}
+
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [dpdk-dev] [dpdk-dev 09/12] net/ice: add FDIR counter resource init/release
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
                   ` (7 preceding siblings ...)
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 08/12] net/ice: add FDIR dst mac support Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 10/12] net/ice: add FDIR counter support for flow id Yahui Cao
                   ` (2 subsequent siblings)
  11 siblings, 0 replies; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC (permalink / raw)
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

This patch allocates the counter pool at device initialization and
releases it at device close.

Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
 drivers/net/ice/ice_ethdev.h      | 33 +++++++++++
 drivers/net/ice/ice_fdir_filter.c | 92 +++++++++++++++++++++++++++++++
 2 files changed, 125 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 30ab518cd..8a54f57f0 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -268,6 +268,37 @@ struct ice_fdir_fltr_pattern {
 	struct ice_fdir_extra ext_mask;
 };
 
+#define ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE	1
+#define ICE_FDIR_COUNTER_MAX_POOL_SIZE		32
+#define ICE_FDIR_COUNTERS_PER_BLOCK		256
+#define ICE_FDIR_COUNTER_INDEX(base_idx) \
+				((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK)
+struct ice_fdir_counter {
+	TAILQ_ENTRY(ice_fdir_counter) next;
+	uint8_t shared;
+	uint32_t ref_cnt;
+	uint32_t id;
+	uint64_t hits;
+	uint64_t bytes;
+	uint32_t hw_index;
+};
+
+TAILQ_HEAD(ice_fdir_counter_list, ice_fdir_counter);
+
+struct ice_fdir_counter_pool {
+	TAILQ_ENTRY(ice_fdir_counter_pool) next;
+	struct ice_fdir_counter_list counter_list;
+	struct ice_fdir_counter counters[0];
+};
+
+TAILQ_HEAD(ice_fdir_counter_pool_list, ice_fdir_counter_pool);
+
+struct ice_fdir_counter_pool_container {
+	struct ice_fdir_counter_pool_list pool_list;
+	struct ice_fdir_counter_pool *pools[ICE_FDIR_COUNTER_MAX_POOL_SIZE];
+	uint8_t index_free;
+};
+
 /**
  *  A structure used to define fields of a FDIR related info.
  */
@@ -281,6 +312,8 @@ struct ice_fdir_info {
 
 	struct ice_fdir_filter_conf **hash_map;
 	struct rte_hash *hash_table;
+
+	struct ice_fdir_counter_pool_container counter;
 };
 
 struct ice_pf {
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 1893aa0ee..5fab7441b 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -115,6 +115,88 @@ ice_init_fdir_filter_list(struct ice_pf *pf)
 	return ret;
 }
 
+static int
+ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
+			  struct ice_fdir_counter_pool_container *container,
+			  uint32_t index_start,
+			  uint32_t len)
+{
+	struct ice_fdir_counter_pool *pool;
+	uint32_t i;
+	int ret = 0;
+
+	pool = rte_zmalloc("ice_fdir_counter_pool",
+			   sizeof(*pool) +
+			   sizeof(struct ice_fdir_counter) * len,
+			   0);
+	if (!pool) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for fdir counter pool");
+		return -ENOMEM;
+	}
+
+	TAILQ_INIT(&pool->counter_list);
+	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
+
+	for (i = 0; i < len; i++) {
+		struct ice_fdir_counter *counter = &pool->counters[i];
+
+		counter->hw_index = index_start + i;
+		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
+	}
+
+	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
+		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
+		ret = -EINVAL;
+		goto free_pool;
+	}
+
+	container->pools[container->index_free++] = pool;
+	return 0;
+
+free_pool:
+	rte_free(pool);
+	return ret;
+}
+
+static int
+ice_fdir_counter_init(struct ice_pf *pf)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_fdir_info *fdir_info = &pf->fdir;
+	struct ice_fdir_counter_pool_container *container =
+				&fdir_info->counter;
+	uint32_t cnt_index, len;
+	int ret;
+
+	TAILQ_INIT(&container->pool_list);
+
+	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
+	len = ICE_FDIR_COUNTERS_PER_BLOCK;
+
+	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+ice_fdir_counter_release(struct ice_pf *pf)
+{
+	struct ice_fdir_info *fdir_info = &pf->fdir;
+	struct ice_fdir_counter_pool_container *container =
+				&fdir_info->counter;
+	uint8_t i;
+
+	for (i = 0; i < container->index_free; i++)
+		rte_free(container->pools[i]);
+
+	return 0;
+}
+
 static void
 ice_release_fdir_filter_list(struct ice_pf *pf)
 {
@@ -169,6 +251,12 @@ ice_fdir_setup(struct ice_pf *pf)
 		return -EINVAL;
 	}
 
+	err = ice_fdir_counter_init(pf);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
+		return -EINVAL;
+	}
+
 	/*Fdir tx queue setup*/
 	err = ice_fdir_setup_tx_resources(pf);
 	if (err) {
@@ -250,6 +338,10 @@ ice_fdir_teardown(struct ice_pf *pf)
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
 
+	err = ice_fdir_counter_release(pf);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
+
 	ice_tx_queue_release(pf->fdir.txq);
 	pf->fdir.txq = NULL;
 	ice_rx_queue_release(pf->fdir.rxq);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [dpdk-dev] [dpdk-dev 10/12] net/ice: add FDIR counter support for flow id
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
                   ` (8 preceding siblings ...)
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 09/12] net/ice: add FDIR counter resource init/release Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 11/12] net/ice: add FDIR counter support for flow shared Yahui Cao
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 12/12] net/ice: add FDIR non-word aligned field support Yahui Cao
  11 siblings, 0 replies; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC (permalink / raw)
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

The FDIR statistics counter counts packet hits by default.
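
For illustration, a counter can be attached and read back as in the
hedged sketch below (the counter id is application-chosen; "flow" is a
rule created with a COUNT action next to its fate action):

	struct rte_flow_action_count count = { .id = 1 };
	/* ... include { RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count } in
	 * the action list passed to rte_flow_create() ...
	 */

	struct rte_flow_query_count stats = { .reset = 1 };
	struct rte_flow_action query_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error err;

	if (!rte_flow_query(port_id, flow, &query_action, &stats, &err) &&
	    stats.hits_set)
		printf("rule hits: %llu\n", (unsigned long long)stats.hits);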

Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
 drivers/net/ice/ice_ethdev.h      |   7 ++
 drivers/net/ice/ice_fdir_filter.c | 147 +++++++++++++++++++++++++++++-
 2 files changed, 152 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 8a54f57f0..46cf96957 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -251,6 +251,10 @@ TAILQ_HEAD(ice_parser_list, ice_flow_parser);
 
 struct ice_fdir_filter_conf {
 	struct ice_fdir_fltr input;
+
+	struct ice_fdir_counter *counter; /* flow specific counter context */
+	struct rte_flow_action_count act_count;
+
 	uint64_t input_set;
 };
 
@@ -273,8 +277,11 @@ struct ice_fdir_fltr_pattern {
 #define ICE_FDIR_COUNTERS_PER_BLOCK		256
 #define ICE_FDIR_COUNTER_INDEX(base_idx) \
 				((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK)
+struct ice_fdir_counter_pool;
+
 struct ice_fdir_counter {
 	TAILQ_ENTRY(ice_fdir_counter) next;
+	struct ice_fdir_counter_pool *pool;
 	uint8_t shared;
 	uint32_t ref_cnt;
 	uint32_t id;
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 5fab7441b..a2da40f85 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -208,6 +208,60 @@ ice_release_fdir_filter_list(struct ice_pf *pf)
 		rte_hash_free(fdir_info->hash_table);
 }
 
+static struct ice_fdir_counter *
+ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_fdir_info *fdir_info = &pf->fdir;
+	struct ice_fdir_counter_pool_container *container =
+				&fdir_info->counter;
+	struct ice_fdir_counter_pool *pool = NULL;
+	struct ice_fdir_counter *counter_free = NULL;
+
+	TAILQ_FOREACH(pool, &container->pool_list, next) {
+		counter_free = TAILQ_FIRST(&pool->counter_list);
+		if (counter_free)
+			break;
+		counter_free = NULL;
+	}
+
+	if (!counter_free) {
+		PMD_DRV_LOG(ERR, "No free counter found");
+		return NULL;
+	}
+
+	counter_free->shared = shared;
+	counter_free->id = id;
+	counter_free->ref_cnt = 1;
+	counter_free->pool = pool;
+
+	/* reset statistic counter value */
+	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
+	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);
+
+	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
+	if (TAILQ_EMPTY(&pool->counter_list)) {
+		TAILQ_REMOVE(&container->pool_list, pool, next);
+		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
+	}
+
+	return counter_free;
+}
+
+static void
+ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
+		      struct ice_fdir_counter *counter)
+{
+	if (!counter)
+		return;
+
+	if (--counter->ref_cnt == 0) {
+		struct ice_fdir_counter_pool *pool = counter->pool;
+
+		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
+	}
+}
+
 /*
  * ice_fdir_setup - reserve and initialize the Flow Director resources
  * @pf: board private structure
@@ -756,12 +810,28 @@ ice_create_fdir_filter(struct ice_adapter *ad,
 		goto free_entry;
 	}
 
+	/* alloc counter for FDIR */
+	if (filter->input.cnt_ena) {
+		struct rte_flow_action_count *act_count = &filter->act_count;
+
+		filter->counter = ice_fdir_counter_alloc(pf,
+							 act_count->shared,
+							 act_count->id);
+		if (!filter->counter) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"Failed to alloc FDIR counter.");
+			goto free_entry;
+		}
+		filter->input.cnt_index = filter->counter->hw_index;
+	}
+
 	ret = ice_add_del_fdir_filter(pf, filter, true);
 	if (ret) {
 		rte_flow_error_set(error, -ret,
 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "Add filter rule failed.");
-		goto free_entry;
+		goto free_counter;
 	}
 
 	rte_memcpy(entry, filter, sizeof(*entry));
@@ -770,7 +840,7 @@ ice_create_fdir_filter(struct ice_adapter *ad,
 		rte_flow_error_set(error, -ret,
 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "Insert entry to table failed.");
-		goto free_entry;
+		goto free_counter;
 	}
 
 	flow->rule = entry;
@@ -778,6 +848,12 @@ ice_create_fdir_filter(struct ice_adapter *ad,
 
 	return 0;
 
+free_counter:
+	if (filter->counter) {
+		ice_fdir_counter_free(pf, filter->counter);
+		filter->counter = NULL;
+	}
+
 free_entry:
 	rte_free(entry);
 	return -rte_errno;
@@ -796,6 +872,11 @@ ice_destroy_fdir_filter(struct ice_adapter *ad,
 
 	filter = (struct ice_fdir_filter_conf *)flow->rule;
 
+	if (filter->counter) {
+		ice_fdir_counter_free(pf, filter->counter);
+		filter->counter = NULL;
+	}
+
 	ice_fdir_extract_fltr_key(&key, filter);
 	entry = ice_fdir_entry_lookup(fdir_info, &key);
 	if (!entry) {
@@ -827,11 +908,55 @@ ice_destroy_fdir_filter(struct ice_adapter *ad,
 	return 0;
 }
 
+static int
+ice_query_fdir_filter(struct ice_adapter *ad,
+		      struct rte_flow *flow,
+		      void *data,
+		      struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_fdir_filter_conf *filter = flow->rule;
+	struct ice_fdir_counter *counter = filter->counter;
+	struct rte_flow_query_count *flow_stats = data;
+	uint64_t hits_lo, hits_hi;
+
+	if (!counter) {
+		rte_flow_error_set(error, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_ACTION,
+				  NULL,
+				  "FDIR counters not available");
+		return -rte_errno;
+	}
+
+	/*
+	 * Reading the low 32 bits latches the high 32 bits into a shadow
+	 * register. Reading the high 32 bits returns the value in the
+	 * shadow register.
+	 */
+	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
+	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
+
+	flow_stats->hits_set = 1;
+	flow_stats->hits = hits_lo | (hits_hi << 32);
+	flow_stats->bytes_set = 0;
+	flow_stats->bytes = 0;
+
+	if (flow_stats->reset) {
+		/* reset statistic counter value */
+		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
+		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
+	}
+
+	return 0;
+}
+
 static struct ice_flow_engine ice_fdir_engine = {
 	.init = ice_init_fdir_filter,
 	.uninit = ice_uninit_fdir_filter,
 	.create = ice_create_fdir_filter,
 	.destroy = ice_destroy_fdir_filter,
+	.query = ice_query_fdir_filter,
 	.type = ICE_FLOW_ENGINE_FDIR,
 };
 
@@ -900,8 +1025,10 @@ ice_fdir_parse_action(struct ice_adapter *ad,
 	struct ice_pf *pf = &ad->pf;
 	const struct rte_flow_action_queue *act_q;
 	const struct rte_flow_action_mark *mark_spec = NULL;
+	const struct rte_flow_action_count *act_count;
 	uint32_t dest_num = 0;
 	uint32_t mark_num = 0;
+	uint32_t counter_num = 0;
 	int ret;
 
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -949,6 +1076,15 @@ ice_fdir_parse_action(struct ice_adapter *ad,
 
 			mark_spec = actions->conf;
 			filter->input.fltr_id = mark_spec->id;
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			counter_num++;
+
+			act_count = actions->conf;
+			filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+			rte_memcpy(&filter->act_count, act_count,
+						sizeof(filter->act_count));
+
 			break;
 		default:
 			rte_flow_error_set(error, EINVAL,
@@ -972,6 +1108,13 @@ ice_fdir_parse_action(struct ice_adapter *ad,
 		return -rte_errno;
 	}
 
+	if (counter_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Too many count actions");
+		return -rte_errno;
+	}
+
 	return 0;
 }
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [dpdk-dev] [dpdk-dev 11/12] net/ice: add FDIR counter support for flow shared
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
                   ` (9 preceding siblings ...)
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 10/12] net/ice: add FDIR counter support for flow id Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 12/12] net/ice: add FDIR non-word aligned field support Yahui Cao
  11 siblings, 0 replies; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC (permalink / raw)
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

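A count action created with the shared bit set is looked up by id and
reused across rules: the existing counter's reference count is bumped
instead of allocating a new HW counter, so all sharing rules
accumulate into the same statistic. A hedged application-side sketch
(the id is made up):

	/* Both rules pass the same shared counter id in their action
	 * lists; their hits land in one hardware counter.
	 */
	struct rte_flow_action_count shared_cnt = { .shared = 1, .id = 7 };
	/* use { RTE_FLOW_ACTION_TYPE_COUNT, .conf = &shared_cnt } in the
	 * action list of each rule that should share the counter */
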
Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
 drivers/net/ice/ice_fdir_filter.c | 35 +++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index a2da40f85..b226ea6d2 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -197,6 +197,29 @@ ice_fdir_counter_release(struct ice_pf *pf)
 	return 0;
 }
 
+static struct ice_fdir_counter *
+ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
+					*container,
+			       uint32_t id)
+{
+	struct ice_fdir_counter_pool *pool;
+	struct ice_fdir_counter *counter;
+	int i;
+
+	TAILQ_FOREACH(pool, &container->pool_list, next) {
+		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
+			counter = &pool->counters[i];
+
+			if (counter->shared &&
+			    counter->ref_cnt &&
+			    counter->id == id)
+				return counter;
+		}
+	}
+
+	return NULL;
+}
+
 static void
 ice_release_fdir_filter_list(struct ice_pf *pf)
 {
@@ -218,6 +241,18 @@ ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
 	struct ice_fdir_counter_pool *pool = NULL;
 	struct ice_fdir_counter *counter_free = NULL;
 
+	if (shared) {
+		counter_free = ice_fdir_counter_shared_search(container, id);
+		if (counter_free) {
+			if (counter_free->ref_cnt + 1 == 0) {
+				rte_errno = E2BIG;
+				return NULL;
+			}
+			counter_free->ref_cnt++;
+			return counter_free;
+		}
+	}
+
 	TAILQ_FOREACH(pool, &container->pool_list, next) {
 		counter_free = TAILQ_FIRST(&pool->counter_list);
 		if (counter_free)
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [dpdk-dev] [dpdk-dev 12/12] net/ice: add FDIR non-word aligned field support
  2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
                   ` (10 preceding siblings ...)
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 11/12] net/ice: add FDIR counter support for flow shared Yahui Cao
@ 2019-09-06 12:00 ` Yahui Cao
  11 siblings, 0 replies; 19+ messages in thread
From: Yahui Cao @ 2019-09-06 12:00 UTC (permalink / raw)
  To: Qiming Yang, Wenzhuo Lu
  Cc: dev, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao

Add support for the IPv4 TOS, TTL and protocol fields.
Add support for their IPv6 counterparts: traffic class, next header
and hop limit.
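
For illustration, the new fields can be matched as in the hedged
sketch below (values are made up; each field needs a full 0xff mask to
be taken into the input set):

	struct rte_flow_item_ipv4 spec = {
		.hdr.type_of_service = 4,
		.hdr.time_to_live = 64,
		.hdr.next_proto_id = 6,	/* TCP */
	};
	struct rte_flow_item_ipv4 mask = {
		.hdr.type_of_service = UINT8_MAX,
		.hdr.time_to_live = UINT8_MAX,
		.hdr.next_proto_id = UINT8_MAX,
	};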

Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
 drivers/net/ice/ice_fdir_filter.c | 41 +++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index b226ea6d2..18e0f5d48 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -9,6 +9,9 @@
 #include "ice_rxtx.h"
 #include "ice_generic_flow.h"
 
+#define ICE_FDIR_IPV6_TC_OFFSET		20
+#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)
+
 #define ICE_FDIR_INSET_ETH_IPV4 (\
 	ICE_INSET_DMAC | \
 	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
@@ -572,8 +575,14 @@ ice_parse_input_set(uint64_t inset, enum ice_flow_field *field)
 		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
 		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
 		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
+		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
+		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
+		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
 		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
 		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
+		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
+		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
+		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
 		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
 		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
 		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
@@ -1174,6 +1183,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
 	};
+	uint32_t vtc_flow_cpu;
 
 
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
@@ -1237,11 +1247,23 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 					input_set |= ICE_INSET_IPV4_SRC;
 				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
 					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
 
 				filter->input.ip.v4.dst_ip =
 					ipv4_spec->hdr.src_addr;
 				filter->input.ip.v4.src_ip =
 					ipv4_spec->hdr.dst_addr;
+				filter->input.ip.v4.tos =
+					ipv4_spec->hdr.type_of_service;
+				filter->input.ip.v4.ttl =
+					ipv4_spec->hdr.time_to_live;
+				filter->input.ip.v4.proto =
+					ipv4_spec->hdr.next_proto_id;
 			}
 
 			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
@@ -1271,10 +1293,29 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
 					input_set |= ICE_INSET_IPV6_DST;
 
+				if ((ipv6_mask->hdr.vtc_flow &
+				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+					input_set |= ICE_INSET_IPV6_TC;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_NEXT_HDR;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+
 				rte_memcpy(filter->input.ip.v6.dst_ip,
 					   ipv6_spec->hdr.src_addr, 16);
 				rte_memcpy(filter->input.ip.v6.src_ip,
 					   ipv6_spec->hdr.dst_addr, 16);
+
+				vtc_flow_cpu =
+				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+				filter->input.ip.v6.tc =
+					(uint8_t)(vtc_flow_cpu >>
+						  ICE_FDIR_IPV6_TC_OFFSET);
+				filter->input.ip.v6.proto =
+					ipv6_spec->hdr.proto;
+				filter->input.ip.v6.hlim =
+					ipv6_spec->hdr.hop_limits;
 			}
 
 			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [dpdk-dev 01/12] net/ice: initialize and set up flow director
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 01/12] net/ice: initialize and set up flow director Yahui Cao
@ 2019-09-07 11:01   ` Ye Xiaolong
  0 siblings, 0 replies; 19+ messages in thread
From: Ye Xiaolong @ 2019-09-07 11:01 UTC (permalink / raw)
  To: Yahui Cao; +Cc: Qiming Yang, Wenzhuo Lu, dev, Qi Zhang, Beilei Xing

On 09/06, Yahui Cao wrote:
>From: Beilei Xing <beilei.xing@intel.com>
>
>Enable flow director, include:
> - Create control VSI
> - Queue pair allocated and set up
> - Programming packet
>
>Signed-off-by: Beilei Xing <beilei.xing@intel.com>
>---
> drivers/net/ice/Makefile          |   1 +
> drivers/net/ice/ice_ethdev.c      | 107 +++++--
> drivers/net/ice/ice_ethdev.h      |  19 ++
> drivers/net/ice/ice_fdir_filter.c | 139 +++++++++
> drivers/net/ice/ice_rxtx.c        | 448 ++++++++++++++++++++++++++++++
> drivers/net/ice/ice_rxtx.h        |   7 +
> drivers/net/ice/meson.build       |   3 +-

Documentation and release note updates are needed here.

> 7 files changed, 704 insertions(+), 20 deletions(-)
> create mode 100644 drivers/net/ice/ice_fdir_filter.c
>
>diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
>index ae53c2646..cbbd03fcf 100644
>--- a/drivers/net/ice/Makefile
>+++ b/drivers/net/ice/Makefile
>@@ -62,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
> endif
> 
> SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
>+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_fdir_filter.c
> ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
> 	CC_AVX2_SUPPORT=1
> else
>diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
>index 647aca3ed..cb32f08df 100644
>--- a/drivers/net/ice/ice_ethdev.c
>+++ b/drivers/net/ice/ice_ethdev.c
>@@ -1097,11 +1097,20 @@ ice_pf_sw_init(struct rte_eth_dev *dev)
> 				  hw->func_caps.common_cap.num_rxq);
> 
> 	pf->lan_nb_qps = pf->lan_nb_qp_max;
>+	if (hw->func_caps.fd_fltr_guar > 0 ||
>+	    hw->func_caps.fd_fltr_best_effort > 0) {
>+		pf->flags |= ICE_FLAG_FDIR;
>+		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
>+		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
>+	} else {
>+		pf->fdir_nb_qps = 0;
>+	}
>+	pf->fdir_qp_offset = 0;
> 
> 	return 0;
> }
> 
>-static struct ice_vsi *
>+struct ice_vsi *
> ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
> {
> 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
>@@ -1113,6 +1122,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
> 	struct rte_ether_addr mac_addr;
> 	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
> 	uint8_t tc_bitmap = 0x1;
>+	uint16_t cfg;
> 
> 	/* hw->num_lports = 1 in NIC mode */
> 	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
>@@ -1136,14 +1146,10 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
> 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
> 
> 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
>-	/* base_queue in used in queue mapping of VSI add/update command.
>-	 * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ
>-	 * cases in the first stage. Only Main VSI.
>-	 */
>-	vsi->base_queue = 0;
> 	switch (type) {
> 	case ICE_VSI_PF:
> 		vsi->nb_qps = pf->lan_nb_qps;
>+		vsi->base_queue = 1;
> 		ice_vsi_config_default_rss(&vsi_ctx.info);
> 		vsi_ctx.alloc_from_pool = true;
> 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
>@@ -1157,6 +1163,18 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
> 		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
> 		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
> 					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
>+
>+		/* FDIR */
>+		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
>+			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
>+		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
>+		cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
>+		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
>+		vsi_ctx.info.max_fd_fltr_dedicated =
>+			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
>+		vsi_ctx.info.max_fd_fltr_shared =
>+			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
>+
> 		/* Enable VLAN/UP trip */
> 		ret = ice_vsi_config_tc_queue_mapping(vsi,
> 						      &vsi_ctx.info,
>@@ -1169,6 +1187,28 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
> 			goto fail_mem;
> 		}
> 
>+		break;
>+	case ICE_VSI_CTRL:
>+		vsi->nb_qps = pf->fdir_nb_qps;
>+		vsi->base_queue = ICE_FDIR_QUEUE_ID;
>+		vsi_ctx.alloc_from_pool = true;
>+		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
>+
>+		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
>+		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
>+		cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
>+		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
>+		vsi_ctx.info.sw_id = hw->port_info->sw_id;
>+		ret = ice_vsi_config_tc_queue_mapping(vsi,
>+						      &vsi_ctx.info,
>+						      ICE_DEFAULT_TCMAP);
>+		if (ret) {
>+			PMD_INIT_LOG(ERR,
>+				     "tc queue mapping with vsi failed, "
>+				     "err = %d",
>+				     ret);
>+			goto fail_mem;
>+		}
> 		break;
> 	default:
> 		/* for other types of VSI */
>@@ -1187,10 +1227,19 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
> 		}
> 		vsi->msix_intr = ret;
> 		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
>+	} else if (type == ICE_VSI_CTRL) {
>+		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
>+		if (ret < 0) {
>+			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
>+				    vsi->vsi_id, ret);
>+		}
>+		vsi->msix_intr = ret;
>+		vsi->nb_msix = 1;
> 	} else {
> 		vsi->msix_intr = 0;
> 		vsi->nb_msix = 0;
> 	}
>+

Unnecessary empty line.

> 	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
> 	if (ret != ICE_SUCCESS) {
> 		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
>@@ -1202,20 +1251,22 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
> 	pf->vsis_allocated = vsi_ctx.vsis_allocd;
> 	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
> 
>-	/* MAC configuration */
>-	rte_memcpy(pf->dev_addr.addr_bytes,
>-		   hw->port_info->mac.perm_addr,
>-		   ETH_ADDR_LEN);
>+	if (type == ICE_VSI_PF) {
>+		/* MAC configuration */
>+		rte_memcpy(pf->dev_addr.addr_bytes,
>+			   hw->port_info->mac.perm_addr,
>+			   ETH_ADDR_LEN);
> 
>-	rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
>-	ret = ice_add_mac_filter(vsi, &mac_addr);
>-	if (ret != ICE_SUCCESS)
>-		PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
>+		rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
>+		ret = ice_add_mac_filter(vsi, &mac_addr);
>+		if (ret != ICE_SUCCESS)
>+			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
> 
>-	rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
>-	ret = ice_add_mac_filter(vsi, &mac_addr);
>-	if (ret != ICE_SUCCESS)
>-		PMD_INIT_LOG(ERR, "Failed to add MAC filter");
>+		rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
>+		ret = ice_add_mac_filter(vsi, &mac_addr);
>+		if (ret != ICE_SUCCESS)
>+			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
>+	}
> 
> 	/* At the beginning, only TC0. */
> 	/* What we need here is the maximam number of the TX queues.
>@@ -1253,7 +1304,9 @@ ice_send_driver_ver(struct ice_hw *hw)
> static int
> ice_pf_setup(struct ice_pf *pf)
> {
>+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> 	struct ice_vsi *vsi;
>+	uint16_t unused;
> 
> 	/* Clear all stats counters */
> 	pf->offset_loaded = FALSE;
>@@ -1262,6 +1315,13 @@ ice_pf_setup(struct ice_pf *pf)
> 	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
> 	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
> 
>+	/* force guaranteed filter pool for PF */
>+	ice_alloc_fd_guar_item(hw, &unused,
>+			       hw->func_caps.fd_fltr_guar);
>+	/* force shared filter pool for PF */
>+	ice_alloc_fd_shrd_item(hw, &unused,
>+			       hw->func_caps.fd_fltr_best_effort);
>+
> 	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
> 	if (!vsi) {
> 		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
>@@ -1698,7 +1758,7 @@ ice_dev_init(struct rte_eth_dev *dev)
> 	return ret;
> }
> 
>-static int
>+int
> ice_release_vsi(struct ice_vsi *vsi)
> {
> 	struct ice_hw *hw;
>@@ -1780,6 +1840,9 @@ ice_dev_stop(struct rte_eth_dev *dev)
> 	/* disable all queue interrupts */
> 	ice_vsi_disable_queues_intr(main_vsi);
> 
>+	if (pf->fdir.fdir_vsi)
>+		ice_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
>+
> 	/* Clear all queues and release mbufs */
> 	ice_clear_queues(dev);
> 
>@@ -2117,6 +2180,12 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
> 	/* Enable interrupts for all the queues */
> 	ice_vsi_enable_queues_intr(vsi);
> 
>+	/* Enable FDIR MSIX interrupt */
>+	if (pf->fdir.fdir_vsi) {
>+		ice_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
>+		ice_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
>+	}
>+
> 	rte_intr_enable(intr_handle);
> 
> 	return 0;
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index d1d07641d..c43242b63 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -249,6 +249,17 @@ TAILQ_HEAD(ice_flow_list, rte_flow);
> struct ice_flow_parser;
> TAILQ_HEAD(ice_parser_list, ice_flow_parser);
> 
>+/**
>+ *  A structure used to define fields of a FDIR related info.
>+ */
>+struct ice_fdir_info {
>+	struct ice_vsi *fdir_vsi;     /* pointer to fdir VSI structure */
>+	struct ice_tx_queue *txq;
>+	struct ice_rx_queue *rxq;
>+	void *prg_pkt;                 /* memory for fdir program packet */
>+	uint64_t dma_addr;             /* physical address of packet memory */
>+};
>+
> struct ice_pf {
> 	struct ice_adapter *adapter; /* The adapter this PF associate to */
> 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
>@@ -268,6 +279,9 @@ struct ice_pf {
> 	uint16_t lan_nb_qp_max;
> 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
> 	uint16_t base_queue; /* The base queue pairs index  in the device */
>+	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
>+	uint16_t fdir_qp_offset;
>+	struct ice_fdir_info fdir; /* flow director info */
> 	struct ice_hw_port_stats stats_offset;
> 	struct ice_hw_port_stats stats;
> 	/* internal packet statistics, it should be excluded from the total */
>@@ -348,6 +362,11 @@ struct ice_vsi_vlan_pvid_info {
> #define ICE_PF_TO_ETH_DEV(pf) \
> 	(((struct ice_pf *)pf)->adapter->eth_dev)
> 
>+struct ice_vsi *
>+ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
>+int
>+ice_release_vsi(struct ice_vsi *vsi);
>+
> static inline int
> ice_align_floor(int n)
> {
>diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
>new file mode 100644
>index 000000000..03d143058
>--- /dev/null
>+++ b/drivers/net/ice/ice_fdir_filter.c
>@@ -0,0 +1,139 @@
>+#include <stdio.h>
>+#include <rte_flow.h>
>+#include "base/ice_fdir.h"
>+#include "base/ice_flow.h"
>+#include "base/ice_type.h"
>+#include "ice_ethdev.h"
>+#include "ice_rxtx.h"
>+#include "ice_generic_flow.h"
>+
>+static const struct rte_memzone *
>+ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
>+{
>+	const struct rte_memzone *mz;
>+
>+	mz = rte_memzone_lookup(name);
>+	if (mz)
>+		return mz;
>+
>+	mz = rte_memzone_reserve_aligned(name, len, socket_id,
>+					 RTE_MEMZONE_IOVA_CONTIG,
>+					 ICE_RING_BASE_ALIGN);
>+	return mz;

I think we can directly return rte_memzone_reserve_aligned(xxx) here.
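
Something like (untested):

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);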

>+}
>+
>+#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"
>+
>+/*
>+ * ice_fdir_setup - reserve and initialize the Flow Director resources
>+ * @pf: board private structure
>+ */
>+static int
>+ice_fdir_setup(struct ice_pf *pf)
>+{
>+	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
>+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
>+	const struct rte_memzone *mz = NULL;
>+	char z_name[RTE_MEMZONE_NAMESIZE];
>+	struct ice_vsi *vsi;
>+	int err = ICE_SUCCESS;
>+
>+	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
>+		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
>+		return -ENOTSUP;
>+	}
>+
>+	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
>+		    " fd_fltr_best_effort = %u.",
>+		    hw->func_caps.fd_fltr_guar,
>+		    hw->func_caps.fd_fltr_best_effort);
>+
>+	if (pf->fdir.fdir_vsi) {
>+		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
>+		return ICE_SUCCESS;
>+	}
>+
>+	/* make new FDIR VSI */
>+	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
>+	if (!vsi) {
>+		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
>+		return -EINVAL;
>+	}
>+	pf->fdir.fdir_vsi = vsi;
>+
>+	/*Fdir tx queue setup*/
>+	err = ice_fdir_setup_tx_resources(pf);
>+	if (err) {
>+		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
>+		goto fail_setup_tx;
>+	}
>+
>+	/* FDIR Rx queue setup */
>+	err = ice_fdir_setup_rx_resources(pf);
>+	if (err) {
>+		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
>+		goto fail_setup_rx;
>+	}
>+
>+	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
>+	if (err) {
>+		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
>+		goto fail_mem;
>+	}
>+
>+	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
>+	if (err) {
>+		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
>+		goto fail_mem;
>+	}
>+
>+	/* reserve memory for the fdir programming packet */
>+	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
>+		 ICE_FDIR_MZ_NAME,
>+		 eth_dev->data->port_id);
>+	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
>+	if (!mz) {
>+		PMD_DRV_LOG(ERR, "Cannot init memzone for "
>+			    "flow director program packet.");
>+		err = -ENOMEM;
>+		goto fail_mem;
>+	}
>+	pf->fdir.prg_pkt = mz->addr;
>+	pf->fdir.dma_addr = mz->iova;
>+
>+	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
>+		    vsi->base_queue);
>+	return ICE_SUCCESS;
>+
>+fail_mem:
>+	ice_rx_queue_release(pf->fdir.rxq);
>+	pf->fdir.rxq = NULL;
>+fail_setup_rx:
>+	ice_tx_queue_release(pf->fdir.txq);
>+	pf->fdir.txq = NULL;
>+fail_setup_tx:
>+	ice_release_vsi(vsi);
>+	pf->fdir.fdir_vsi = NULL;
>+	return err;
>+}
>+
>+static int
>+ice_init_fdir_filter(struct ice_adapter *ad)
>+{
>+	struct ice_pf *pf = &ad->pf;
>+	int ret;
>+
>+	ret = ice_fdir_setup(pf);
>+
>+	return ret;

return ice_fdir_setup(pf);

>+}
>+
>+static struct ice_flow_engine ice_fdir_engine = {
>+	.init = ice_init_fdir_filter,
>+	.type = ICE_FLOW_ENGINE_FDIR,
>+};
>+
>+RTE_INIT(ice_fdir_init_log)
>+{
>+	ice_register_flow_engine(&ice_fdir_engine);
>+}
>diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
>index 0282b5375..bd802e350 100644
>--- a/drivers/net/ice/ice_rxtx.c
>+++ b/drivers/net/ice/ice_rxtx.c
>@@ -474,6 +474,175 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> 	return 0;
> }
> 
>+static enum ice_status
>+ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
>+{
>+	struct ice_vsi *vsi = rxq->vsi;
>+	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
>+	struct ice_rlan_ctx rx_ctx;
>+	enum ice_status err;
>+	uint32_t regval;
>+
>+	/**
>+	 * The kernel driver uses flex descriptor. It sets the register
>+	 * to flex descriptor mode.
>+	 * DPDK uses legacy descriptor. It should set the register back
>+	 * to the default value, then uses legacy descriptor mode.
>+	 */

I remember Haiyue has a patchset to enable the flex descriptor in this release;
do we have any conflicts here?

>+	regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
>+		 QRXFLXP_CNTXT_RXDID_PRIO_M;
>+	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
>+
>+	rxq->rx_hdr_len = 0;
>+	rxq->rx_buf_len = 1024;
>+
>+	memset(&rx_ctx, 0, sizeof(rx_ctx));
>+
>+	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
>+	rx_ctx.qlen = rxq->nb_rx_desc;
>+	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
>+	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
>+	rx_ctx.dtype = 0; /* No Header Split mode */
>+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
>+	rx_ctx.dsize = 1; /* 32B descriptors */
>+#endif
>+	rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
>+	/* TPH: Transaction Layer Packet (TLP) processing hints */
>+	rx_ctx.tphrdesc_ena = 1;
>+	rx_ctx.tphwdesc_ena = 1;
>+	rx_ctx.tphdata_ena = 1;
>+	rx_ctx.tphhead_ena = 1;
>+	/* Low Receive Queue Threshold defined in 64 descriptors units.
>+	 * When the number of free descriptors goes below the lrxqthresh,
>+	 * an immediate interrupt is triggered.
>+	 */
>+	rx_ctx.lrxqthresh = 2;
>+	/* default to 32 byte descriptors, vlan tag extracted to L2TAG2 (1st) */
>+	rx_ctx.l2tsel = 1;
>+	rx_ctx.showiv = 0;
>+	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
>+
>+	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
>+	if (err) {
>+		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
>+			    rxq->queue_id);
>+		return -EINVAL;
>+	}
>+	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
>+	if (err) {
>+		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
>+			    rxq->queue_id);
>+		return -EINVAL;
>+	}
>+
>+	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
>+
>+	/* Init the Rx tail register */
>+	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
>+
>+	return 0;
>+}
>+
>+int
>+ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
>+{
>+	struct ice_rx_queue *rxq;
>+	int err;
>+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>+
>+	PMD_INIT_FUNC_TRACE();
>+
>+	rxq = pf->fdir.rxq;
>+	if (!rxq || !rxq->q_set) {
>+		PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
>+			    rx_queue_id);
>+		return -EINVAL;
>+	}
>+
>+	err = ice_fdir_program_hw_rx_queue(rxq);
>+	if (err) {
>+		PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
>+			    rx_queue_id);
>+		return -EIO;
>+	}
>+
>+	rte_wmb();
>+
>+	/* Init the RX tail register. */
>+	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
>+
>+	err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
>+	if (err) {
>+		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
>+			    rx_queue_id);
>+
>+		ice_reset_rx_queue(rxq);
>+		return -EINVAL;
>+	}
>+
>+	return 0;
>+}
>+
>+int
>+ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
>+{
>+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>+	struct ice_tx_queue *txq;
>+	int err;
>+	struct ice_vsi *vsi;
>+	struct ice_hw *hw;
>+	struct ice_aqc_add_tx_qgrp txq_elem;
>+	struct ice_tlan_ctx tx_ctx;
>+
>+	PMD_INIT_FUNC_TRACE();
>+
>+	txq = pf->fdir.txq;
>+	if (!txq || !txq->q_set) {
>+		PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
>+			    tx_queue_id);
>+		return -EINVAL;
>+	}
>+
>+	vsi = txq->vsi;
>+	hw = ICE_VSI_TO_HW(vsi);
>+
>+	memset(&txq_elem, 0, sizeof(txq_elem));
>+	memset(&tx_ctx, 0, sizeof(tx_ctx));
>+	txq_elem.num_txqs = 1;
>+	txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
>+
>+	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
>+	tx_ctx.qlen = txq->nb_tx_desc;
>+	tx_ctx.pf_num = hw->pf_id;
>+	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
>+	tx_ctx.src_vsi = vsi->vsi_id;
>+	tx_ctx.port_num = hw->port_info->lport;
>+	tx_ctx.tso_ena = 1; /* tso enable */
>+	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
>+	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
>+
>+	ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
>+		    ice_tlan_ctx_info);
>+
>+	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
>+
>+	/* Init the Tx tail register */
>+	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
>+
>+	/* Fix me, we assume TC always 0 here */
>+	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
>+			      &txq_elem, sizeof(txq_elem), NULL);
>+	if (err) {
>+		PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
>+		return -EIO;
>+	}
>+	/* store the schedule node id */
>+	txq->q_teid = txq_elem.txqs[0].q_teid;
>+
>+	return 0;
>+}
>+

ice_fdir_program_hw_rx_queue / ice_fdir_setup_rx_resources / ice_fdir_setup_tx_resources
look quite similar to ice_program_hw_rx_queue / ice_setup_rx_resources / ice_setup_tx_resources;
any chance to reuse the existing code to avoid duplication?
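For instance, the common ice_rlan_ctx setup could move into one shared helper
that both paths call. A rough sketch only -- the helper name and the buf_len
parameter are invented here, and the FDIR path would still skip the mempool
handling:

static enum ice_status
ice_program_hw_rx_queue_common(struct ice_rx_queue *rxq, uint16_t buf_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(rxq->vsi);
	struct ice_rlan_ctx rx_ctx;
	enum ice_status err;

	memset(&rx_ctx, 0, sizeof(rx_ctx));
	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
	rx_ctx.dbuf = buf_len >> ICE_RLAN_CTX_DBUF_S;
	/* ... fill the remaining fields exactly as in the two copies ... */

	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
	if (err)
		return err;

	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	return ICE_SUCCESS;
}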

Thanks,
Xiaolong


> /* Free all mbufs for descriptors in tx queue */
> static void
> _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
>@@ -997,6 +1166,10 @@ ice_rxd_status_to_pkt_flags(uint64_t qword)
> 		  ICE_RX_DESC_FLTSTAT_RSS_HASH) ==
> 		 ICE_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
> 
>+	/* Check if FDIR Match */
>+	flags |= (qword & (1 << ICE_RX_DESC_STATUS_FLM_S) ?
>+		  PKT_RX_FDIR : 0);
>+
> 	return flags;
> }
> 
>@@ -1060,6 +1233,33 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)
> 		   mb->vlan_tci, mb->vlan_tci_outer);
> }
> 
>+#define ICE_RX_DESC_EXT_STATUS_FLEXBH_M   0x03
>+#define ICE_RX_DESC_EXT_STATUS_FLEXBH_FD_ID  0x01
>+
>+static inline uint64_t
>+ice_rxd_build_fdir(volatile union ice_rx_desc *rxdp, struct rte_mbuf *mb)
>+{
>+	uint64_t flags = 0;
>+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
>+	uint16_t flexbh;
>+
>+	flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
>+		ICE_RX_DESC_EXT_STATUS_FLEXBH_S) &
>+		ICE_RX_DESC_EXT_STATUS_FLEXBH_M;
>+
>+	if (flexbh == ICE_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
>+		mb->hash.fdir.hi =
>+			rte_le_to_cpu_32(rxdp->wb.qword3.fd_id);
>+		flags |= PKT_RX_FDIR_ID;
>+	}
>+#else
>+	mb->hash.fdir.hi =
>+		rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
>+	flags |= PKT_RX_FDIR_ID;
>+#endif
>+	return flags;
>+}
>+
> #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
> #define ICE_LOOK_AHEAD 8
> #if (ICE_LOOK_AHEAD != 8)
>@@ -1127,6 +1327,8 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
> 				mb->hash.rss =
> 					rte_le_to_cpu_32(
> 						rxdp[j].wb.qword0.hi_dword.rss);
>+			if (pkt_flags & PKT_RX_FDIR)
>+				pkt_flags |= ice_rxd_build_fdir(&rxdp[j], mb);
> 			mb->packet_type = ptype_tbl[(uint8_t)(
> 						(qword1 &
> 						 ICE_RXD_QW1_PTYPE_M) >>
>@@ -1448,6 +1650,8 @@ ice_recv_scattered_pkts(void *rx_queue,
> 		if (pkt_flags & PKT_RX_RSS_HASH)
> 			first_seg->hash.rss =
> 				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
>+		if (pkt_flags & PKT_RX_FDIR)
>+			pkt_flags |= ice_rxd_build_fdir(&rxd, first_seg);
> 
> 		first_seg->ol_flags |= pkt_flags;
> 		/* Prefetch data of first segment, if configured to do so. */
>@@ -1635,6 +1839,127 @@ ice_free_queues(struct rte_eth_dev *dev)
> 	dev->data->nb_tx_queues = 0;
> }
> 
>+#define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC
>+#define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC
>+
>+int
>+ice_fdir_setup_tx_resources(struct ice_pf *pf)
>+{
>+	struct ice_tx_queue *txq;
>+	const struct rte_memzone *tz = NULL;
>+	uint32_t ring_size;
>+	struct rte_eth_dev *dev;
>+
>+	if (!pf) {
>+		PMD_DRV_LOG(ERR, "PF is not available");
>+		return -EINVAL;
>+	}
>+
>+	dev = pf->adapter->eth_dev;
>+
>+	/* Allocate the TX queue data structure. */
>+	txq = rte_zmalloc_socket("ice fdir tx queue",
>+				 sizeof(struct ice_tx_queue),
>+				 RTE_CACHE_LINE_SIZE,
>+				 SOCKET_ID_ANY);
>+	if (!txq) {
>+		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
>+			    "tx queue structure.");
>+		return -ENOMEM;
>+	}
>+
>+	/* Allocate TX hardware ring descriptors. */
>+	ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
>+	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
>+
>+	tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
>+				      ICE_FDIR_QUEUE_ID, ring_size,
>+				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
>+	if (!tz) {
>+		ice_tx_queue_release(txq);
>+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
>+		return -ENOMEM;
>+	}
>+
>+	txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
>+	txq->queue_id = ICE_FDIR_QUEUE_ID;
>+	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
>+	txq->vsi = pf->fdir.fdir_vsi;
>+
>+	txq->tx_ring_dma = tz->iova;
>+	txq->tx_ring = (struct ice_tx_desc *)tz->addr;
>+	/*
>+	 * No need to allocate a software ring or reset it for the FDIR
>+	 * program queue; just mark the queue as configured.
>+	 */
>+	txq->q_set = TRUE;
>+	pf->fdir.txq = txq;
>+
>+	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
>+
>+	return ICE_SUCCESS;
>+}
>+
>+int
>+ice_fdir_setup_rx_resources(struct ice_pf *pf)
>+{
>+	struct ice_rx_queue *rxq;
>+	const struct rte_memzone *rz = NULL;
>+	uint32_t ring_size;
>+	struct rte_eth_dev *dev;
>+
>+	if (!pf) {
>+		PMD_DRV_LOG(ERR, "PF is not available");
>+		return -EINVAL;
>+	}
>+
>+	dev = pf->adapter->eth_dev;
>+
>+	/* Allocate the RX queue data structure. */
>+	rxq = rte_zmalloc_socket("ice fdir rx queue",
>+				 sizeof(struct ice_rx_queue),
>+				 RTE_CACHE_LINE_SIZE,
>+				 SOCKET_ID_ANY);
>+	if (!rxq) {
>+		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
>+			    "rx queue structure.");
>+		return -ENOMEM;
>+	}
>+
>+	/* Allocate RX hardware ring descriptors. */
>+	ring_size = sizeof(union ice_rx_desc) * ICE_FDIR_NUM_RX_DESC;
>+	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
>+
>+	rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
>+				      ICE_FDIR_QUEUE_ID, ring_size,
>+				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
>+	if (!rz) {
>+		ice_rx_queue_release(rxq);
>+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
>+		return -ENOMEM;
>+	}
>+
>+	rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
>+	rxq->queue_id = ICE_FDIR_QUEUE_ID;
>+	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
>+	rxq->vsi = pf->fdir.fdir_vsi;
>+
>+	rxq->rx_ring_dma = rz->iova;
>+	memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC * sizeof(union ice_rx_desc));
>+	rxq->rx_ring = (union ice_rx_desc *)rz->addr;
>+
>+	/*
>+	 * No need to allocate a software ring or reset it for the FDIR
>+	 * Rx queue; just mark the queue as configured.
>+	 */
>+	rxq->q_set = TRUE;
>+	pf->fdir.rxq = rxq;
>+
>+	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
>+
>+	return ICE_SUCCESS;
>+}
>+
> uint16_t
> ice_recv_pkts(void *rx_queue,
> 	      struct rte_mbuf **rx_pkts,
>@@ -1716,6 +2041,8 @@ ice_recv_pkts(void *rx_queue,
> 		if (pkt_flags & PKT_RX_RSS_HASH)
> 			rxm->hash.rss =
> 				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
>+		if (pkt_flags & PKT_RX_FDIR)
>+			pkt_flags |= ice_rxd_build_fdir(&rxd, rxm);
> 		rxm->ol_flags |= pkt_flags;
> 		/* copy old mbuf to rx_pkts */
> 		rx_pkts[nb_rx++] = rxm;
>@@ -3061,3 +3388,124 @@ ice_set_default_ptype_table(struct rte_eth_dev *dev)
> 	for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
> 		ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
> }
>+
>+/*
>+ * Check the programming status descriptor in the Rx queue.
>+ * This is done after a Flow Director programming descriptor
>+ * has been sent on the Tx queue.
>+ */
>+static inline int
>+ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
>+{
>+	volatile union ice_rx_desc *rxdp;
>+	uint64_t qword1;
>+	uint32_t rx_status;
>+	uint32_t len, id;
>+	uint32_t error;
>+	int ret = 0;
>+
>+	rxdp = &rxq->rx_ring[rxq->rx_tail];
>+	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
>+	rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
>+			>> ICE_RXD_QW1_STATUS_S;
>+
>+	if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
>+		len = qword1 >> ICE_RX_PROG_STATUS_DESC_LEN_S;
>+		id = (qword1 & ICE_RX_PROG_STATUS_DESC_QW1_PROGID_M) >>
>+			    ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S;
>+
>+		if (len == ICE_RX_PROG_STATUS_DESC_LEN &&
>+		    id == ICE_RX_PROG_STATUS_DESC_FD_FLTR_STATUS) {
>+			error = (qword1 &
>+				ICE_RX_PROG_STATUS_DESC_QW1_ERROR_M) >>
>+				ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S;
>+			if (error == (0x1 <<
>+				ICE_RX_PROG_STATUS_DESC_FD_TBL_FULL_S)) {
>+				PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
>+					    " (FD_ID %u): programming status"
>+					    " reported.",
>+					    rxdp->wb.qword0.hi_dword.fd_id);
>+				ret = -1;
>+			} else if (error == (0x1 <<
>+				ICE_RX_PROG_STATUS_DESC_NO_FD_ENTRY_S)) {
>+				PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
>+					    " (FD_ID %u): programming status"
>+					    " reported.",
>+					    rxdp->wb.qword0.hi_dword.fd_id);
>+				ret = -1;
>+			} else {
>+				PMD_DRV_LOG(ERR, "invalid programming status"
>+					    " reported, error = %u.", error);
>+			}
>+		} else {
>+			PMD_DRV_LOG(INFO, "unknown programming status"
>+				    " reported, len = %d, id = %u.", len, id);
>+		}
>+		rxdp->wb.qword1.status_error_len = 0;
>+		rxq->rx_tail++;
>+		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
>+			rxq->rx_tail = 0;
>+		if (rxq->rx_tail == 0)
>+			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
>+		else
>+			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
>+	}
>+
>+	return ret;
>+}
>+
>+#define ICE_FDIR_MAX_WAIT_US 10000
>+
>+int
>+ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
>+{
>+	struct ice_tx_queue *txq = pf->fdir.txq;
>+	struct ice_rx_queue *rxq = pf->fdir.rxq;
>+	volatile struct ice_fltr_desc *fdirdp;
>+	volatile struct ice_tx_desc *txdp;
>+	uint32_t td_cmd;
>+	uint16_t i;
>+
>+	fdirdp = (volatile struct ice_fltr_desc *)
>+		(&txq->tx_ring[txq->tx_tail]);
>+	fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
>+	fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
>+
>+	txdp = &txq->tx_ring[txq->tx_tail + 1];
>+	txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
>+	td_cmd = ICE_TX_DESC_CMD_EOP |
>+		ICE_TX_DESC_CMD_RS  |
>+		ICE_TX_DESC_CMD_DUMMY;
>+
>+	txdp->cmd_type_offset_bsz =
>+		ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
>+
>+	txq->tx_tail += 2;
>+	if (txq->tx_tail >= txq->nb_tx_desc)
>+		txq->tx_tail = 0;
>+	/* Update the tx tail register */
>+	rte_wmb();
>+	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
>+	for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
>+		if ((txdp->cmd_type_offset_bsz &
>+		     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
>+		    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
>+			break;
>+		rte_delay_us(1);
>+	}
>+	if (i >= ICE_FDIR_MAX_WAIT_US) {
>+		PMD_DRV_LOG(ERR,
>+			    "Failed to program FDIR filter: time out to get DD on tx queue.");
>+		return -ETIMEDOUT;
>+	}
>+
>+	for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
>+		if (ice_check_fdir_programming_status(rxq) >= 0)
>+			return 0;
>+		rte_delay_us(1);
>+	}
>+
>+	PMD_DRV_LOG(ERR,
>+		    "Failed to program FDIR filter: programming status reported.");
>+	return -ETIMEDOUT;
>+}
>diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
>index e9214110c..450db0244 100644
>--- a/drivers/net/ice/ice_rxtx.h
>+++ b/drivers/net/ice/ice_rxtx.h
>@@ -36,6 +36,8 @@
> #define ICE_TX_MAX_FREE_BUF_SZ      64
> #define ICE_DESCS_PER_LOOP          4
> 
>+#define ICE_FDIR_PKT_LEN	512
>+
> typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
> typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
> 
>@@ -147,10 +149,14 @@ int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
>+int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
>+int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> void ice_rx_queue_release(void *rxq);
> void ice_tx_queue_release(void *txq);
> void ice_clear_queues(struct rte_eth_dev *dev);
> void ice_free_queues(struct rte_eth_dev *dev);
>+int ice_fdir_setup_tx_resources(struct ice_pf *pf);
>+int ice_fdir_setup_rx_resources(struct ice_pf *pf);
> uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> 		       uint16_t nb_pkts);
> uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>@@ -188,4 +194,5 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
> 					  uint16_t nb_pkts);
> uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
> 				uint16_t nb_pkts);
>+int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
> #endif /* _ICE_RXTX_H_ */
>diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
>index 36b4b3c85..53846442a 100644
>--- a/drivers/net/ice/meson.build
>+++ b/drivers/net/ice/meson.build
>@@ -10,7 +10,8 @@ sources = files(
> 	'ice_ethdev.c',
> 	'ice_rxtx.c',
> 	'ice_switch_filter.c',
>-	'ice_generic_flow.c'
>+	'ice_generic_flow.c',
>+	'ice_fdir_filter.c'
> 	)
> 
> deps += ['hash']
>-- 
>2.17.1
>

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [dpdk-dev 02/12] net/ice: tear down flow director
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 02/12] net/ice: tear down " Yahui Cao
@ 2019-09-07 11:21   ` Ye Xiaolong
  0 siblings, 0 replies; 19+ messages in thread
From: Ye Xiaolong @ 2019-09-07 11:21 UTC (permalink / raw)
  To: Yahui Cao; +Cc: Qiming Yang, Wenzhuo Lu, dev, Qi Zhang, Beilei Xing

On 09/06, Yahui Cao wrote:
>From: Beilei Xing <beilei.xing@intel.com>
>
>Release resources on flow director, include:
> - Release queue.
> - Release VSI.
>
>Signed-off-by: Beilei Xing <beilei.xing@intel.com>
>---
> drivers/net/ice/ice_fdir_filter.c | 40 ++++++++++++++++++++++
> drivers/net/ice/ice_rxtx.c        | 57 +++++++++++++++++++++++++++++++
> drivers/net/ice/ice_rxtx.h        |  2 ++

Please update the documentation and release notes as well. Also, what about
combining this patch with the prior one? Together they enable the FDIR engine.

> 3 files changed, 99 insertions(+)
>
>diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
>index 03d143058..451ef92b2 100644
>--- a/drivers/net/ice/ice_fdir_filter.c
>+++ b/drivers/net/ice/ice_fdir_filter.c
>@@ -117,6 +117,37 @@ ice_fdir_setup(struct ice_pf *pf)
> 	return err;
> }
> 
>+/*
>+ * ice_fdir_teardown - release the Flow Director resources
>+ * @pf: board private structure
>+ */
>+static void
>+ice_fdir_teardown(struct ice_pf *pf)
>+{
>+	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
>+	struct ice_vsi *vsi;
>+	int err;
>+
>+	vsi = pf->fdir.fdir_vsi;
>+	if (!vsi)
>+		return;
>+
>+	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
>+	if (err)
>+		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
>+
>+	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
>+	if (err)
>+		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
>+
>+	ice_tx_queue_release(pf->fdir.txq);
>+	pf->fdir.txq = NULL;
>+	ice_rx_queue_release(pf->fdir.rxq);
>+	pf->fdir.rxq = NULL;
>+	ice_release_vsi(vsi);
>+	pf->fdir.fdir_vsi = NULL;
>+}
>+
> static int
> ice_init_fdir_filter(struct ice_adapter *ad)
> {
>@@ -128,8 +159,17 @@ ice_init_fdir_filter(struct ice_adapter *ad)
> 	return ret;
> }
> 
>+static void
>+ice_uninit_fdir_filter(struct ice_adapter *ad)
>+{
>+	struct ice_pf *pf = &ad->pf;
>+
>+	ice_fdir_teardown(pf);
>+}
>+
> static struct ice_flow_engine ice_fdir_engine = {
> 	.init = ice_init_fdir_filter,
>+	.uninit = ice_uninit_fdir_filter,
> 	.type = ICE_FLOW_ENGINE_FDIR,
> };
> 
>diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
>index bd802e350..e41fcb194 100644
>--- a/drivers/net/ice/ice_rxtx.c
>+++ b/drivers/net/ice/ice_rxtx.c
>@@ -748,6 +748,63 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> 	return 0;
> }
> 
>+int
>+ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
>+{
>+	struct ice_rx_queue *rxq;
>+	int err;
>+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>+
>+	rxq = pf->fdir.rxq;
>+
>+	err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
>+	if (err) {
>+		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
>+			    rx_queue_id);
>+		return -EINVAL;
>+	}
>+	ice_rx_queue_release_mbufs(rxq);
>+
>+	return 0;
>+}
>+
>+int
>+ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
>+{
>+	struct ice_tx_queue *txq;
>+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>+	struct ice_vsi *vsi = pf->main_vsi;
>+	enum ice_status status;
>+	uint16_t q_ids[1];
>+	uint32_t q_teids[1];
>+	uint16_t q_handle = tx_queue_id;
>+
>+	txq = pf->fdir.txq;
>+	if (!txq) {
>+		PMD_DRV_LOG(ERR, "TX queue %u is not available",
>+			    tx_queue_id);
>+		return -EINVAL;
>+	}
>+	vsi = txq->vsi;
>+
>+	q_ids[0] = txq->reg_idx;
>+	q_teids[0] = txq->q_teid;
>+
>+	/* Fix me, we assume TC always 0 here */
>+	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
>+				 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
>+	if (status != ICE_SUCCESS) {
>+		PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
>+		return -EINVAL;
>+	}
>+
>+	ice_tx_queue_release_mbufs(txq);
>+
>+	return 0;
>+}

Better to reuse ice_rx/tx_queue_stop.
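For the Tx side, the ice_dis_vsi_txq() call and the mbuf release could move
into a small helper that takes the txq directly, so the FDIR path needs no
dev->data lookup. A sketch (the helper name is made up):

static int
ice_tx_queue_stop_common(struct ice_hw *hw, struct ice_tx_queue *txq)
{
	enum ice_status status;
	uint16_t q_ids[1] = { txq->reg_idx };
	uint32_t q_teids[1] = { txq->q_teid };
	uint16_t q_handle = txq->queue_id;

	/* Fix me, we assume TC always 0 here */
	status = ice_dis_vsi_txq(hw->port_info, txq->vsi->idx, 0, 1,
				 &q_handle, q_ids, q_teids,
				 ICE_NO_RESET, 0, NULL);
	if (status != ICE_SUCCESS) {
		PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
		return -EINVAL;
	}

	ice_tx_queue_release_mbufs(txq);
	return 0;
}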

Thanks,
Xiaolong

>+
> int
> ice_rx_queue_setup(struct rte_eth_dev *dev,
> 		   uint16_t queue_idx,
>diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
>index 450db0244..24376c0d5 100644
>--- a/drivers/net/ice/ice_rxtx.h
>+++ b/drivers/net/ice/ice_rxtx.h
>@@ -151,6 +151,8 @@ int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
>+int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
>+int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> void ice_rx_queue_release(void *rxq);
> void ice_tx_queue_release(void *txq);
> void ice_clear_queues(struct rte_eth_dev *dev);
>-- 
>2.17.1
>

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [dpdk-dev 03/12] net/ice: enable input set configuration
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 03/12] net/ice: enable input set configuration Yahui Cao
@ 2019-09-07 12:32   ` Ye Xiaolong
  0 siblings, 0 replies; 19+ messages in thread
From: Ye Xiaolong @ 2019-09-07 12:32 UTC (permalink / raw)
  To: Yahui Cao; +Cc: Qiming Yang, Wenzhuo Lu, dev, Qi Zhang, Beilei Xing

On 09/06, Yahui Cao wrote:
>From: Beilei Xing <beilei.xing@intel.com>
>
>Configure input set, include:
> - Parse input set.
> - Check the segment.
> - Create profile.

I'd prefer more descriptive sentences in the commit log than a few bullets.

>
>Signed-off-by: Beilei Xing <beilei.xing@intel.com>
>---
> drivers/net/ice/ice_ethdev.h      |   3 +
> drivers/net/ice/ice_fdir_filter.c | 245 ++++++++++++++++++++++++++++++
> 2 files changed, 248 insertions(+)
>
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index c43242b63..ea68858d1 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -366,6 +366,9 @@ struct ice_vsi *
> ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
> int
> ice_release_vsi(struct ice_vsi *vsi);
>+int
>+ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
>+			uint64_t input_set);
> 
> static inline int
> ice_align_floor(int n)
>diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
>index 451ef92b2..0840c3b4b 100644
>--- a/drivers/net/ice/ice_fdir_filter.c
>+++ b/drivers/net/ice/ice_fdir_filter.c
>@@ -148,6 +148,251 @@ ice_fdir_teardown(struct ice_pf *pf)
> 	pf->fdir.fdir_vsi = NULL;
> }
> 
>+static void
>+ice_fdir_rm_prof(struct ice_hw *hw, enum ice_fltr_ptype ptype)
>+{
>+	struct ice_fd_hw_prof *hw_prof = hw->fdir_prof[ptype];
>+	uint64_t prof_id;
>+	uint16_t vsi_num;
>+	int tun;
>+	int i;
>+
>+	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
>+		return;
>+
>+	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
>+		if (!hw_prof->fdir_seg[tun])
>+			break;
>+		prof_id = ptype + tun * ICE_FLTR_PTYPE_MAX;
>+		for (i = 0; i < hw_prof->cnt; i++) {
>+			if (hw_prof->entry_h[i][tun]) {
>+				vsi_num = ice_get_hw_vsi_num(hw,
>+							     hw_prof->vsi_h[i]);
>+				ice_rem_prof_id_flow(hw, ICE_BLK_FD,
>+						     vsi_num, ptype);
>+				ice_flow_rem_entry(hw,
>+						   hw_prof->entry_h[i][tun]);
>+				hw_prof->entry_h[i][tun] = 0;
>+			}
>+		}
>+		ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
>+		rte_free(hw_prof->fdir_seg[tun]);
>+		hw_prof->fdir_seg[tun] = NULL;
>+	}
>+	for (i = 0; i < hw_prof->cnt; i++)
>+		hw_prof->vsi_h[i] = 0;

memset(hw_prof->vsi_h, 0, sizeof(*hw_prof->vsi_h) * hw_prof->cnt);

>+	hw_prof->cnt = 0;
>+}
>+
>+static int
>+ice_fdir_cfg_hw_tbl(struct ice_pf *pf, struct ice_vsi *vsi,
>+		    struct ice_vsi *ctrl_vsi,
>+		    struct ice_flow_seg_info *seg,
>+		    enum ice_fltr_ptype ptype,
>+		    bool is_tunnel)
>+{
>+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
>+	enum ice_flow_dir dir = ICE_FLOW_RX;
>+	struct ice_flow_seg_info *ori_seg;
>+	struct ice_fd_hw_prof *hw_prof;
>+	struct ice_flow_prof *prof;
>+	uint64_t entry_1 = 0;
>+	uint64_t entry_2 = 0;
>+	uint16_t vsi_num;
>+	int ret;
>+	uint64_t prof_id;
>+
>+	if (!hw->fdir_prof) {
>+		hw->fdir_prof = (struct ice_fd_hw_prof **)
>+			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
>+				   sizeof(*hw->fdir_prof));
>+		if (!hw->fdir_prof)
>+			return -ENOMEM;
>+	}
>+	if (!hw->fdir_prof[ptype]) {
>+		hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
>+			ice_malloc(hw, sizeof(**hw->fdir_prof));
>+		if (!hw->fdir_prof[ptype])
>+			return -ENOMEM;
>+	}

When will we free hw->fdir_prof and hw->fdir_prof[ptype]?
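Presumably teardown needs a counterpart that walks the table; roughly (the
function name is made up, and since they were allocated with ice_malloc()
the matching ice_free() seems right):

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	if (!hw->fdir_prof)
		return;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
	     ptype < ICE_FLTR_PTYPE_MAX; ptype++)
		ice_free(hw, hw->fdir_prof[ptype]);

	ice_free(hw, hw->fdir_prof);
	hw->fdir_prof = NULL;
}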

>+
>+	hw_prof = hw->fdir_prof[ptype];
>+	ori_seg = hw_prof->fdir_seg[is_tunnel];
>+	if (ori_seg) {
>+		if (!memcmp(ori_seg, seg, sizeof(*seg)))
>+			return -EAGAIN;
>+		if (hw->fdir_fltr_cnt[ptype])
>+			return -EINVAL;
>+
>+		ice_fdir_rm_prof(hw, ptype);
>+	}
>+
>+	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
>+	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
>+				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
>+	if (ret)
>+		return ret;
>+	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
>+				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
>+				 seg, NULL, 0, &entry_1);
>+	if (ret) {
>+		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
>+			    ptype);
>+		goto err_add_prof;
>+	}
>+	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
>+				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
>+				 seg, NULL, 0, &entry_2);
>+	if (ret) {
>+		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
>+			    ptype);
>+		goto err_add_entry;
>+	}
>+
>+	hw_prof->cnt = 0;
>+	hw_prof->fdir_seg[is_tunnel] = seg;
>+	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
>+	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
>+	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
>+	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
>+
>+	return ret;
>+
>+err_add_entry:
>+	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
>+	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
>+	ice_flow_rem_entry(hw, entry_1);
>+err_add_prof:
>+	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
>+
>+	return ret;
>+}
>+
>+static void
>+ice_parse_input_set(uint64_t inset, enum ice_flow_field *field)
>+{
>+	uint32_t i, j;
>+
>+	struct ice_inset_map {
>+		uint64_t inset;
>+		enum ice_flow_field fld;
>+	};
>+	static const struct ice_inset_map ice_inset_map[] = {
>+		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
>+		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
>+		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
>+		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
>+		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
>+		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
>+		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
>+		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
>+		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
>+		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
>+	};
>+
>+	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
>+		if (inset & ice_inset_map[i].inset)
>+			field[j++] = ice_inset_map[i].fld;
>+	}
>+}
>+
>+int
>+ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
>+			uint64_t input_set)
>+{
>+	struct ice_flow_seg_info *seg, *seg_tun;
>+	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
>+	int i, ret;
>+
>+	if (!input_set)
>+		return -EINVAL;
>+
>+	seg = (struct ice_flow_seg_info *)
>+		ice_malloc(hw, sizeof(*seg));
>+	if (!seg) {
>+		PMD_DRV_LOG(ERR, "No memory can be allocated");
>+		return -ENOMEM;
>+	}
>+
>+	seg_tun = (struct ice_flow_seg_info *)
>+		ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
>+	if (!seg_tun) {
>+		PMD_DRV_LOG(ERR, "No memory can be allocated");
>+		rte_free(seg);
>+		return -ENOMEM;
>+	}
>+
>+	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
>+		field[i] = ICE_FLOW_FIELD_IDX_MAX;

memset(field, ICE_FLOW_FIELD_IDX_MAX, sizeof(field))? Though memset fills per
byte, so for a multi-byte enum the loop is probably safer here.

>+	ice_parse_input_set(input_set, field);
>+
>+	switch (flow) {
>+	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
>+				  ICE_FLOW_SEG_HDR_IPV4);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
>+				  ICE_FLOW_SEG_HDR_IPV4);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
>+				  ICE_FLOW_SEG_HDR_IPV4);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
>+				  ICE_FLOW_SEG_HDR_IPV6);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
>+				  ICE_FLOW_SEG_HDR_IPV6);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
>+				  ICE_FLOW_SEG_HDR_IPV6);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
>+		break;
>+	default:
>+		PMD_DRV_LOG(ERR, "not supported filter type.");
>+		break;
>+	}
>+
>+	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
>+		ice_flow_set_fld(seg, field[i],
>+				 ICE_FLOW_FLD_OFF_INVAL,
>+				 ICE_FLOW_FLD_OFF_INVAL,
>+				 ICE_FLOW_FLD_OFF_INVAL, false);
>+	}
>+
>+	ret = ice_fdir_cfg_hw_tbl(pf, pf->main_vsi, pf->fdir.fdir_vsi,
>+				  seg, flow, 0);
>+	if (ret < 0)
>+		goto FREE_SEG;
>+
>+	rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
>+	ret = ice_fdir_cfg_hw_tbl(pf, pf->main_vsi, pf->fdir.fdir_vsi,
>+				  seg_tun, flow, 1);
>+
>+	if (!ret)
>+		return ret;
>+	else if (ret < 0)
>+		goto FREE_SEG;
>+
>+FREE_SEG:

Use lowercase to keep it consistent with others.

>+	rte_free(seg);
>+	rte_free(seg_tun);
>+
>+	if (ret == -EAGAIN)
>+		return 0;
>+	else
>+		return ret;

return (ret == -EAGAIN) ? 0 : ret;

>+}
>+
> static int
> ice_init_fdir_filter(struct ice_adapter *ad)
> {
>-- 
>2.17.1
>

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [dpdk-dev 04/12] net/ice: add FDIR create and destroy
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 04/12] net/ice: add FDIR create and destroy Yahui Cao
@ 2019-09-07 12:50   ` Ye Xiaolong
  0 siblings, 0 replies; 19+ messages in thread
From: Ye Xiaolong @ 2019-09-07 12:50 UTC (permalink / raw)
  To: Yahui Cao; +Cc: Qiming Yang, Wenzhuo Lu, dev, Qi Zhang, Beilei Xing

On 09/06, Yahui Cao wrote:
>Add ice_create_fdir_filter to create a rule. If a flow is matched by
>flow director filter, filter rule will be set to HW. Only basic pattern
>and queue/passthru/drop are supported.

Only basic patterns and queue/passthru/drop actions are supported?

>
>Add ice_destroy_fdir_filter to destroy a rule. If a flow is created
>before, filter rule will be removed from HW.
>
>Signed-off-by: Yahui Cao <yahui.cao@intel.com>
>---
> drivers/net/ice/ice_ethdev.h      |   6 +
> drivers/net/ice/ice_fdir_filter.c | 508 ++++++++++++++++++++++++++++++
> 2 files changed, 514 insertions(+)
>
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index ea68858d1..bb821bc41 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -249,6 +249,11 @@ TAILQ_HEAD(ice_flow_list, rte_flow);
> struct ice_flow_parser;
> TAILQ_HEAD(ice_parser_list, ice_flow_parser);
> 
>+struct ice_fdir_filter_conf {
>+	struct ice_fdir_fltr input;
>+	uint64_t input_set;
>+};
>+
> /**
>  *  A structure used to define fields of a FDIR related info.
>  */
>@@ -258,6 +263,7 @@ struct ice_fdir_info {
> 	struct ice_rx_queue *rxq;
> 	void *prg_pkt;                 /* memory for fdir program packet */
> 	uint64_t dma_addr;             /* physic address of packet memory*/
>+	struct ice_fdir_filter_conf conf;
> };
> 
> struct ice_pf {
>diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
>index 0840c3b4b..98bc1be49 100644
>--- a/drivers/net/ice/ice_fdir_filter.c
>+++ b/drivers/net/ice/ice_fdir_filter.c
>@@ -7,6 +7,51 @@
> #include "ice_rxtx.h"
> #include "ice_generic_flow.h"
> 
>+#define ICE_FDIR_INSET_ETH_IPV4 (\
>+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
>+	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
>+
>+#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
>+	ICE_FDIR_INSET_ETH_IPV4 | \
>+	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
>+
>+#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
>+	ICE_FDIR_INSET_ETH_IPV4 | \
>+	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
>+
>+#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
>+	ICE_FDIR_INSET_ETH_IPV4 | \
>+	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
>+
>+#define ICE_FDIR_INSET_ETH_IPV6 (\
>+	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
>+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
>+
>+#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
>+	ICE_FDIR_INSET_ETH_IPV6 | \
>+	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
>+
>+#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
>+	ICE_FDIR_INSET_ETH_IPV6 | \
>+	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
>+
>+#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
>+	ICE_FDIR_INSET_ETH_IPV6 | \
>+	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
>+
>+static struct ice_pattern_match_item ice_fdir_pattern[] = {
>+	{pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
>+	{pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
>+	{pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
>+	{pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
>+	{pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
>+	{pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
>+	{pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
>+	{pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
>+};
>+
>+static struct ice_flow_parser ice_fdir_parser;
>+
> static const struct rte_memzone *
> ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
> {
>@@ -400,6 +445,10 @@ ice_init_fdir_filter(struct ice_adapter *ad)
> 	int ret;
> 
> 	ret = ice_fdir_setup(pf);
>+	if (ret)
>+		return ret;
>+
>+	ret = ice_register_parser(&ice_fdir_parser, ad);
> 
> 	return ret;

return ice_register_parser(&ice_fdir_parser, ad);

> }
>@@ -409,15 +458,474 @@ ice_uninit_fdir_filter(struct ice_adapter *ad)
> {
> 	struct ice_pf *pf = &ad->pf;
> 
>+	ice_unregister_parser(&ice_fdir_parser, ad);
>+
> 	ice_fdir_teardown(pf);
> }
> 
>+static int
>+ice_add_del_fdir_filter(struct ice_pf *pf,
>+			struct ice_fdir_filter_conf *filter,
>+			bool add)
>+{
>+	struct ice_fltr_desc desc;
>+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
>+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
>+	int ret = 0;
>+
>+	memset(&desc, 0, sizeof(desc));
>+	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
>+
>+	memset(pkt, 0, ICE_FDIR_PKT_LEN);
>+	ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
>+	if (ret) {
>+		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
>+		return -EINVAL;
>+	}
>+
>+	ret = ice_fdir_programming(pf, &desc);
>+	if (ret)
>+		return ret;
>+
>+	return 0;

return ice_fdir_programming(pf, &desc);

>+}
>+
>+static int
>+ice_create_fdir_filter(struct ice_adapter *ad,
>+		       struct rte_flow *flow,
>+		       void *meta,
>+		       struct rte_flow_error *error)
>+{
>+	struct ice_pf *pf = &ad->pf;
>+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
>+	struct ice_fdir_filter_conf *filter = meta;
>+	struct ice_fdir_filter_conf *rule;
>+	int ret = 0;

Unnecessary initialization for ret.


Thanks,
Xiaolong

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [dpdk-dev 07/12] net/ice: enable FDIR queue group
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 07/12] net/ice: enable FDIR queue group Yahui Cao
@ 2019-09-07 18:22   ` Ye Xiaolong
  0 siblings, 0 replies; 19+ messages in thread
From: Ye Xiaolong @ 2019-09-07 18:22 UTC (permalink / raw)
  To: Yahui Cao; +Cc: Qiming Yang, Wenzhuo Lu, dev, Qi Zhang, Beilei Xing

On 09/06, Yahui Cao wrote:
>FDIR can send packets to a group of queues and distribute them by RSS.
>
>Signed-off-by: Yahui Cao <yahui.cao@intel.com>
>---
> drivers/net/ice/ice_fdir_filter.c | 65 +++++++++++++++++++++++++++++++
> 1 file changed, 65 insertions(+)
>
>diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
>index df4d0329c..ebbe1bd6c 100644
>--- a/drivers/net/ice/ice_fdir_filter.c
>+++ b/drivers/net/ice/ice_fdir_filter.c
>@@ -741,6 +741,62 @@ static struct ice_flow_engine ice_fdir_engine = {
> 	.type = ICE_FLOW_ENGINE_FDIR,
> };
> 
>+static int
>+ice_fdir_parse_action_qregion(struct ice_pf *pf,
>+			      struct rte_flow_error *error,
>+			      const struct rte_flow_action *act,
>+			      struct ice_fdir_filter_conf *filter)
>+{
>+	const struct rte_flow_action_rss *rss = act->conf;
>+	uint32_t i;
>+
>+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
>+		rte_flow_error_set(error, EINVAL,
>+				   RTE_FLOW_ERROR_TYPE_ACTION, act,
>+				   "Invalid action.");
>+		return -rte_errno;
>+	}
>+
>+	if (rss->queue_num <= 1) {
>+		rte_flow_error_set(error, EINVAL,
>+				   RTE_FLOW_ERROR_TYPE_ACTION, act,
>+				   "Queue region size can't be 0 or 1.");
>+		return -rte_errno;
>+	}
>+
>+	/* check if queue index for queue region is continuos */

s/continuos/continuous

>+	for (i = 0; i < rss->queue_num - 1; i++) {
>+		if (rss->queue[i + 1] != rss->queue[i] + 1) {
>+			rte_flow_error_set(error, EINVAL,
>+					   RTE_FLOW_ERROR_TYPE_ACTION, act,
>+					   "Invalid queue region indexes.");

Change the error message to "discontinuous queue region." to be more specific?

>+			return -rte_errno;
>+		}
>+	}
>+
>+	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
>+		rte_flow_error_set(error, EINVAL,
>+				   RTE_FLOW_ERROR_TYPE_ACTION, act,
>+				   "Invalid queue region indexes.");
>+		return -rte_errno;
>+	}
>+
>+	if (!(rte_is_power_of_2(rss->queue_num) && (rss->queue_num <= 128))) {

Use a macro for the 128.
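e.g. define something like (the name is just a suggestion):

	#define ICE_FDIR_MAX_QREGION_SIZE	128

and use it in the check:

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE)) {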

>+		rte_flow_error_set(error, EINVAL,
>+				   RTE_FLOW_ERROR_TYPE_ACTION, act,
>+				   "The region sizes should be any of the following values:"

s/sizes/size

>+				   "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
>+				   "of queues do not exceed the VSI allocation.");
>+		return -rte_errno;
>+	}
>+
>+	filter->input.q_index = rss->queue[0];
>+	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
>+	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
>+
>+	return 0;
>+}
>+
> static int
> ice_fdir_parse_action(struct ice_adapter *ad,
> 		      const struct rte_flow_action actions[],
>@@ -752,6 +808,7 @@ ice_fdir_parse_action(struct ice_adapter *ad,
> 	const struct rte_flow_action_mark *mark_spec = NULL;
> 	uint32_t dest_num = 0;
> 	uint32_t mark_num = 0;
>+	int ret;
> 
> 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
> 		switch (actions->type) {
>@@ -785,6 +842,14 @@ ice_fdir_parse_action(struct ice_adapter *ad,
> 				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
> 			filter->input.q_index = 0;
> 			break;
>+		case RTE_FLOW_ACTION_TYPE_RSS:
>+			dest_num++;
>+
>+			ret = ice_fdir_parse_action_qregion(pf,
>+						error, actions, filter);
>+			if (ret)
>+				return ret;
>+			break;
> 		case RTE_FLOW_ACTION_TYPE_MARK:
> 			mark_num++;
> 
>-- 
>2.17.1
>

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [dpdk-dev 08/12] net/ice: add FDIR dst mac support
  2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 08/12] net/ice: add FDIR dst mac support Yahui Cao
@ 2019-09-07 18:25   ` Ye Xiaolong
  0 siblings, 0 replies; 19+ messages in thread
From: Ye Xiaolong @ 2019-09-07 18:25 UTC (permalink / raw)
  To: Yahui Cao; +Cc: Qiming Yang, Wenzhuo Lu, dev, Qi Zhang, Beilei Xing

On 09/06, Yahui Cao wrote:
>Enable FDIR ethernet destination address field matching support
>
>Signed-off-by: Yahui Cao <yahui.cao@intel.com>
>---
> drivers/net/ice/ice_fdir_filter.c | 27 +++++++++++++++++++++++----
> 1 file changed, 23 insertions(+), 4 deletions(-)
>
>diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
>index ebbe1bd6c..1893aa0ee 100644
>--- a/drivers/net/ice/ice_fdir_filter.c
>+++ b/drivers/net/ice/ice_fdir_filter.c
>@@ -10,6 +10,7 @@
> #include "ice_generic_flow.h"
> 
> #define ICE_FDIR_INSET_ETH_IPV4 (\
>+	ICE_INSET_DMAC | \
> 	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
> 	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
> 
>@@ -387,6 +388,7 @@ ice_parse_input_set(uint64_t inset, enum ice_flow_field *field)
> 		enum ice_flow_field fld;
> 	};
> 	static const struct ice_inset_map ice_inset_map[] = {
>+		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
> 		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
> 		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
> 		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
>@@ -918,13 +920,30 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> 		case RTE_FLOW_ITEM_TYPE_ETH:
> 			eth_spec = item->spec;
> 			eth_mask = item->mask;
>-			if (eth_spec || eth_mask) {
>-				rte_flow_error_set(error, EINVAL,
>+			if (eth_spec && eth_mask) {
>+				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
>+				    !rte_is_zero_ether_addr(&eth_mask->src)) {
>+					rte_flow_error_set(error, EINVAL,
> 						RTE_FLOW_ERROR_TYPE_ITEM,
> 						item,
>-						"eth mac not support");
>-				return -rte_errno;
>+						"Src mac not support");
>+					return -rte_errno;
>+				}
>+
>+				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
>+					rte_flow_error_set(error, EINVAL,
>+						RTE_FLOW_ERROR_TYPE_ITEM,
>+						item,
>+						"Invalid mac addr mask");
>+					return -rte_errno;
>+				}
>+
>+				input_set |= ICE_INSET_DMAC;
>+				rte_memcpy(&filter->input.ext_data.dst_mac,
>+					   &eth_spec->dst,
>+					   RTE_ETHER_ADDR_LEN);
> 			}
>+

Unnecessary empty line.

> 			break;
> 		case RTE_FLOW_ITEM_TYPE_IPV4:
> 			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
>-- 
>2.17.1
>

^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread

Thread overview: 19+ messages
2019-09-06 12:00 [dpdk-dev] [dpdk-dev 00/12] net/ice: add ice Flow Director driver Yahui Cao
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 01/12] net/ice: initialize and set up flow director Yahui Cao
2019-09-07 11:01   ` Ye Xiaolong
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 02/12] net/ice: tear down " Yahui Cao
2019-09-07 11:21   ` Ye Xiaolong
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 03/12] net/ice: enable input set configuration Yahui Cao
2019-09-07 12:32   ` Ye Xiaolong
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 04/12] net/ice: add FDIR create and destroy Yahui Cao
2019-09-07 12:50   ` Ye Xiaolong
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 05/12] net/ice: add FDIR mark action support Yahui Cao
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 06/12] net/ice: add hash table for FDIR Yahui Cao
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 07/12] net/ice: enable FDIR queue group Yahui Cao
2019-09-07 18:22   ` Ye Xiaolong
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 08/12] net/ice: add FDIR dst mac support Yahui Cao
2019-09-07 18:25   ` Ye Xiaolong
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 09/12] net/ice: add FDIR counter resource init/release Yahui Cao
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 10/12] net/ice: add FDIR counter support for flow id Yahui Cao
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 11/12] net/ice: add FDIR counter support for flow shared Yahui Cao
2019-09-06 12:00 ` [dpdk-dev] [dpdk-dev 12/12] net/ice: add FDIR non-word aligned field support Yahui Cao
