All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 00/18] net/ixgbe: Consistent filter API
@ 2016-12-02 10:42 Wei Zhao
  2016-12-02 10:42 ` [PATCH 01/18] net/ixgbe: store SYN filter Wei Zhao
                   ` (17 more replies)
  0 siblings, 18 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:42 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

The patches mainly implement the following functions:
1) Store and restore all kinds of filters.
2) Parse all kinds of filters.
3) Add flow validate function.
4) Add flow create function.
5) Add flow destroy function.
6) Add flow flush function.

wei zhao1 (18):
  net/ixgbe: store SYN filter
  net/ixgbe: store flow director filter
  net/ixgbe: store L2 tunnel filter
  net/ixgbe: restore n-tuple filter
  net/ixgbe: restore ether type filter
  net/ixgbe: restore SYN filter
  net/ixgbe: restore flow director filter
  net/ixgbe: restore L2 tunnel filter
  net/ixgbe: store and restore L2 tunnel configuration
  net/ixgbe: flush all the filters
  net/ixgbe: parse n-tuple filter
  net/ixgbe: parse ethertype filter
  net/ixgbe: parse SYN filter
  net/ixgbe: parse L2 tunnel filter
  net/ixgbe: parse flow director filter
  net/ixgbe: create consistent filter
  net/ixgbe: destroy consistent filter
  net/ixgbe: flush consistent filter

 drivers/net/ixgbe/ixgbe_ethdev.c | 2570 ++++++++++++++++++++++++++++++++++++--
 drivers/net/ixgbe/ixgbe_ethdev.h |  173 ++-
 drivers/net/ixgbe/ixgbe_fdir.c   |  403 ++++--
 drivers/net/ixgbe/ixgbe_pf.c     |   26 +-
 4 files changed, 2982 insertions(+), 190 deletions(-)

-- 
2.5.5

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH 01/18] net/ixgbe: store SYN filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
@ 2016-12-02 10:42 ` Wei Zhao
  2016-12-20 16:55   ` Ferruh Yigit
  2016-12-02 10:42 ` [PATCH 02/18] net/ixgbe: store flow director filter Wei Zhao
                   ` (16 subsequent siblings)
  17 siblings, 1 reply; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:42 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add support for storing the SYN filter in SW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 12 ++++++++++--
 drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index edc9b22..7f10cca 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1287,6 +1287,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	memset(filter_info->fivetuple_mask, 0,
 	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
 
+	/* initialize SYN filter */
+	filter_info->syn_info = 0;
 	return 0;
 }
 
@@ -5509,15 +5511,19 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
 			bool add)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	uint32_t syn_info;
 	uint32_t synqf;
 
 	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
 		return -EINVAL;
 
+	syn_info = filter_info->syn_info;
 	synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
 
 	if (add) {
-		if (synqf & IXGBE_SYN_FILTER_ENABLE)
+		if (syn_info & IXGBE_SYN_FILTER_ENABLE)
 			return -EINVAL;
 		synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
 			IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
@@ -5527,10 +5533,12 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
 		else
 			synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
 	} else {
-		if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
+		if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
 			return -ENOENT;
 		synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
 	}
+
+	filter_info->syn_info = synqf;
 	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
 	IXGBE_WRITE_FLUSH(hw);
 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 4ff6338..827026c 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -262,6 +262,8 @@ struct ixgbe_filter_info {
 	/* Bit mask for every used 5tuple filter */
 	uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
 	struct ixgbe_5tuple_filter_list fivetuple_list;
+	/* store the SYN filter info */
+	uint32_t syn_info;
 };
 
 /*
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 02/18] net/ixgbe: store flow director filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
  2016-12-02 10:42 ` [PATCH 01/18] net/ixgbe: store SYN filter Wei Zhao
@ 2016-12-02 10:42 ` Wei Zhao
  2016-12-20 16:58   ` Ferruh Yigit
  2016-12-02 10:42 ` [PATCH 03/18] net/ixgbe: store L2 tunnel filter Wei Zhao
                   ` (15 subsequent siblings)
  17 siblings, 1 reply; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:42 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add support for storing flow director filter in SW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  48 ++++++++++++++++++
 drivers/net/ixgbe/ixgbe_ethdev.h |  19 ++++++-
 drivers/net/ixgbe/ixgbe_fdir.c   | 105 ++++++++++++++++++++++++++++++++++++++-
 3 files changed, 169 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 7f10cca..f8e5fe1 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -60,6 +60,7 @@
 #include <rte_malloc.h>
 #include <rte_random.h>
 #include <rte_dev.h>
+#include <rte_hash_crc.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -1094,10 +1095,22 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
 	struct ixgbe_filter_info *filter_info =
 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+	struct ixgbe_hw_fdir_info *fdir_info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
 	uint32_t ctrl_ext;
 	uint16_t csum;
 	int diag, i;
 
+	char fdir_hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters fdir_hash_params = {
+		.name = fdir_hash_name,
+		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
+		.key_len = sizeof(union ixgbe_atr_input),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+		.socket_id = rte_socket_id(),
+	};
+
 	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
@@ -1289,6 +1302,25 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* initialize SYN filter */
 	filter_info->syn_info = 0;
+	/* initialize flow director filter list & hash */
+	TAILQ_INIT(&fdir_info->fdir_list);
+	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+		 "fdir_%s", eth_dev->data->name);
+	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+	if (!fdir_info->hash_handle) {
+		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+		return -EINVAL;
+	}
+	fdir_info->hash_map = rte_zmalloc("ixgbe",
+					  sizeof(struct ixgbe_fdir_filter *) *
+					  IXGBE_MAX_FDIR_FILTER_NUM,
+					  0);
+	if (!fdir_info->hash_map) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for fdir hash map!");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -1297,6 +1329,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 {
 	struct rte_pci_device *pci_dev;
 	struct ixgbe_hw *hw;
+	struct ixgbe_hw_fdir_info *fdir_info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+	struct ixgbe_fdir_filter *fdir_filter;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1330,6 +1365,19 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	rte_free(eth_dev->data->hash_mac_addrs);
 	eth_dev->data->hash_mac_addrs = NULL;
 
+	/* remove all the fdir filters & hash */
+	if (fdir_info->hash_map)
+		rte_free(fdir_info->hash_map);
+	if (fdir_info->hash_handle)
+		rte_hash_free(fdir_info->hash_handle);
+
+	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+		TAILQ_REMOVE(&fdir_info->fdir_list,
+			     fdir_filter,
+			     entries);
+		rte_free(fdir_filter);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 827026c..8310220 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -38,6 +38,7 @@
 #include "base/ixgbe_dcb_82598.h"
 #include "ixgbe_bypass.h"
 #include <rte_time.h>
+#include <rte_hash.h>
 
 /* need update link, bit flag */
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -130,10 +131,11 @@
 #define IXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define IXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
 
+#define IXGBE_MAX_FDIR_FILTER_NUM       (1024 * 32)
+
 /*
  * Information about the fdir mode.
  */
-
 struct ixgbe_hw_fdir_mask {
 	uint16_t vlan_tci_mask;
 	uint32_t src_ipv4_mask;
@@ -148,6 +150,17 @@ struct ixgbe_hw_fdir_mask {
 	uint8_t  tunnel_type_mask;
 };
 
+struct ixgbe_fdir_filter {
+	TAILQ_ENTRY(ixgbe_fdir_filter) entries;
+	union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
+	uint32_t fdirflags; /* drop or forward */
+	uint32_t fdirhash; /* hash value for fdir */
+	uint8_t queue; /* assigned rx queue */
+};
+
+/* list of fdir filters */
+TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);
+
 struct ixgbe_hw_fdir_info {
 	struct ixgbe_hw_fdir_mask mask;
 	uint8_t     flex_bytes_offset;
@@ -159,6 +172,10 @@ struct ixgbe_hw_fdir_info {
 	uint64_t    remove;
 	uint64_t    f_add;
 	uint64_t    f_remove;
+	struct ixgbe_fdir_filter_list fdir_list; /* filter list*/
+	/* store the pointers of the filters, index is the hash value. */
+	struct ixgbe_fdir_filter **hash_map;
+	struct rte_hash *hash_handle; /* cuckoo hash handler */
 };
 
 /* structure for interrupt relative data */
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 4b81ee3..bfcd294 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -43,6 +43,7 @@
 #include <rte_pci.h>
 #include <rte_ether.h>
 #include <rte_ethdev.h>
+#include <rte_malloc.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -1075,6 +1076,65 @@ fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
 
 }
 
+static inline struct ixgbe_fdir_filter *
+ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
+			 union ixgbe_atr_input *key)
+{
+	int ret = 0;
+
+	ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
+	if (ret < 0)
+		return NULL;
+
+	return fdir_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+			 struct ixgbe_fdir_filter *fdir_filter)
+{
+	int ret = 0;
+
+	ret = rte_hash_add_key(fdir_info->hash_handle,
+			       &fdir_filter->ixgbe_fdir);
+
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to insert fdir filter to hash table %d!",
+			    ret);
+		return ret;
+	}
+
+	fdir_info->hash_map[ret] = fdir_filter;
+
+	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
+
+	return 0;
+}
+
+static inline int
+ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+			 union ixgbe_atr_input *key)
+{
+	int ret = 0;
+	struct ixgbe_fdir_filter *fdir_filter;
+
+	ret = rte_hash_del_key(fdir_info->hash_handle, key);
+
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
+		return ret;
+	}
+
+	fdir_filter = fdir_info->hash_map[ret];
+	fdir_info->hash_map[ret] = NULL;
+
+	TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+	rte_free(fdir_filter);
+
+	return 0;
+}
+
 /*
  * ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
  * @dev: pointer to the structure rte_eth_dev
@@ -1098,6 +1158,8 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 	struct ixgbe_hw_fdir_info *info =
 		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
 	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+	struct ixgbe_fdir_filter *node;
+	bool add_node = FALSE;
 
 	if (fdir_mode == RTE_FDIR_MODE_NONE)
 		return -ENOTSUP;
@@ -1148,6 +1210,10 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 						      dev->data->dev_conf.fdir_conf.pballoc);
 
 	if (del) {
+		err = ixgbe_remove_fdir_filter(info, &input);
+		if (err < 0)
+			return err;
+
 		err = fdir_erase_filter_82599(hw, fdirhash);
 		if (err < 0)
 			PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
@@ -1172,6 +1238,37 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 	else
 		return -EINVAL;
 
+	node = ixgbe_fdir_filter_lookup(info, &input);
+	if (node) {
+		if (update) {
+			node->fdirflags = fdircmd_flags;
+			node->fdirhash = fdirhash;
+			node->queue = queue;
+		} else {
+			PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
+			return -EINVAL;
+		}
+	} else {
+		add_node = TRUE;
+		node = rte_zmalloc("ixgbe_fdir",
+				   sizeof(struct ixgbe_fdir_filter),
+				   0);
+		if (!node)
+			return -ENOMEM;
+		(void)rte_memcpy(&node->ixgbe_fdir,
+				 &input,
+				 sizeof(union ixgbe_atr_input));
+		node->fdirflags = fdircmd_flags;
+		node->fdirhash = fdirhash;
+		node->queue = queue;
+
+		err = ixgbe_insert_fdir_filter(info, node);
+		if (err < 0) {
+			rte_free(node);
+			return err;
+		}
+	}
+
 	if (is_perfect) {
 		err = fdir_write_perfect_filter_82599(hw, &input, queue,
 						      fdircmd_flags, fdirhash,
@@ -1180,10 +1277,14 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 		err = fdir_add_signature_filter_82599(hw, &input, queue,
 						      fdircmd_flags, fdirhash);
 	}
-	if (err < 0)
+	if (err < 0) {
 		PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
-	else
+
+		if (add_node)
+			(void)ixgbe_remove_fdir_filter(info, &input);
+	} else {
 		PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
+	}
 
 	return err;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 03/18] net/ixgbe: store L2 tunnel filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
  2016-12-02 10:42 ` [PATCH 01/18] net/ixgbe: store SYN filter Wei Zhao
  2016-12-02 10:42 ` [PATCH 02/18] net/ixgbe: store flow director filter Wei Zhao
@ 2016-12-02 10:42 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 04/18] net/ixgbe: restore n-tuple filter Wei Zhao
                   ` (14 subsequent siblings)
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:42 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add support for storing L2 tunnel filter in SW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 151 +++++++++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_ethdev.h |  24 +++++++
 2 files changed, 175 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index f8e5fe1..9b834cb 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1097,6 +1097,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
 	struct ixgbe_hw_fdir_info *fdir_info =
 		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
 	uint32_t ctrl_ext;
 	uint16_t csum;
 	int diag, i;
@@ -1111,6 +1113,16 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		.socket_id = rte_socket_id(),
 	};
 
+	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters l2_tn_hash_params = {
+		.name = l2_tn_hash_name,
+		.entries = IXGBE_MAX_L2_TN_FILTER_NUM,
+		.key_len = sizeof(struct ixgbe_l2_tn_key),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+		.socket_id = rte_socket_id(),
+	};
+
 	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
@@ -1321,6 +1333,25 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		return -ENOMEM;
 	}
 
+	/* initialize l2 tunnel filter list & hash */
+	TAILQ_INIT(&l2_tn_info->l2_tn_list);
+	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+		 "l2_tn_%s", eth_dev->data->name);
+	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+	if (!l2_tn_info->hash_handle) {
+		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+		return -EINVAL;
+	}
+	l2_tn_info->hash_map = rte_zmalloc("ixgbe",
+					   sizeof(struct ixgbe_l2_tn_filter *) *
+					   IXGBE_MAX_L2_TN_FILTER_NUM,
+					   0);
+	if (!l2_tn_info->hash_map) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for L2 TN hash map!");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -1331,7 +1362,10 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	struct ixgbe_hw *hw;
 	struct ixgbe_hw_fdir_info *fdir_info =
 		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
 	struct ixgbe_fdir_filter *fdir_filter;
+	struct ixgbe_l2_tn_filter *l2_tn_filter;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1378,6 +1412,19 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 		rte_free(fdir_filter);
 	}
 
+	/* remove all the L2 tunnel filters & hash */
+	if (l2_tn_info->hash_map)
+		rte_free(l2_tn_info->hash_map);
+	if (l2_tn_info->hash_handle)
+		rte_hash_free(l2_tn_info->hash_handle);
+
+	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+			     l2_tn_filter,
+			     entries);
+		rte_free(l2_tn_filter);
+	}
+
 	return 0;
 }
 
@@ -7100,12 +7147,104 @@ ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
 	return -EINVAL;
 }
 
+static inline struct ixgbe_l2_tn_filter *
+ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
+			  struct ixgbe_l2_tn_key *key)
+{
+	int ret = 0;
+
+	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+	if (ret < 0)
+		return NULL;
+
+	return l2_tn_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+			  struct ixgbe_l2_tn_filter *l2_tn_filter)
+{
+	int ret = 0;
+
+	ret = rte_hash_add_key(l2_tn_info->hash_handle,
+			       &l2_tn_filter->key);
+
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to insert L2 tunnel filter"
+			    " to hash table %d!",
+			    ret);
+		return ret;
+	}
+
+	l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+	return 0;
+}
+
+static inline int
+ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+			  struct ixgbe_l2_tn_key *key)
+{
+	int ret = 0;
+	struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			    "No such L2 tunnel filter to delete %d!",
+			    ret);
+		return ret;
+	}
+
+	l2_tn_filter = l2_tn_info->hash_map[ret];
+	l2_tn_info->hash_map[ret] = NULL;
+
+	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+	rte_free(l2_tn_filter);
+
+	return 0;
+}
+
 /* Add l2 tunnel filter */
 static int
 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 			       struct rte_eth_l2_tunnel_conf *l2_tunnel)
 {
 	int ret = 0;
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+	struct ixgbe_l2_tn_key key;
+	struct ixgbe_l2_tn_filter *node;
+
+	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+	key.tn_id = l2_tunnel->tunnel_id;
+
+	node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+	if (node) {
+		PMD_DRV_LOG(ERR, "The L2 tunnel filter already exists!");
+		return -EINVAL;
+	}
+
+	node = rte_zmalloc("ixgbe_l2_tn",
+			   sizeof(struct ixgbe_l2_tn_filter),
+			   0);
+	if (!node)
+		return -ENOMEM;
+
+	(void)rte_memcpy(&node->key,
+			 &key,
+			 sizeof(struct ixgbe_l2_tn_key));
+	node->pool = l2_tunnel->pool;
+	ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
+	if (ret < 0) {
+		rte_free(node);
+		return ret;
+	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
 	case RTE_L2_TUNNEL_TYPE_E_TAG:
@@ -7117,6 +7256,9 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 		break;
 	}
 
+	if (ret < 0)
+		(void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
 	return ret;
 }
 
@@ -7126,6 +7268,15 @@ ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 			       struct rte_eth_l2_tunnel_conf *l2_tunnel)
 {
 	int ret = 0;
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+	struct ixgbe_l2_tn_key key;
+
+	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+	key.tn_id = l2_tunnel->tunnel_id;
+	ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+	if (ret < 0)
+		return ret;
 
 	switch (l2_tunnel->l2_tunnel_type) {
 	case RTE_L2_TUNNEL_TYPE_E_TAG:
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 8310220..6663fc9 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -132,6 +132,7 @@
 #define IXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
 
 #define IXGBE_MAX_FDIR_FILTER_NUM       (1024 * 32)
+#define IXGBE_MAX_L2_TN_FILTER_NUM      128
 
 /*
  * Information about the fdir mode.
@@ -283,6 +284,25 @@ struct ixgbe_filter_info {
 	uint32_t syn_info;
 };
 
+struct ixgbe_l2_tn_key {
+	enum rte_eth_tunnel_type          l2_tn_type;
+	uint32_t                          tn_id;
+};
+
+struct ixgbe_l2_tn_filter {
+	TAILQ_ENTRY(ixgbe_l2_tn_filter)    entries;
+	struct ixgbe_l2_tn_key             key;
+	uint32_t                           pool;
+};
+
+TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter);
+
+struct ixgbe_l2_tn_info {
+	struct ixgbe_l2_tn_filter_list      l2_tn_list;
+	struct ixgbe_l2_tn_filter         **hash_map;
+	struct rte_hash                    *hash_handle;
+};
+
 /*
  * Structure to store private data for each driver instance (for each port).
  */
@@ -302,6 +322,7 @@ struct ixgbe_adapter {
 	struct ixgbe_bypass_info    bps;
 #endif /* RTE_NIC_BYPASS */
 	struct ixgbe_filter_info    filter;
+	struct ixgbe_l2_tn_info     l2_tn;
 
 	bool rx_bulk_alloc_allowed;
 	bool rx_vec_allowed;
@@ -346,6 +367,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->filter)
 
+#define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->l2_tn)
+
 /*
  * RX/TX function prototypes
  */
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 04/18] net/ixgbe: restore n-tuple filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (2 preceding siblings ...)
  2016-12-02 10:42 ` [PATCH 03/18] net/ixgbe: store L2 tunnel filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-20 16:58   ` Ferruh Yigit
  2016-12-02 10:43 ` [PATCH 05/18] net/ixgbe: restore ether type filter Wei Zhao
                   ` (13 subsequent siblings)
  17 siblings, 1 reply; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add support for restoring n-tuple filter in SW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 131 +++++++++++++++++++++++++--------------
 1 file changed, 83 insertions(+), 48 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 9b834cb..0773afd 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -390,6 +390,7 @@ static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 					 struct rte_eth_udp_tunnel *udp_tunnel);
 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 					 struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_filter_restore(struct rte_eth_dev *dev);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -1360,6 +1361,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 {
 	struct rte_pci_device *pci_dev;
 	struct ixgbe_hw *hw;
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+	struct ixgbe_5tuple_filter *p_5tuple;
 	struct ixgbe_hw_fdir_info *fdir_info =
 		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
 	struct ixgbe_l2_tn_info *l2_tn_info =
@@ -1399,6 +1403,16 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	rte_free(eth_dev->data->hash_mac_addrs);
 	eth_dev->data->hash_mac_addrs = NULL;
 
+	/* Remove all ntuple filters of the device */
+	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+		TAILQ_REMOVE(&filter_info->fivetuple_list,
+			     p_5tuple,
+			     entries);
+		rte_free(p_5tuple);
+	}
+	memset(filter_info->fivetuple_mask, 0,
+	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
 	/* remove all the fdir filters & hash */
 	if (fdir_info->hash_map)
 		rte_free(fdir_info->hash_map);
@@ -2482,6 +2496,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 
 	/* resume enabled intr since hw reset */
 	ixgbe_enable_intr(dev);
+	ixgbe_filter_restore(dev);
 
 	return 0;
 
@@ -2502,9 +2517,6 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_vf_info *vfinfo =
 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 	int vf;
 
@@ -2542,17 +2554,6 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 	memset(&link, 0, sizeof(link));
 	rte_ixgbe_dev_atomic_write_link_status(dev, &link);
 
-	/* Remove all ntuple filters of the device */
-	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
-	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
-		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
-		TAILQ_REMOVE(&filter_info->fivetuple_list,
-			     p_5tuple, entries);
-		rte_free(p_5tuple);
-	}
-	memset(filter_info->fivetuple_mask, 0,
-		sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
-
 	if (!rte_intr_allow_others(intr_handle))
 		/* resume to the default handler */
 		rte_intr_callback_register(intr_handle,
@@ -5711,6 +5712,52 @@ convert_protocol_type(uint8_t protocol_value)
 		return IXGBE_FILTER_PROTOCOL_NONE;
 }
 
+/* inject a 5-tuple filter to HW */
+static inline void
+ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+			   struct ixgbe_5tuple_filter *filter)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int i;
+	uint32_t ftqf, sdpqf;
+	uint32_t l34timir = 0;
+	uint8_t mask = 0xff;
+
+	i = filter->index;
+
+	sdpqf = (uint32_t)(filter->filter_info.dst_port <<
+				IXGBE_SDPQF_DSTPORT_SHIFT);
+	sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
+
+	ftqf = (uint32_t)(filter->filter_info.proto &
+		IXGBE_FTQF_PROTOCOL_MASK);
+	ftqf |= (uint32_t)((filter->filter_info.priority &
+		IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
+	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+		mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+	if (filter->filter_info.dst_ip_mask == 0)
+		mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+	if (filter->filter_info.src_port_mask == 0)
+		mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+	if (filter->filter_info.dst_port_mask == 0)
+		mask &= IXGBE_FTQF_DEST_PORT_MASK;
+	if (filter->filter_info.proto_mask == 0)
+		mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+	ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+	ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+	ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
+	IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
+	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
+	IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+
+	l34timir |= IXGBE_L34T_IMIR_RESERVE;
+	l34timir |= (uint32_t)(filter->queue <<
+				IXGBE_L34T_IMIR_QUEUE_SHIFT);
+	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
+}
+
 /*
  * add a 5tuple filter
  *
@@ -5728,13 +5775,9 @@ static int
 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
 			struct ixgbe_5tuple_filter *filter)
 {
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_filter_info *filter_info =
 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 	int i, idx, shift;
-	uint32_t ftqf, sdpqf;
-	uint32_t l34timir = 0;
-	uint8_t mask = 0xff;
 
 	/*
 	 * look for an unused 5tuple filter index,
@@ -5757,37 +5800,8 @@ ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
 		return -ENOSYS;
 	}
 
-	sdpqf = (uint32_t)(filter->filter_info.dst_port <<
-				IXGBE_SDPQF_DSTPORT_SHIFT);
-	sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
-
-	ftqf = (uint32_t)(filter->filter_info.proto &
-		IXGBE_FTQF_PROTOCOL_MASK);
-	ftqf |= (uint32_t)((filter->filter_info.priority &
-		IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
-	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
-		mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
-	if (filter->filter_info.dst_ip_mask == 0)
-		mask &= IXGBE_FTQF_DEST_ADDR_MASK;
-	if (filter->filter_info.src_port_mask == 0)
-		mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
-	if (filter->filter_info.dst_port_mask == 0)
-		mask &= IXGBE_FTQF_DEST_PORT_MASK;
-	if (filter->filter_info.proto_mask == 0)
-		mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
-	ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
-	ftqf |= IXGBE_FTQF_POOL_MASK_EN;
-	ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
-
-	IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
-	IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
-	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
-	IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+	ixgbe_inject_5tuple_filter(dev, filter);
 
-	l34timir |= IXGBE_L34T_IMIR_RESERVE;
-	l34timir |= (uint32_t)(filter->queue <<
-				IXGBE_L34T_IMIR_QUEUE_SHIFT);
-	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
 	return 0;
 }
 
@@ -7799,6 +7813,27 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	ixgbevf_dev_interrupt_action(dev);
 }
 
+/* restore n-tuple filter */
+static inline void
+ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct ixgbe_5tuple_filter *node;
+
+	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+		ixgbe_inject_5tuple_filter(dev, node);
+	}
+}
+
+static int
+ixgbe_filter_restore(struct rte_eth_dev *dev)
+{
+	ixgbe_ntuple_filter_restore(dev);
+
+	return 0;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 05/18] net/ixgbe: restore ether type filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (3 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 04/18] net/ixgbe: restore n-tuple filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 06/18] net/ixgbe: restore SYN filter Wei Zhao
                   ` (12 subsequent siblings)
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add support for restoring ether type filter in SW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 84 ++++++++++++++++++----------------------
 drivers/net/ixgbe/ixgbe_ethdev.h | 57 ++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_pf.c     | 25 +++++++-----
 3 files changed, 109 insertions(+), 57 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 0773afd..f1dee4d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1308,6 +1308,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	/* enable support intr */
 	ixgbe_enable_intr(eth_dev);
 
+	/* initialize ether type filter */
+	filter_info->ethertype_mask = 0;
+	memset(filter_info->ethertype_filters, 0,
+	       sizeof(struct ixgbe_ethertype_filter) * IXGBE_MAX_ETQF_FILTERS);
+
 	/* initialize 5tuple filter list */
 	TAILQ_INIT(&filter_info->fivetuple_list);
 	memset(filter_info->fivetuple_mask, 0,
@@ -6125,47 +6130,6 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
 	return ret;
 }
 
-static inline int
-ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
-			uint16_t ethertype)
-{
-	int i;
-
-	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
-		if (filter_info->ethertype_filters[i] == ethertype &&
-		    (filter_info->ethertype_mask & (1 << i)))
-			return i;
-	}
-	return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
-			uint16_t ethertype)
-{
-	int i;
-
-	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
-		if (!(filter_info->ethertype_mask & (1 << i))) {
-			filter_info->ethertype_mask |= 1 << i;
-			filter_info->ethertype_filters[i] = ethertype;
-			return i;
-		}
-	}
-	return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
-			uint8_t idx)
-{
-	if (idx >= IXGBE_MAX_ETQF_FILTERS)
-		return -1;
-	filter_info->ethertype_mask &= ~(1 << idx);
-	filter_info->ethertype_filters[idx] = 0;
-	return idx;
-}
-
 static int
 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
 			struct rte_eth_ethertype_filter *filter,
@@ -6177,6 +6141,7 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
 	uint32_t etqf = 0;
 	uint32_t etqs = 0;
 	int ret;
+	struct ixgbe_ethertype_filter ethertype_filter;
 
 	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
 		return -EINVAL;
@@ -6210,18 +6175,22 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
 	}
 
 	if (add) {
-		ret = ixgbe_ethertype_filter_insert(filter_info,
-			filter->ether_type);
-		if (ret < 0) {
-			PMD_DRV_LOG(ERR, "ethertype filters are full.");
-			return -ENOSYS;
-		}
 		etqf = IXGBE_ETQF_FILTER_EN;
 		etqf |= (uint32_t)filter->ether_type;
 		etqs |= (uint32_t)((filter->queue <<
 				    IXGBE_ETQS_RX_QUEUE_SHIFT) &
 				    IXGBE_ETQS_RX_QUEUE);
 		etqs |= IXGBE_ETQS_QUEUE_EN;
+
+		ethertype_filter.ethertype = filter->ether_type;
+		ethertype_filter.etqf = etqf;
+		ethertype_filter.etqs = etqs;
+		ret = ixgbe_ethertype_filter_insert(filter_info,
+						    &ethertype_filter);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "ethertype filters are full.");
+			return -ENOSPC;
+		}
 	} else {
 		ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
 		if (ret < 0)
@@ -7826,10 +7795,31 @@ ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
 	}
 }
 
+/* restore ethernet type filter */
+static inline void
+ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	int i;
+
+	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+		if (filter_info->ethertype_mask & (1 << i)) {
+			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+					filter_info->ethertype_filters[i].etqf);
+			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
+					filter_info->ethertype_filters[i].etqs);
+			IXGBE_WRITE_FLUSH(hw);
+		}
+	}
+}
+
 static int
 ixgbe_filter_restore(struct rte_eth_dev *dev)
 {
 	ixgbe_ntuple_filter_restore(dev);
+	ixgbe_ethertype_filter_restore(dev);
 
 	return 0;
 }
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 6663fc9..c30a8c0 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -270,13 +270,19 @@ struct ixgbe_5tuple_filter {
 	(RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
 	 (sizeof(uint32_t) * NBBY))
 
+struct ixgbe_ethertype_filter {
+	uint16_t ethertype;
+	uint32_t etqf;
+	uint32_t etqs;
+};
+
 /*
  * Structure to store filters' info.
  */
 struct ixgbe_filter_info {
 	uint8_t ethertype_mask;  /* Bit mask for every used ethertype filter */
 	/* store used ethertype filters*/
-	uint16_t ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
+	struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
 	/* Bit mask for every used 5tuple filter */
 	uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
 	struct ixgbe_5tuple_filter_list fivetuple_list;
@@ -485,4 +491,53 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
 int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op, void *arg);
+
+static inline int
+ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
+			      uint16_t ethertype)
+{
+	int i;
+
+	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+		if (filter_info->ethertype_filters[i].ethertype == ethertype &&
+		    (filter_info->ethertype_mask & (1 << i)))
+			return i;
+	}
+	return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
+			      struct ixgbe_ethertype_filter *ethertype_filter)
+{
+	int i;
+
+	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+		if (!(filter_info->ethertype_mask & (1 << i))) {
+			filter_info->ethertype_mask |= 1 << i;
+			filter_info->ethertype_filters[i].ethertype =
+				ethertype_filter->ethertype;
+			filter_info->ethertype_filters[i].etqf =
+				ethertype_filter->etqf;
+			filter_info->ethertype_filters[i].etqs =
+				ethertype_filter->etqs;
+			return i;
+		}
+	}
+	return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
+			      uint8_t idx)
+{
+	if (idx >= IXGBE_MAX_ETQF_FILTERS)
+		return -1;
+	filter_info->ethertype_mask &= ~(1 << idx);
+	filter_info->ethertype_filters[idx].ethertype = 0;
+	filter_info->ethertype_filters[idx].etqf = 0;
+	filter_info->ethertype_filters[idx].etqs = 0;
+	return idx;
+}
+
 #endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 26395e4..6139915 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -176,6 +176,7 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
 	uint16_t vf_num;
 	int i;
+	struct ixgbe_ethertype_filter ethertype_filter;
 
 	if (!hw->mac.ops.set_ethertype_anti_spoofing) {
 		RTE_LOG(INFO, PMD, "ether type anti-spoofing is not"
@@ -183,16 +184,22 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
 		return;
 	}
 
-	/* occupy an entity of ether type filter */
-	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
-		if (!(filter_info->ethertype_mask & (1 << i))) {
-			filter_info->ethertype_mask |= 1 << i;
-			filter_info->ethertype_filters[i] =
-				IXGBE_ETHERTYPE_FLOW_CTRL;
-			break;
-		}
+	i = ixgbe_ethertype_filter_lookup(filter_info,
+					  IXGBE_ETHERTYPE_FLOW_CTRL);
+	if (i >= 0) {
+		RTE_LOG(ERR, PMD, "A ether type filter"
+			" entity for flow control already exists!\n");
+		return;
 	}
-	if (i == IXGBE_MAX_ETQF_FILTERS) {
+
+	ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
+	ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
+				IXGBE_ETQF_TX_ANTISPOOF |
+				IXGBE_ETHERTYPE_FLOW_CTRL;
+	ethertype_filter.etqs = 0;
+	i = ixgbe_ethertype_filter_insert(filter_info,
+					  &ethertype_filter);
+	if (i < 0) {
 		RTE_LOG(ERR, PMD, "Cannot find an unused ether type filter"
 			" entity for flow control.\n");
 		return;
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 06/18] net/ixgbe: restore SYN filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (4 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 05/18] net/ixgbe: restore ether type filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 07/18] net/ixgbe: restore flow director filter Wei Zhao
                   ` (11 subsequent siblings)
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add support for restoring SYN filter in SW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index f1dee4d..efbaa62 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -7815,11 +7815,29 @@ ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
 	}
 }
 
+/* restore SYN filter */
+static inline void
+ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	uint32_t synqf;
+
+	synqf = filter_info->syn_info;
+
+	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+}
+
 static int
 ixgbe_filter_restore(struct rte_eth_dev *dev)
 {
 	ixgbe_ntuple_filter_restore(dev);
 	ixgbe_ethertype_filter_restore(dev);
+	ixgbe_syn_filter_restore(dev);
 
 	return 0;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 07/18] net/ixgbe: restore flow director filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (5 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 06/18] net/ixgbe: restore SYN filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 08/18] net/ixgbe: restore L2 tunnel filter Wei Zhao
                   ` (10 subsequent siblings)
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add support for restoring flow director filter in SW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  1 +
 drivers/net/ixgbe/ixgbe_ethdev.h |  1 +
 drivers/net/ixgbe/ixgbe_fdir.c   | 35 +++++++++++++++++++++++++++++++++++
 3 files changed, 37 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index efbaa62..e145866 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -7838,6 +7838,7 @@ ixgbe_filter_restore(struct rte_eth_dev *dev)
 	ixgbe_ntuple_filter_restore(dev);
 	ixgbe_ethertype_filter_restore(dev);
 	ixgbe_syn_filter_restore(dev);
+	ixgbe_fdir_filter_restore(dev);
 
 	return 0;
 }
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c30a8c0..d6253ad 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -491,6 +491,7 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
 int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op, void *arg);
+void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev);
 
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index bfcd294..d390972 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -1479,3 +1479,38 @@ ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 	}
 	return ret;
 }
+
+/* restore flow director filter */
+void
+ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_hw_fdir_info *fdir_info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	struct ixgbe_fdir_filter *node;
+	bool is_perfect = FALSE;
+	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+		is_perfect = TRUE;
+
+	if (is_perfect) {
+		TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+			(void)fdir_write_perfect_filter_82599(hw,
+							      &node->ixgbe_fdir,
+							      node->queue,
+							      node->fdirflags,
+							      node->fdirhash,
+							      fdir_mode);
+		}
+	} else {
+		TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+			(void)fdir_add_signature_filter_82599(hw,
+							      &node->ixgbe_fdir,
+							      node->queue,
+							      node->fdirflags,
+							      node->fdirhash);
+		}
+	}
+}
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 08/18] net/ixgbe: restore L2 tunnel filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (6 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 07/18] net/ixgbe: restore flow director filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 09/18] net/ixgbe: store and restore L2 tunnel configuration Wei Zhao
                   ` (9 subsequent siblings)
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add support for restoring L2 tunnel filter in SW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 69 ++++++++++++++++++++++++++--------------
 1 file changed, 46 insertions(+), 23 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index e145866..cc56aab 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -7195,7 +7195,8 @@ ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
 /* Add l2 tunnel filter */
 static int
 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
-			       struct rte_eth_l2_tunnel_conf *l2_tunnel)
+			       struct rte_eth_l2_tunnel_conf *l2_tunnel,
+			       bool restore)
 {
 	int ret = 0;
 	struct ixgbe_l2_tn_info *l2_tn_info =
@@ -7203,30 +7204,33 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	struct ixgbe_l2_tn_key key;
 	struct ixgbe_l2_tn_filter *node;
 
-	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
-	key.tn_id = l2_tunnel->tunnel_id;
+	if (!restore) {
+		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+		key.tn_id = l2_tunnel->tunnel_id;
 
-	node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+		node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
 
-	if (node) {
-		PMD_DRV_LOG(ERR, "The L2 tunnel filter already exists!");
-		return -EINVAL;
-	}
+		if (node) {
+			PMD_DRV_LOG(ERR,
+				    "The L2 tunnel filter already exists!");
+			return -EINVAL;
+		}
 
-	node = rte_zmalloc("ixgbe_l2_tn",
-			   sizeof(struct ixgbe_l2_tn_filter),
-			   0);
-	if (!node)
-		return -ENOMEM;
+		node = rte_zmalloc("ixgbe_l2_tn",
+				   sizeof(struct ixgbe_l2_tn_filter),
+				   0);
+		if (!node)
+			return -ENOMEM;
 
-	(void)rte_memcpy(&node->key,
-			 &key,
-			 sizeof(struct ixgbe_l2_tn_key));
-	node->pool = l2_tunnel->pool;
-	ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
-	if (ret < 0) {
-		rte_free(node);
-		return ret;
+		(void)rte_memcpy(&node->key,
+				 &key,
+				 sizeof(struct ixgbe_l2_tn_key));
+		node->pool = l2_tunnel->pool;
+		ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
+		if (ret < 0) {
+			rte_free(node);
+			return ret;
+		}
 	}
 
 	switch (l2_tunnel->l2_tunnel_type) {
@@ -7239,7 +7243,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 		break;
 	}
 
-	if (ret < 0)
+	if ((!restore) && (ret < 0))
 		(void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
 
 	return ret;
@@ -7300,7 +7304,8 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_ADD:
 		ret = ixgbe_dev_l2_tunnel_filter_add
 			(dev,
-			 (struct rte_eth_l2_tunnel_conf *)arg);
+			 (struct rte_eth_l2_tunnel_conf *)arg,
+			 FALSE);
 		break;
 	case RTE_ETH_FILTER_DELETE:
 		ret = ixgbe_dev_l2_tunnel_filter_del
@@ -7832,6 +7837,23 @@ ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
 	}
 }
 
+/* restore L2 tunnel filter */
+static inline void
+ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+	struct ixgbe_l2_tn_filter *node;
+	struct rte_eth_l2_tunnel_conf l2_tn_conf;
+
+	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+		l2_tn_conf.tunnel_id      = node->key.tn_id;
+		l2_tn_conf.pool           = node->pool;
+		(void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+	}
+}
+
 static int
 ixgbe_filter_restore(struct rte_eth_dev *dev)
 {
@@ -7839,6 +7861,7 @@ ixgbe_filter_restore(struct rte_eth_dev *dev)
 	ixgbe_ethertype_filter_restore(dev);
 	ixgbe_syn_filter_restore(dev);
 	ixgbe_fdir_filter_restore(dev);
+	ixgbe_l2_tn_filter_restore(dev);
 
 	return 0;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 09/18] net/ixgbe: store and restore L2 tunnel configuration
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (7 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 08/18] net/ixgbe: restore L2 tunnel filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 10/18] net/ixgbe: flush all the filters Wei Zhao
                   ` (8 subsequent siblings)
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add support for storing and restoring L2 tunnel configuration in SW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 36 ++++++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_ethdev.h |  3 +++
 2 files changed, 39 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index cc56aab..783f426 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -391,6 +391,7 @@ static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 					 struct rte_eth_udp_tunnel *udp_tunnel);
 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
+static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -1357,6 +1358,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 			     "Failed to allocate memory for L2 TN hash map!");
 		return -ENOMEM;
 	}
+	l2_tn_info->e_tag_en = FALSE;
+	l2_tn_info->e_tag_fwd_en = FALSE;
+	l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
 
 	return 0;
 }
@@ -2501,6 +2505,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 
 	/* resume enabled intr since hw reset */
 	ixgbe_enable_intr(dev);
+	ixgbe_l2_tunnel_conf(dev);
 	ixgbe_filter_restore(dev);
 
 	return 0;
@@ -6954,12 +6959,15 @@ ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
 {
 	int ret = 0;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
 
 	if (l2_tunnel == NULL)
 		return -EINVAL;
 
 	switch (l2_tunnel->l2_tunnel_type) {
 	case RTE_L2_TUNNEL_TYPE_E_TAG:
+		l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
 		ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
 		break;
 	default:
@@ -6998,9 +7006,12 @@ ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
 {
 	int ret = 0;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
 
 	switch (l2_tunnel_type) {
 	case RTE_L2_TUNNEL_TYPE_E_TAG:
+		l2_tn_info->e_tag_en = TRUE;
 		ret = ixgbe_e_tag_enable(hw);
 		break;
 	default:
@@ -7039,9 +7050,12 @@ ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
 {
 	int ret = 0;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
 
 	switch (l2_tunnel_type) {
 	case RTE_L2_TUNNEL_TYPE_E_TAG:
+		l2_tn_info->e_tag_en = FALSE;
 		ret = ixgbe_e_tag_disable(hw);
 		break;
 	default:
@@ -7348,10 +7362,13 @@ ixgbe_dev_l2_tunnel_forwarding_enable
 	(struct rte_eth_dev *dev,
 	 enum rte_eth_tunnel_type l2_tunnel_type)
 {
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
 	int ret = 0;
 
 	switch (l2_tunnel_type) {
 	case RTE_L2_TUNNEL_TYPE_E_TAG:
+		l2_tn_info->e_tag_fwd_en = TRUE;
 		ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
 		break;
 	default:
@@ -7369,10 +7386,13 @@ ixgbe_dev_l2_tunnel_forwarding_disable
 	(struct rte_eth_dev *dev,
 	 enum rte_eth_tunnel_type l2_tunnel_type)
 {
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
 	int ret = 0;
 
 	switch (l2_tunnel_type) {
 	case RTE_L2_TUNNEL_TYPE_E_TAG:
+		l2_tn_info->e_tag_fwd_en = FALSE;
 		ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
 		break;
 	default:
@@ -7866,6 +7886,22 @@ ixgbe_filter_restore(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (l2_tn_info->e_tag_en)
+		(void)ixgbe_e_tag_enable(hw);
+
+	if (l2_tn_info->e_tag_fwd_en)
+		(void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
+
+	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index d6253ad..6327962 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -307,6 +307,9 @@ struct ixgbe_l2_tn_info {
 	struct ixgbe_l2_tn_filter_list      l2_tn_list;
 	struct ixgbe_l2_tn_filter         **hash_map;
 	struct rte_hash                    *hash_handle;
+	bool e_tag_en; /* e-tag enabled */
+	bool e_tag_fwd_en; /* e-tag based forwarding enabled */
+	uint16_t e_tag_ether_type; /* ether type for e-tag */
 };
 
 /*
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 10/18] net/ixgbe: flush all the filters
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (8 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 09/18] net/ixgbe: store and restore L2 tunnel configuration Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 11/18] net/ixgbe: parse n-tuple filter Wei Zhao
                   ` (7 subsequent siblings)
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add support for flushing all the filters in SW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 93 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_ethdev.h |  9 ++++
 drivers/net/ixgbe/ixgbe_fdir.c   | 24 +++++++++++
 drivers/net/ixgbe/ixgbe_pf.c     |  1 +
 4 files changed, 127 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 783f426..f84ca17 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -392,6 +392,7 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 					 struct rte_eth_udp_tunnel *udp_tunnel);
 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
+int ixgbe_flush_all_filter(struct rte_eth_dev *dev);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -6190,6 +6191,7 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
 		ethertype_filter.ethertype = filter->ether_type;
 		ethertype_filter.etqf = etqf;
 		ethertype_filter.etqs = etqs;
+		ethertype_filter.conf = FALSE;
 		ret = ixgbe_ethertype_filter_insert(filter_info,
 						    &ethertype_filter);
 		if (ret < 0) {
@@ -7902,6 +7904,97 @@ ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
 	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
 }
 
+/* remove all the n-tuple filters */
+static void
+ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct ixgbe_5tuple_filter *p_5tuple;
+
+	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+		ixgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+static void
+ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	int i;
+
+	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+		if (filter_info->ethertype_mask & (1 << i) &&
+		    !filter_info->ethertype_filters[i].conf) {
+			(void)ixgbe_ethertype_filter_remove(filter_info,
+							    (uint8_t)i);
+			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
+			IXGBE_WRITE_FLUSH(hw);
+		}
+	}
+}
+
+/* remove the SYN filter */
+static void
+ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+	if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
+		filter_info->syn_info = 0;
+
+		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+}
+
+/* remove all the L2 tunnel filters */
+static int
+ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+	struct ixgbe_l2_tn_filter *l2_tn_filter;
+	struct rte_eth_l2_tunnel_conf l2_tn_conf;
+	int ret = 0;
+
+	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+		l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
+		l2_tn_conf.pool           = l2_tn_filter->pool;
+		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+int
+ixgbe_flush_all_filter(struct rte_eth_dev *dev)
+{
+	int ret = 0;
+
+	ixgbe_clear_all_ntuple_filter(dev);
+	ixgbe_clear_all_ethertype_filter(dev);
+	ixgbe_clear_syn_filter(dev);
+
+	ret = ixgbe_clear_all_fdir_filter(dev);
+	if (ret < 0)
+		return ret;
+
+	ret = ixgbe_clear_all_l2_tn_filter(dev);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 6327962..9ed5f45 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -274,6 +274,11 @@ struct ixgbe_ethertype_filter {
 	uint16_t ethertype;
 	uint32_t etqf;
 	uint32_t etqs;
+	/**
+	 * If this filter is added by configuration,
+	 * it should not be removed.
+	 */
+	bool     conf;
 };
 
 /*
@@ -495,6 +500,7 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op, void *arg);
 void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
 
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
@@ -525,6 +531,8 @@ ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
 				ethertype_filter->etqf;
 			filter_info->ethertype_filters[i].etqs =
 				ethertype_filter->etqs;
+			filter_info->ethertype_filters[i].conf =
+				ethertype_filter->conf;
 			return i;
 		}
 	}
@@ -541,6 +549,7 @@ ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
 	filter_info->ethertype_filters[idx].ethertype = 0;
 	filter_info->ethertype_filters[idx].etqf = 0;
 	filter_info->ethertype_filters[idx].etqs = 0;
+	filter_info->ethertype_filters[idx].conf = FALSE;
 	return idx;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index d390972..7097dca 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -1514,3 +1514,27 @@ ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
 		}
 	}
 }
+
+/* remove all the flow director filters */
+int
+ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw_fdir_info *fdir_info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	struct ixgbe_fdir_filter *fdir_filter;
+	int ret = 0;
+
+	/* flush flow director */
+	rte_hash_reset(fdir_info->hash_handle);
+	memset(fdir_info->hash_map, 0,
+	       sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
+	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+		TAILQ_REMOVE(&fdir_info->fdir_list,
+			     fdir_filter,
+			     entries);
+		rte_free(fdir_filter);
+	}
+	ret = ixgbe_fdir_flush(dev);
+
+	return ret;
+}
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 6139915..5f017eb 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -197,6 +197,7 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
 				IXGBE_ETQF_TX_ANTISPOOF |
 				IXGBE_ETHERTYPE_FLOW_CTRL;
 	ethertype_filter.etqs = 0;
+	ethertype_filter.conf = TRUE;
 	i = ixgbe_ethertype_filter_insert(filter_info,
 					  &ethertype_filter);
 	if (i < 0) {
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 11/18] net/ixgbe: parse n-tuple filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (9 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 10/18] net/ixgbe: flush all the filters Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-20 17:23   ` Ferruh Yigit
  2016-12-02 10:43 ` [PATCH 12/18] net/ixgbe: parse ethertype filter Wei Zhao
                   ` (6 subsequent siblings)
  17 siblings, 1 reply; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Add rule validate function and check if the rule is a n-tuple rule,
and get the n-tuple info.

Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 349 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 349 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index f84ca17..d3768c6 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -61,6 +61,8 @@
 #include <rte_random.h>
 #include <rte_dev.h>
 #include <rte_hash_crc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -393,6 +395,26 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
 int ixgbe_flush_all_filter(struct rte_eth_dev *dev);
+static enum
+rte_flow_error_type cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+					const struct rte_flow_item pattern[],
+					const struct rte_flow_action actions[],
+					struct rte_eth_ntuple_filter *filter);
+static enum
+rte_flow_error_type ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+					const struct rte_flow_item pattern[],
+					const struct rte_flow_action actions[],
+					struct rte_eth_ntuple_filter *filter);
+enum rte_flow_error_type
+ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item pattern[],
+					const struct rte_flow_action actions[]);
+int ixgbe_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -778,6 +800,14 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /	\
 		sizeof(rte_ixgbevf_stats_strings[0]))
 
+static const struct rte_flow_ops ixgbe_flow_ops = {
+	ixgbe_flow_validate,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+};
+
 /**
  * Atomically reads the link status information from global
  * structure rte_eth_dev.
@@ -6311,6 +6341,11 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_L2_TUNNEL:
 		ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
 		break;
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ixgbe_flow_ops;
+		break;
 	default:
 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
 							filter_type);
@@ -7995,6 +8030,320 @@ ixgbe_flush_all_filter(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static inline uint32_t
+rte_be_to_cpu_24(uint32_t x)
+{
+	return  ((x & 0x000000ffUL) << 16) |
+		(x & 0x0000ff00UL) |
+		((x & 0x00ff0000UL) >> 16);
+}
+#define IXGBE_MIN_N_TUPLE_PRIO 1
+#define IXGBE_MAX_N_TUPLE_PRIO 7
+#define PATTERN_SKIP_VOID(filter, filter_struct, ret)\
+		do {\
+			if (!pattern) {\
+			memset(filter, 0, sizeof(filter_struct));\
+			return ret;\
+			} \
+			item = pattern + i;\
+			while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+					i++;\
+					item = pattern + i;\
+			} \
+		} while (0)
+
+#define ACTION_SKIP_VOID(filter, filter_struct, ret)\
+		do {\
+			if (!actions) {\
+			memset(filter, 0, sizeof(filter_struct));\
+			return ret;\
+			} \
+			act = actions + i;\
+			while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+					i++;\
+					act = actions + i;\
+			} \
+		} while (0)
+
+/**
+ * Please be aware there's an assumption for all the parsers.
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is a n-tuple rule.
+ * And get the n-tuple filter info BTW.
+ */
+static enum rte_flow_error_type
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+			 const struct rte_flow_item pattern[],
+			 const struct rte_flow_action actions[],
+			 struct rte_eth_ntuple_filter *filter)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item_ipv4 *ipv4_spec;
+	const struct rte_flow_item_ipv4 *ipv4_mask;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+	uint32_t i;
+
+	/************************************************
+	 * parse pattern
+	 ************************************************/
+	i = 0;
+
+	/* the first not void item can be MAC or IPv4 */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	/* Skip Ethernet */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		/* if the first item is MAC, the content should be NULL */
+		if (item->spec || item->mask)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+
+		/* check if the next not void item is IPv4 */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* get the IPv4 info */
+	if (!item->spec || !item->mask)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+	/**
+	 * Only support src & dst addresses, protocol,
+	 * others should be masked.
+	 */
+	if (ipv4_mask->hdr.version_ihl ||
+	    ipv4_mask->hdr.type_of_service ||
+	    ipv4_mask->hdr.total_length ||
+	    ipv4_mask->hdr.packet_id ||
+	    ipv4_mask->hdr.fragment_offset ||
+	    ipv4_mask->hdr.time_to_live ||
+	    ipv4_mask->hdr.hdr_checksum)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+	filter->dst_ip = ipv4_spec->hdr.dst_addr;
+	filter->src_ip = ipv4_spec->hdr.src_addr;
+	filter->proto  = ipv4_spec->hdr.next_proto_id;
+
+	/* check if the next not void item is TCP or UDP */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* get the TCP/UDP info */
+	if (!item->spec || !item->mask)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+		/**
+		 * Only support src & dst ports, tcp flags,
+		 * others should be masked.
+		 */
+		if (tcp_mask->hdr.sent_seq ||
+		    tcp_mask->hdr.recv_ack ||
+		    tcp_mask->hdr.data_off ||
+		    tcp_mask->hdr.rx_win ||
+		    tcp_mask->hdr.cksum ||
+		    tcp_mask->hdr.tcp_urp) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
+		filter->src_port_mask  = tcp_mask->hdr.src_port;
+		if (tcp_mask->hdr.tcp_flags == 0xFF) {
+			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else if (!tcp_mask->hdr.tcp_flags) {
+			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+		filter->dst_port  = tcp_spec->hdr.dst_port;
+		filter->src_port  = tcp_spec->hdr.src_port;
+		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+	} else {
+		udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+		/**
+		 * Only support src & dst ports,
+		 * others should be masked.
+		 */
+		if (udp_mask->hdr.dgram_len ||
+		    udp_mask->hdr.dgram_cksum) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		filter->dst_port_mask = udp_mask->hdr.dst_port;
+		filter->src_port_mask = udp_mask->hdr.src_port;
+
+		udp_spec = (const struct rte_flow_item_udp *)item->spec;
+		filter->dst_port = udp_spec->hdr.dst_port;
+		filter->src_port = udp_spec->hdr.src_port;
+	}
+
+	/* check if the next not void item is END */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/************************************************
+	 * parse action
+	 ************************************************/
+	i = 0;
+
+	/**
+	 * n-tuple only supports forwarding,
+	 * check if the first not void action is QUEUE.
+	 */
+	ACTION_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+	filter->queue =
+		((const struct rte_flow_action_queue *)act->conf)->index;
+
+	/* check if the next not void item is END */
+	i++;
+	ACTION_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	/************************************************
+	 * parse attr
+	 ************************************************/
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+	}
+
+	if (attr->priority > 0xFFFF) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+	}
+	filter->priority = (uint16_t)attr->priority;
+
+	return 0;
+}
+
+/* a specific function for ixgbe because the flags is specific */
+static enum rte_flow_error_type
+ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+			  const struct rte_flow_item pattern[],
+			  const struct rte_flow_action actions[],
+			  struct rte_eth_ntuple_filter *filter)
+{
+	int ret;
+
+	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter);
+
+	if (ret)
+		return ret;
+
+	/* Ixgbe doesn't support tcp flags. */
+	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* Ixgbe doesn't support many priorities. */
+	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+	}
+
+	/* fixed value for ixgbe */
+	filter->flags = RTE_5TUPLE_FLAGS;
+	return 0;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checkes the format. Don't guarantee the rule can be programmed into
+ * the HW. Because there can be no enough room for the rule.
+ */
+enum rte_flow_error_type
+ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[])
+{
+	int ret;
+	struct rte_eth_ntuple_filter ntuple_filter;
+
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = ixgbe_parse_ntuple_filter(attr, pattern, actions, &ntuple_filter);
+	if (!ret)
+		return RTE_FLOW_ERROR_TYPE_NONE;
+
+
+	return ret;
+}
+
+/* Check whether a flow rule can be created on ixgbe. */
+int
+ixgbe_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	error->type = ixgbe_flow_rule_validate(dev, attr, pattern, actions);
+	if (error->type == RTE_FLOW_ERROR_TYPE_NONE)
+		return 0;
+	else
+		return -EINVAL;
+
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 12/18] net/ixgbe: parse ethertype filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (10 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 11/18] net/ixgbe: parse n-tuple filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 13/18] net/ixgbe: parse SYN filter Wei Zhao
                   ` (5 subsequent siblings)
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Check if the rule is an ethertype rule, and get the ethertype info.

Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 166 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 162 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index d3768c6..a421062 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -405,11 +405,21 @@ rte_flow_error_type ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
 					const struct rte_flow_item pattern[],
 					const struct rte_flow_action actions[],
 					struct rte_eth_ntuple_filter *filter);
+static enum rte_flow_error_type
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+				const struct rte_flow_item pattern[],
+				const struct rte_flow_action actions[],
+				struct rte_eth_ethertype_filter *filter);
+static enum rte_flow_error_type
+ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
+				const struct rte_flow_item pattern[],
+				const struct rte_flow_action actions[],
+				struct rte_eth_ethertype_filter *filter);
 enum rte_flow_error_type
 ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
-					const struct rte_flow_attr *attr,
-					const struct rte_flow_item pattern[],
-					const struct rte_flow_action actions[]);
+				const struct rte_flow_attr *attr,
+				const struct rte_flow_item pattern[],
+				const struct rte_flow_action actions[]);
 int ixgbe_flow_validate(struct rte_eth_dev *dev,
 		const struct rte_flow_attr *attr,
 		const struct rte_flow_item pattern[],
@@ -8306,6 +8316,149 @@ ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
 }
 
 /**
+ * Parse the rule to see if it is a ethertype rule.
+ * And get the ethertype filter info BTW.
+ */
+static enum rte_flow_error_type
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+			    const struct rte_flow_item pattern[],
+			    const struct rte_flow_action actions[],
+			    struct rte_eth_ethertype_filter *filter)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_action_queue *act_q;
+	uint32_t i, j;
+
+	/************************************************
+	 * parse pattern
+	 ************************************************/
+	i = 0;
+
+	/* the first not void item should be MAC */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	/* get the MAC info */
+	if (!item->spec || !item->mask)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	eth_spec = (const struct rte_flow_item_eth *)item->spec;
+	eth_mask = (const struct rte_flow_item_eth *)item->mask;
+	/**
+	 * Source MAC address must be masked.
+	 * Destination MAC address must be totally masked or not.
+	 */
+	if (eth_mask->src.addr_bytes[0] ||
+	    (eth_mask->dst.addr_bytes[0] != 0xFF &&
+	     eth_mask->dst.addr_bytes[0]))
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	for (j = 1; j < ETHER_ADDR_LEN; j++) {
+		if (eth_mask->src.addr_bytes[j] !=
+			eth_mask->src.addr_bytes[0] ||
+		    eth_mask->dst.addr_bytes[j] !=
+			 eth_mask->dst.addr_bytes[0])
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+	if ((rte_be_to_cpu_32(eth_mask->type) & 0xFFFF) != 0xFFFF)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	if (eth_mask->dst.addr_bytes[0]) {
+		filter->mac_addr = eth_spec->dst;
+		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+	} else {
+		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+	}
+	filter->ether_type = (uint16_t)rte_be_to_cpu_32(eth_spec->type);
+
+	/* check if the next not void item is END */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/************************************************
+	 * parse action
+	 ************************************************/
+	i = 0;
+
+	/* check if the first not void action is QUEUE or DROP. */
+	ACTION_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_q = (const struct rte_flow_action_queue *)act->conf;
+		filter->queue = act_q->index;
+	} else {
+		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+	}
+
+	/* check if the next not void item is END */
+	i++;
+	ACTION_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	/************************************************
+	 * parse attr
+	 ************************************************/
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+	}
+
+	/* not supported */
+	if (attr->priority) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+	}
+
+	return 0;
+}
+static enum rte_flow_error_type
+ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
+			     const struct rte_flow_item pattern[],
+			     const struct rte_flow_action actions[],
+			     struct rte_eth_ethertype_filter *filter)
+{
+	int ret;
+
+	ret = cons_parse_ethertype_filter(attr, pattern, actions, filter);
+
+	if (ret)
+		return ret;
+
+	/* Ixgbe doesn't support MAC address. */
+	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	return 0;
+}
+
+/**
  * Check if the flow rule is supported by ixgbe.
  * It only checkes the format. Don't guarantee the rule can be programmed into
  * the HW. Because there can be no enough room for the rule.
@@ -8318,12 +8471,17 @@ ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 {
 	int ret;
 	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
 
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
 	ret = ixgbe_parse_ntuple_filter(attr, pattern, actions, &ntuple_filter);
 	if (!ret)
 		return RTE_FLOW_ERROR_TYPE_NONE;
-
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = ixgbe_parse_ethertype_filter(attr, pattern,
+						actions, &ethertype_filter);
+	if (!ret)
+		return RTE_FLOW_ERROR_TYPE_NONE;
 
 	return ret;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 13/18] net/ixgbe: parse SYN filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (11 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 12/18] net/ixgbe: parse ethertype filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 14/18] net/ixgbe: parse L2 tunnel filter Wei Zhao
                   ` (4 subsequent siblings)
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Check if the rule is a SYN rule, and get the SYN info.

Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 154 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 154 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index a421062..3ed749a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -415,6 +415,11 @@ ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
 				const struct rte_flow_item pattern[],
 				const struct rte_flow_action actions[],
 				struct rte_eth_ethertype_filter *filter);
+static enum rte_flow_error_type
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_eth_syn_filter *filter);
 enum rte_flow_error_type
 ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 				const struct rte_flow_attr *attr,
@@ -8459,6 +8464,148 @@ ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
 }
 
 /**
+ * Parse the rule to see if it is a SYN rule.
+ * And get the SYN filter info BTW.
+ */
+static enum rte_flow_error_type
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+				const struct rte_flow_item pattern[],
+				const struct rte_flow_action actions[],
+				struct rte_eth_syn_filter *filter)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_action_queue *act_q;
+	uint32_t i;
+
+	/************************************************
+	 * parse pattern
+	 ************************************************/
+	i = 0;
+
+	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_TCP)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	/* Skip Ethernet */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		/* if the item is MAC, the content should be NULL */
+		if (item->spec || item->mask)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+
+		/* check if the next not void item is IPv4 or IPv6 */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+		    item->type != RTE_FLOW_ITEM_TYPE_IPV6)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* Skip IP */
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		/* if the item is IP, the content should be NULL */
+		if (item->spec || item->mask)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+
+		/* check if the next not void item is TCP */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_TCP)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* Get the TCP info. Only support SYN. */
+	if (!item->spec || !item->mask)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+	    tcp_mask->hdr.src_port ||
+	    tcp_mask->hdr.dst_port ||
+	    tcp_mask->hdr.sent_seq ||
+	    tcp_mask->hdr.recv_ack ||
+	    tcp_mask->hdr.data_off ||
+	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+	    tcp_mask->hdr.rx_win ||
+	    tcp_mask->hdr.cksum ||
+	    tcp_mask->hdr.tcp_urp) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* check if the next not void item is END */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/************************************************
+	 * parse action
+	 ************************************************/
+	i = 0;
+
+	/* check if the first not void action is QUEUE. */
+	ACTION_SKIP_VOID(filter, struct rte_eth_syn_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	act_q = (const struct rte_flow_action_queue *)act->conf;
+	filter->queue = act_q->index;
+
+	/* check if the next not void item is END */
+	i++;
+	ACTION_SKIP_VOID(filter, struct rte_eth_syn_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	/************************************************
+	 * parse attr
+	 ************************************************/
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+	}
+
+	/* Support 2 priorities, the lowest or highest. */
+	if (!attr->priority) {
+		filter->hig_pri = 0;
+	} else if (attr->priority == (uint32_t)~0U) {
+		filter->hig_pri = 1;
+	} else {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+	}
+
+	return 0;
+}
+
+/**
  * Check if the flow rule is supported by ixgbe.
  * It only checkes the format. Don't guarantee the rule can be programmed into
  * the HW. Because there can be no enough room for the rule.
@@ -8472,17 +8619,24 @@ ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 	int ret;
 	struct rte_eth_ntuple_filter ntuple_filter;
 	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
 
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
 	ret = ixgbe_parse_ntuple_filter(attr, pattern, actions, &ntuple_filter);
 	if (!ret)
 		return RTE_FLOW_ERROR_TYPE_NONE;
+
 	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
 	ret = ixgbe_parse_ethertype_filter(attr, pattern,
 						actions, &ethertype_filter);
 	if (!ret)
 		return RTE_FLOW_ERROR_TYPE_NONE;
 
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter);
+	if (!ret)
+		return RTE_FLOW_ERROR_TYPE_NONE;
+
 	return ret;
 }
 
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 14/18] net/ixgbe: parse L2 tunnel filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (12 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 13/18] net/ixgbe: parse SYN filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 15/18] net/ixgbe: parse flow director filter Wei Zhao
                   ` (3 subsequent siblings)
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Check if the rule is an L2 tunnel rule, and get the L2 tunnel info.

Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 139 +++++++++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_ethdev.h |  22 +++++++
 2 files changed, 161 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 3ed749a..104277d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -420,6 +420,11 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_eth_syn_filter *filter);
+static enum rte_flow_error_type
+cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_eth_l2_tunnel_conf *filter);
 enum rte_flow_error_type
 ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 				const struct rte_flow_attr *attr,
@@ -8606,6 +8611,136 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 }
 
 /**
+ * Parse the rule to see if it is a L2 tunnel rule.
+ * And get the L2 tunnel filter info BTW.
+ * Only support E-tag now.
+ */
+static enum rte_flow_error_type
+cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_eth_l2_tunnel_conf *filter)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_e_tag *e_tag_spec;
+	const struct rte_flow_item_e_tag *e_tag_mask;
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_q;
+	uint32_t i, j;
+
+	/************************************************
+	 * parse pattern
+	 ************************************************/
+	i = 0;
+
+	/* The first not void item should be e-tag. */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_l2_tunnel_conf,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	if (!item->spec || !item->mask) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
+	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+
+	/* Src & dst MAC address should be masked. */
+	for (j = 0; j < ETHER_ADDR_LEN; j++) {
+		if (e_tag_mask->src.addr_bytes[j] ||
+		    e_tag_mask->dst.addr_bytes[j]) {
+			memset(filter, 0,
+			       sizeof(struct rte_eth_l2_tunnel_conf));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+	}
+
+	/* Only care about GRP and E cid base. */
+	if (e_tag_mask->e_tag_ethertype ||
+	    e_tag_mask->e_pcp ||
+	    e_tag_mask->dei ||
+	    e_tag_mask->in_e_cid_base ||
+	    e_tag_mask->in_e_cid_ext ||
+	    e_tag_mask->e_cid_ext ||
+	    e_tag_mask->type ||
+	    e_tag_mask->tags ||
+	    e_tag_mask->grp != 0x3 ||
+	    e_tag_mask->e_cid_base != 0xFFF) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	/**
+	 * grp and e_cid_base are bit fields and only use 14 bits.
+	 * e-tag id is taken as little endian by HW.
+	 */
+	filter->tunnel_id = e_tag_spec->grp << 12;
+	filter->tunnel_id |= rte_be_to_cpu_16(e_tag_spec->e_cid_base);
+
+	/* check if the next not void item is END */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_l2_tunnel_conf,
+			  RTE_FLOW_ERROR_TYPE_ITEM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/************************************************
+	 * parse attr
+	 ************************************************/
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		return RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		return RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+	}
+
+	/* not supported */
+	if (attr->priority) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+	}
+
+	/************************************************
+	 * parse action
+	 ************************************************/
+	i = 0;
+
+	/* check if the first not void action is QUEUE. */
+	ACTION_SKIP_VOID(filter, struct rte_eth_l2_tunnel_conf,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	act_q = (const struct rte_flow_action_queue *)act->conf;
+	filter->pool = act_q->index;
+
+	/* check if the next not void item is END */
+	i++;
+	ACTION_SKIP_VOID(filter, struct rte_eth_l2_tunnel_conf,
+			 RTE_FLOW_ERROR_TYPE_ACTION);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	return 0;
+}
+
+/**
  * Check if the flow rule is supported by ixgbe.
  * It only checkes the format. Don't guarantee the rule can be programmed into
  * the HW. Because there can be no enough room for the rule.
@@ -8620,6 +8755,7 @@ ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 	struct rte_eth_ntuple_filter ntuple_filter;
 	struct rte_eth_ethertype_filter ethertype_filter;
 	struct rte_eth_syn_filter syn_filter;
+	struct rte_eth_l2_tunnel_conf l2_tn_filter;
 
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
 	ret = ixgbe_parse_ntuple_filter(attr, pattern, actions, &ntuple_filter);
@@ -8637,6 +8773,9 @@ ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 	if (!ret)
 		return RTE_FLOW_ERROR_TYPE_NONE;
 
+	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+	ret = cons_parse_l2_tn_filter(attr, pattern, actions, &l2_tn_filter);
+
 	return ret;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 9ed5f45..4aa5fd5 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -553,4 +553,26 @@ ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
 	return idx;
 }
 
+#define RTE_FLOW_ITEM_TYPE_E_TAG 0xF2
+
+/**
+ * E-tag header item.
+ * NOTE(review): both the item-type value (0xF2) and this struct are
+ * defined locally in this driver header rather than in the rte_flow
+ * API - presumably a stopgap until rte_flow gains an official E-tag
+ * item; confirm and move to RTE when available.
+ * NOTE(review): the 3/1/12/2/2/12/8/8-bit fields are declared as
+ * uint16_t bitfields while the parser byte-swaps values with
+ * rte_be_to_cpu_16(); bitfield layout is implementation-defined, so
+ * verify this matches the wire format on both endiannesses.
+ */
+struct rte_flow_item_e_tag {
+	struct ether_addr dst; /**< Destination MAC. */
+	struct ether_addr src; /**< Source MAC. */
+	uint16_t e_tag_ethertype; /**< E-tag EtherType, 0x893F. */
+	uint16_t e_pcp:3; /**<  E-PCP */
+	uint16_t dei:1; /**< DEI */
+	uint16_t in_e_cid_base:12; /**< Ingress E-CID base */
+	uint16_t rsv:2; /**< reserved */
+	uint16_t grp:2; /**< GRP */
+	uint16_t e_cid_base:12; /**< E-CID base */
+	uint16_t in_e_cid_ext:8; /**< Ingress E-CID extend */
+	uint16_t e_cid_ext:8; /**< E-CID extend */
+	uint16_t type; /**< MAC type. */
+	unsigned int tags; /**< Number of 802.1Q/ad tags defined. */
+	struct {
+		uint16_t tpid; /**< Tag protocol identifier. */
+		uint16_t tci; /**< Tag control information. */
+	} tag[]; /**< 802.1Q/ad tag definitions, outermost first. */
+};
 #endif /* _IXGBE_ETHDEV_H_ */
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 15/18] net/ixgbe: parse flow director filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (13 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 14/18] net/ixgbe: parse L2 tunnel filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-20 17:00   ` Ferruh Yigit
  2016-12-02 10:43 ` [PATCH 16/18] net/ixgbe: create consistent filter Wei Zhao
                   ` (2 subsequent siblings)
  17 siblings, 1 reply; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

Check if a flow rule is a flow director rule, and if so extract the flow director filter info from it.

Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 823 +++++++++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_ethdev.h |  32 +-
 drivers/net/ixgbe/ixgbe_fdir.c   | 247 ++++++++----
 3 files changed, 1019 insertions(+), 83 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 104277d..75fadb0 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -425,6 +425,21 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_eth_l2_tunnel_conf *filter);
+static enum rte_flow_error_type
+ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct ixgbe_fdir_rule *rule);
+static enum rte_flow_error_type
+ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct ixgbe_fdir_rule *rule);
+static enum rte_flow_error_type
+ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct ixgbe_fdir_rule *rule);
 enum rte_flow_error_type
 ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 				const struct rte_flow_attr *attr,
@@ -1390,6 +1405,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 			     "Failed to allocate memory for fdir hash map!");
 		return -ENOMEM;
 	}
+	fdir_info->mask_added = FALSE;
 
 	/* initialize l2 tunnel filter list & hash */
 	TAILQ_INIT(&l2_tn_info->l2_tn_list);
@@ -8609,6 +8625,808 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 
 	return 0;
 }
+/**
+ * Parse the attr and action info of a flow director rule.
+ *
+ * The attr must be ingress only, with no egress and no priority.
+ * The action list must be exactly QUEUE-or-DROP, then MARK, then END
+ * (VOID actions are skipped by ACTION_SKIP_VOID).  On success 0 is
+ * returned and rule->queue (or rule->fdirflags for DROP) plus
+ * rule->soft_id are filled in; on any mismatch *rule is zeroed and an
+ * rte_flow error type is returned.
+ *
+ * NOTE(review): a MARK action is mandatory here - a QUEUE/DROP rule
+ * without MARK is rejected.  Confirm this restriction is intended.
+ */
+static int
+ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
+			  const struct rte_flow_action actions[],
+			  struct ixgbe_fdir_rule *rule)
+{
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark;
+	uint32_t i;
+
+	/************************************************
+	 * parse attr
+	 ************************************************/
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+	}
+
+	/* not supported */
+	if (attr->priority) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+	}
+
+	/************************************************
+	 * parse action
+	 ************************************************/
+	i = 0;
+
+	/* check if the first not void action is QUEUE or DROP. */
+	ACTION_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_q = (const struct rte_flow_action_queue *)act->conf;
+		rule->queue = act_q->index;
+	} else { /* drop */
+		rule->fdirflags = IXGBE_FDIRCMD_DROP;
+	}
+
+	/* check if the next not void item is MARK */
+	i++;
+	ACTION_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+			 RTE_FLOW_ERROR_TYPE_ACTION);
+	if (act->type != RTE_FLOW_ACTION_TYPE_MARK) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	mark = (const struct rte_flow_action_mark *)act->conf;
+	rule->soft_id = mark->id;
+
+	/* check if the next not void item is END */
+	i++;
+	ACTION_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+			 RTE_FLOW_ERROR_TYPE_ACTION);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	return 0;
+}
+
+/**
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
+ * and get the flow director filter info as well.
+ *
+ * Pattern items may be ETH, VLAN, IPV4, TCP, UDP, SCTP, then END
+ * (VOID items are skipped by PATTERN_SKIP_VOID).  On any unsupported
+ * item *rule is zeroed and an rte_flow error type is returned; on
+ * success the actions are parsed by ixgbe_parse_fdir_act_attr().
+ */
+static enum rte_flow_error_type
+ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+			       const struct rte_flow_item pattern[],
+			       const struct rte_flow_action actions[],
+			       struct ixgbe_fdir_rule *rule)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec;
+	const struct rte_flow_item_ipv4 *ipv4_mask;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec;
+	const struct rte_flow_item_sctp *sctp_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+
+	uint32_t i, j;
+
+	/**
+	 * Some fields may not be provided. Set spec to 0 and mask to default
+	 * value. So, we need not do anything for the not provided fields later.
+	 */
+	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+	rule->mask.vlan_tci_mask = 0;
+
+	/************************************************
+	 * parse pattern
+	 ************************************************/
+	i = 0;
+
+	/**
+	 * The first not void item should be
+	 * MAC or IPv4 or TCP or UDP or SCTP.
+	 */
+	PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	rule->mode = RTE_FDIR_MODE_PERFECT;
+
+	/* Get the MAC info. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		/**
+		 * Only support vlan and dst MAC address,
+		 * others should be masked.
+		 */
+		if (item->spec && !item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			eth_spec = (const struct rte_flow_item_eth *)item->spec;
+
+			/* Get the dst MAC. */
+			for (j = 0; j < ETHER_ADDR_LEN; j++) {
+				rule->ixgbe_fdir.formatted.inner_mac[j] =
+					eth_spec->dst.addr_bytes[j];
+			}
+		}
+
+
+		if (item->mask) {
+			/* If ethernet has meaning, it means MAC VLAN mode. */
+			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
+			rule->b_mask = TRUE;
+			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+			/* Ether type should be masked. */
+			if (eth_mask->type) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				return RTE_FLOW_ERROR_TYPE_ITEM;
+			}
+
+			/**
+			* src MAC address must be masked,
+			* and don't support dst MAC address mask.
+			*/
+			for (j = 0; j < ETHER_ADDR_LEN; j++) {
+				if (eth_mask->src.addr_bytes[j] ||
+					eth_mask->dst.addr_bytes[j] != 0xFF) {
+					memset(rule, 0,
+					sizeof(struct ixgbe_fdir_rule));
+					return RTE_FLOW_ERROR_TYPE_ITEM;
+				}
+			}
+
+			/* When no VLAN, considered as full mask. */
+			/* NOTE(review): 0xEFFF clears bit 12 of the TCI
+			 * (DEI/CFI) - confirm that is the intended "full"
+			 * VLAN mask.
+			 */
+			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+		}
+		/**
+		 * If both spec and mask are NULL,
+		 * it means don't care about ETH.
+		 * Do nothing.
+		 */
+
+		/**
+		 * Check if the next not void item is vlan or ipv4.
+		 * IPv6 is not supported.
+		 */
+		i++;
+		PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				return RTE_FLOW_ERROR_TYPE_ITEM;
+			}
+		} else {
+			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				return RTE_FLOW_ERROR_TYPE_ITEM;
+			}
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		if (!(item->spec && item->mask)) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+		/* TPID must be fully masked. */
+		if (vlan_mask->tpid != (uint16_t)~0U) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->mask.vlan_tci_mask = vlan_mask->tci;
+		/* More than one tags are not supported. */
+
+		/**
+		* Check if the next not void item is not vlan.
+		*/
+		i++;
+		PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		} else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+	}
+
+	/* Get the IP info. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+		/**
+		 * Set the flow type even if there's no content
+		 * as we must have a flow type.
+		 */
+		rule->ixgbe_fdir.formatted.flow_type =
+			IXGBE_ATR_FLOW_TYPE_IPV4;
+
+		/**
+		 * Only care about src & dst addresses,
+		 * others should be masked.
+		 */
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->b_mask = TRUE;
+		ipv4_mask =
+			(const struct rte_flow_item_ipv4 *)item->mask;
+		if (ipv4_mask->hdr.version_ihl ||
+		    ipv4_mask->hdr.type_of_service ||
+		    ipv4_mask->hdr.total_length ||
+		    ipv4_mask->hdr.packet_id ||
+		    ipv4_mask->hdr.fragment_offset ||
+		    ipv4_mask->hdr.time_to_live ||
+		    ipv4_mask->hdr.next_proto_id ||
+		    ipv4_mask->hdr.hdr_checksum) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
+		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			ipv4_spec =
+				(const struct rte_flow_item_ipv4 *)item->spec;
+			rule->ixgbe_fdir.formatted.dst_ip[0] =
+				ipv4_spec->hdr.dst_addr;
+			rule->ixgbe_fdir.formatted.src_ip[0] =
+				ipv4_spec->hdr.src_addr;
+		}
+
+		/**
+		 * Check if the next not void item is
+		 * TCP or UDP or SCTP or END.
+		 */
+		i++;
+		PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+	}
+
+	/* Get the TCP info. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+		/**
+		 * Set the flow type even if there's no content
+		 * as we must have a flow type.
+		 */
+		rule->ixgbe_fdir.formatted.flow_type =
+			IXGBE_ATR_FLOW_TYPE_TCPV4;
+
+		/**
+		 * Only care about src & dst ports,
+		 * others should be masked.
+		 */
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->b_mask = TRUE;
+		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+		if (tcp_mask->hdr.sent_seq ||
+		    tcp_mask->hdr.recv_ack ||
+		    tcp_mask->hdr.data_off ||
+		    tcp_mask->hdr.tcp_flags ||
+		    tcp_mask->hdr.rx_win ||
+		    tcp_mask->hdr.cksum ||
+		    tcp_mask->hdr.tcp_urp) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+			rule->ixgbe_fdir.formatted.src_port =
+				tcp_spec->hdr.src_port;
+			rule->ixgbe_fdir.formatted.dst_port =
+				tcp_spec->hdr.dst_port;
+		}
+	}
+
+	/* Get the UDP info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		/**
+		 * Set the flow type even if there's no content
+		 * as we must have a flow type.
+		 */
+		rule->ixgbe_fdir.formatted.flow_type =
+			IXGBE_ATR_FLOW_TYPE_UDPV4;
+
+		/**
+		 * Only care about src & dst ports,
+		 * others should be masked.
+		 */
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->b_mask = TRUE;
+		udp_mask = (const struct rte_flow_item_udp *)item->mask;
+		if (udp_mask->hdr.dgram_len ||
+		    udp_mask->hdr.dgram_cksum) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->mask.src_port_mask = udp_mask->hdr.src_port;
+		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			udp_spec = (const struct rte_flow_item_udp *)item->spec;
+			rule->ixgbe_fdir.formatted.src_port =
+				udp_spec->hdr.src_port;
+			rule->ixgbe_fdir.formatted.dst_port =
+				udp_spec->hdr.dst_port;
+		}
+	}
+
+	/* Get the SCTP info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+		/**
+		 * Set the flow type even if there's no content
+		 * as we must have a flow type.
+		 */
+		rule->ixgbe_fdir.formatted.flow_type =
+			IXGBE_ATR_FLOW_TYPE_SCTPV4;
+
+		/**
+		 * Only care about src & dst ports,
+		 * others should be masked.
+		 */
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->b_mask = TRUE;
+		sctp_mask =
+			(const struct rte_flow_item_sctp *)item->mask;
+		if (sctp_mask->hdr.tag ||
+		    sctp_mask->hdr.cksum) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			sctp_spec =
+				(const struct rte_flow_item_sctp *)item->spec;
+			rule->ixgbe_fdir.formatted.src_port =
+				sctp_spec->hdr.src_port;
+			rule->ixgbe_fdir.formatted.dst_port =
+				sctp_spec->hdr.dst_port;
+		}
+	}
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		/* check if the next not void item is END */
+		i++;
+		PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+	}
+
+	return ixgbe_parse_fdir_act_attr(attr, actions, rule);
+}
+
+#define NVGRE_PROTOCOL 0x6558
+
+/**
+ * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
+ * and get the flow director filter info as well.
+ *
+ * Pattern: [ETH] [IPV4|IPV6] [UDP] VXLAN|NVGRE ETH [VLAN] END (VOID
+ * items are skipped).  On any unsupported item *rule is zeroed and an
+ * rte_flow error type is returned; on success the actions are parsed
+ * by ixgbe_parse_fdir_act_attr().
+ */
+static enum rte_flow_error_type
+ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+			       const struct rte_flow_item pattern[],
+			       const struct rte_flow_action actions[],
+			       struct ixgbe_fdir_rule *rule)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_vxlan *vxlan_spec;
+	const struct rte_flow_item_vxlan *vxlan_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec;
+	const struct rte_flow_item_nvgre *nvgre_mask;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	uint32_t i, j;
+
+	/**
+	 * Some fields may not be provided. Set spec to 0 and mask to default
+	 * value. So, we need not do anything for the not provided fields later.
+	 */
+	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+	rule->mask.vlan_tci_mask = 0;
+
+	/************************************************
+	 * parse pattern
+	 ************************************************/
+	i = 0;
+
+	/**
+	 * The first not void item should be
+	 * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
+	 */
+	PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
+	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
+
+	/* Skip MAC. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		/* Only used to describe the protocol stack. */
+		if (item->spec || item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		/* Check if the next not void item is IPv4 or IPv6. */
+		i++;
+		PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+	}
+
+	/* Skip IP. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		/* Only used to describe the protocol stack. */
+		if (item->spec || item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		/* Check if the next not void item is UDP or NVGRE. */
+		i++;
+		PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+	}
+
+	/* Skip UDP. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		/* Only used to describe the protocol stack. */
+		if (item->spec || item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		/* Check if the next not void item is VxLAN. */
+		i++;
+		PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+	}
+
+	/* Get the VxLAN info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+		rule->ixgbe_fdir.formatted.tunnel_type =
+			RTE_FDIR_TUNNEL_TYPE_VXLAN;
+
+		/* Only care about VNI, others should be masked. */
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->b_mask = TRUE;
+
+		/* Tunnel type is always meaningful. */
+		rule->mask.tunnel_type_mask = 1;
+
+		vxlan_mask =
+			(const struct rte_flow_item_vxlan *)item->mask;
+		if (vxlan_mask->flags) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		/* VNI must be totally masked or not. */
+		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1]
+			|| vxlan_mask->vni[2]) &&
+		    ((vxlan_mask->vni[0] != 0xFF) ||
+			(vxlan_mask->vni[1] != 0xFF) ||
+				(vxlan_mask->vni[2] != 0xFF))) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
+			RTE_DIM(vxlan_mask->vni));
+		rule->mask.tunnel_id_mask <<= 8;
+
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			vxlan_spec = (const struct rte_flow_item_vxlan *)
+					item->spec;
+			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
+				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
+			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
+		}
+	}
+
+	/* Get the NVGRE info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
+		rule->ixgbe_fdir.formatted.tunnel_type =
+			RTE_FDIR_TUNNEL_TYPE_NVGRE;
+
+		/**
+		 * Only care about flags0, flags1, protocol and TNI,
+		 * others should be masked.
+		 */
+		if (!item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->b_mask = TRUE;
+
+		/* Tunnel type is always meaningful. */
+		rule->mask.tunnel_type_mask = 1;
+
+		nvgre_mask =
+			(const struct rte_flow_item_nvgre *)item->mask;
+		if (nvgre_mask->ver ||
+		    nvgre_mask->flow_id) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		if (!nvgre_mask->flags0 ||
+		    nvgre_mask->flags1 != 0x3 ||
+		    nvgre_mask->protocol != 0xFFFF) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		/* TNI must be totally masked or not. */
+		if (nvgre_mask->tni &&
+		    nvgre_mask->tni != 0xFFFFFF) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		/* tni is a 24-bits bit field */
+		/* NOTE(review): confirm rte_be_to_cpu_24() exists in
+		 * rte_byteorder.h; the standard helpers are 16/32/64-bit.
+		 */
+		rule->mask.tunnel_id_mask = rte_be_to_cpu_24(nvgre_mask->tni);
+		rule->mask.tunnel_id_mask =
+			rte_cpu_to_be_32(rule->mask.tunnel_id_mask);
+
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			nvgre_spec =
+				(const struct rte_flow_item_nvgre *)item->spec;
+			if (nvgre_spec->flags0 ||
+			    nvgre_spec->flags1 != 2 ||
+			    nvgre_spec->protocol !=
+			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				return RTE_FLOW_ERROR_TYPE_ITEM;
+			}
+			/* tni is a 24-bits bit field */
+			rule->ixgbe_fdir.formatted.tni_vni =
+				rte_be_to_cpu_24(nvgre_spec->tni);
+			rule->ixgbe_fdir.formatted.tni_vni =
+				rte_cpu_to_be_32(
+					rule->ixgbe_fdir.formatted.tni_vni);
+		}
+	}
+
+	/* check if the next not void item is MAC */
+	i++;
+	PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+			  RTE_FLOW_ERROR_TYPE_ITEM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/**
+	 * Only support vlan and dst MAC address,
+	 * others should be masked.
+	 */
+
+	if (!item->mask) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+	rule->b_mask = TRUE;
+	eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+	/* Ether type should be masked. */
+	if (eth_mask->type) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* src MAC address should be masked. */
+	for (j = 0; j < ETHER_ADDR_LEN; j++) {
+		if (eth_mask->src.addr_bytes[j]) {
+			memset(rule, 0,
+			       sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+	}
+	rule->mask.mac_addr_byte_mask = 0;
+	for (j = 0; j < ETHER_ADDR_LEN; j++) {
+		/* It's a per byte mask. */
+		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+			rule->mask.mac_addr_byte_mask |= 0x1 << j;
+		} else if (eth_mask->dst.addr_bytes[j]) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+	}
+
+	/* When no vlan, considered as full mask. */
+	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+
+	if (item->spec) {
+		rule->b_spec = TRUE;
+		eth_spec = (const struct rte_flow_item_eth *)item->spec;
+
+		/* Get the dst MAC. */
+		for (j = 0; j < ETHER_ADDR_LEN; j++) {
+			rule->ixgbe_fdir.formatted.inner_mac[j] =
+				eth_spec->dst.addr_bytes[j];
+		}
+	}
+
+	/**
+	 * Check if the next not void item is vlan or ipv4.
+	 * IPv6 is not supported.
+	 * Fix: the original compared against RTE_FLOW_ITEM_TYPE_VLAN
+	 * twice, which made the check a duplicate and rejected IPv4.
+	 */
+	i++;
+	PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+		RTE_FLOW_ERROR_TYPE_ITEM);
+	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
+	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		if (!(item->spec && item->mask)) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+		if (vlan_mask->tpid != (uint16_t)~0U) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+		rule->mask.vlan_tci_mask = vlan_mask->tci;
+		/* More than one tags are not supported. */
+
+		/**
+		 * Check if the next not void item is END.
+		 * The original code re-scanned the pattern again after
+		 * this check, stepping past the END item and risking a
+		 * read beyond the caller's pattern array; one check is
+		 * sufficient.
+		 */
+		i++;
+		PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+	}
+
+	/**
+	 * If no VLAN item was given, the VLAN is don't-care.
+	 * Do nothing.
+	 */
+
+	return ixgbe_parse_fdir_act_attr(attr, actions, rule);
+}
+
+/**
+ * Parse a flow rule as a flow director rule.
+ *
+ * Tries the normal (IP / MAC VLAN) parser first and, if that fails,
+ * the tunnel (VxLAN / NVGRE) parser.  Returns 0 on the first success;
+ * otherwise returns the error code from the tunnel parse attempt.
+ */
+static enum rte_flow_error_type
+ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct ixgbe_fdir_rule *rule)
+{
+	int ret;
+
+	ret = ixgbe_parse_fdir_filter_normal(attr, pattern, actions, rule);
+
+	if (!ret)
+		return 0;
+
+	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern, actions, rule);
+
+	return ret;
+}
 
 /**
  * Parse the rule to see if it is a L2 tunnel rule.
@@ -8756,6 +9574,7 @@ ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 	struct rte_eth_ethertype_filter ethertype_filter;
 	struct rte_eth_syn_filter syn_filter;
 	struct rte_eth_l2_tunnel_conf l2_tn_filter;
+	struct ixgbe_fdir_rule fdir_rule;
 
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
 	ret = ixgbe_parse_ntuple_filter(attr, pattern, actions, &ntuple_filter);
@@ -8772,6 +9591,10 @@ ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 	ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter);
 	if (!ret)
 		return RTE_FLOW_ERROR_TYPE_NONE;
+	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+	ret = ixgbe_parse_fdir_filter(attr, pattern, actions, &fdir_rule);
+	if (!ret)
+		return RTE_FLOW_ERROR_TYPE_NONE;
 
 	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
 	ret = cons_parse_l2_tn_filter(attr, pattern, actions, &l2_tn_filter);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 4aa5fd5..d23ad67 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -162,6 +162,17 @@ struct ixgbe_fdir_filter {
 /* list of fdir filters */
 TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);
 
+/* A flow director rule as parsed from a consistent (rte_flow) filter. */
+struct ixgbe_fdir_rule {
+	struct ixgbe_hw_fdir_mask mask;
+	union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter. */
+	bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
+	bool b_mask; /* If TRUE, mask has meaning. */
+	enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+	uint32_t fdirflags; /* drop or forward */
+	uint32_t soft_id; /* a unique value for this rule */
+	uint8_t queue; /* assigned rx queue */
+};
+
 struct ixgbe_hw_fdir_info {
 	struct ixgbe_hw_fdir_mask mask;
 	uint8_t     flex_bytes_offset;
@@ -177,6 +188,7 @@ struct ixgbe_hw_fdir_info {
 	/* store the pointers of the filters, index is the hash value. */
 	struct ixgbe_fdir_filter **hash_map;
 	struct rte_hash *hash_handle; /* cuckoo hash handler */
+	bool mask_added; /* If already got mask from consistent filter */
 };
 
 /* structure for interrupt relative data */
@@ -473,6 +485,10 @@ bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
  * Flow director function prototypes
  */
 int ixgbe_fdir_configure(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+			      struct ixgbe_fdir_rule *rule,
+			      bool del, bool update);
 
 void ixgbe_configure_dcb(struct rte_eth_dev *dev);
 
@@ -553,9 +569,23 @@ ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
 	return idx;
 }
 
+
+#define RTE_FLOW_ITEM_TYPE_NVGRE 0xF1
 #define RTE_FLOW_ITEM_TYPE_E_TAG 0xF2
 
-/* E-tag Header.*/
+
+/**
+ * NVGRE header item.
+ * NOTE(review): the 16-bit protocol field and 24-bit tni field are
+ * declared as uint32_t bitfields, yet the parser compares them against
+ * byte-swapped values (rte_cpu_to_be_16(NVGRE_PROTOCOL),
+ * rte_be_to_cpu_24(tni)); bitfield layout is implementation-defined,
+ * so verify this matches the wire format on both endiannesses.
+ */
+struct rte_flow_item_nvgre {
+	uint32_t flags0:1; /**< 0 */
+	uint32_t rsvd1:1; /**< 1 bit not defined */
+	uint32_t flags1:2; /**< 2 bits, 1 0 */
+	uint32_t rsvd0:9; /**< Reserved0 */
+	uint32_t ver:3; /**< version */
+	uint32_t protocol:16; /**< protocol type, 0x6558 */
+	uint32_t tni:24; /**< tenant network ID or virtual subnet ID */
+	uint32_t flow_id:8; /**< flow ID or Reserved */
+};
+
+/* E-tag Header. Need to move to RTE */
 struct rte_flow_item_e_tag {
 	struct ether_addr dst; /**< Destination MAC. */
 	struct ether_addr src; /**< Source MAC. */
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 7097dca..0c6464f 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -112,10 +112,8 @@
 static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
 static int fdir_set_input_mask(struct rte_eth_dev *dev,
 			       const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
-		const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
-				    const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
+static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
 static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
 		const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
 static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
@@ -295,8 +293,7 @@ reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
  * but makes use of the rte_fdir_masks structure to see which bits to set.
  */
 static int
-fdir_set_input_mask_82599(struct rte_eth_dev *dev,
-		const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_fdir_info *info =
@@ -308,8 +305,6 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
 	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
 	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
 	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
-	uint16_t dst_ipv6m = 0;
-	uint16_t src_ipv6m = 0;
 	volatile uint32_t *reg;
 
 	PMD_INIT_FUNC_TRACE();
@@ -320,31 +315,30 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
 	 * cannot be masked out in this implementation.
 	 */
-	if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
+	if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
 		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
 		fdirm |= IXGBE_FDIRM_L4P;
 
-	if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
 		/* mask VLAN Priority */
 		fdirm |= IXGBE_FDIRM_VLANP;
-	else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
 		/* mask VLAN ID */
 		fdirm |= IXGBE_FDIRM_VLANID;
-	else if (input_mask->vlan_tci_mask == 0)
+	else if (info->mask.vlan_tci_mask == 0)
 		/* mask VLAN ID and Priority */
 		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
-	else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
 		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
 		return -EINVAL;
 	}
-	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
 	/* store the TCP/UDP port masks, bit reversed from port layout */
 	fdirtcpm = reverse_fdir_bitmasks(
-			rte_be_to_cpu_16(input_mask->dst_port_mask),
-			rte_be_to_cpu_16(input_mask->src_port_mask));
+			rte_be_to_cpu_16(info->mask.dst_port_mask),
+			rte_be_to_cpu_16(info->mask.src_port_mask));
 
 	/* write all the same so that UDP, TCP and SCTP use the same mask
 	 * (little-endian)
@@ -352,30 +346,23 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
-	info->mask.src_port_mask = input_mask->src_port_mask;
-	info->mask.dst_port_mask = input_mask->dst_port_mask;
 
 	/* Store source and destination IPv4 masks (big-endian),
 	 * can not use IXGBE_WRITE_REG.
 	 */
 	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
-	*reg = ~(input_mask->ipv4_mask.src_ip);
+	*reg = ~(info->mask.src_ipv4_mask);
 	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
-	*reg = ~(input_mask->ipv4_mask.dst_ip);
-	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
-	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+	*reg = ~(info->mask.dst_ipv4_mask);
 
 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
 		/*
 		 * Store source and destination IPv6 masks (bit reversed)
 		 */
-		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
-		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
-		fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;
+		fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
+			    info->mask.src_ipv6_mask;
 
 		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
-		info->mask.src_ipv6_mask = src_ipv6m;
-		info->mask.dst_ipv6_mask = dst_ipv6m;
 	}
 
 	return IXGBE_SUCCESS;
@@ -386,8 +373,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
  * but makes use of the rte_fdir_masks structure to see which bits to set.
  */
 static int
-fdir_set_input_mask_x550(struct rte_eth_dev *dev,
-			 const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_fdir_info *info =
@@ -410,20 +396,19 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
 	/* some bits must be set for mac vlan or tunnel mode */
 	fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
 
-	if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
 		/* mask VLAN Priority */
 		fdirm |= IXGBE_FDIRM_VLANP;
-	else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
 		/* mask VLAN ID */
 		fdirm |= IXGBE_FDIRM_VLANID;
-	else if (input_mask->vlan_tci_mask == 0)
+	else if (info->mask.vlan_tci_mask == 0)
 		/* mask VLAN ID and Priority */
 		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
-	else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
 		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
 		return -EINVAL;
 	}
-	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
@@ -434,12 +419,11 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
 				IXGBE_FDIRIP6M_TNI_VNI;
 
 	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
-		mac_mask = input_mask->mac_addr_byte_mask;
+		mac_mask = info->mask.mac_addr_byte_mask;
 		fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
 				& IXGBE_FDIRIP6M_INNER_MAC;
-		info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
 
-		switch (input_mask->tunnel_type_mask) {
+		switch (info->mask.tunnel_type_mask) {
 		case 0:
 			/* Mask tunnel type */
 			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
@@ -450,10 +434,8 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
 			PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
 			return -EINVAL;
 		}
-		info->mask.tunnel_type_mask =
-			input_mask->tunnel_type_mask;
 
-		switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
+		switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
 		case 0x0:
 			/* Mask vxlan id */
 			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
@@ -467,8 +449,6 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
 			PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
 			return -EINVAL;
 		}
-		info->mask.tunnel_id_mask =
-			input_mask->tunnel_id_mask;
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
@@ -482,22 +462,90 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
 }
 
 static int
-fdir_set_input_mask(struct rte_eth_dev *dev,
-		    const struct rte_eth_fdir_masks *input_mask)
+ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
+				  const struct rte_eth_fdir_masks *input_mask)
+{
+	struct ixgbe_hw_fdir_info *info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	uint16_t dst_ipv6m = 0;
+	uint16_t src_ipv6m = 0;
+
+	memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+	info->mask.src_port_mask = input_mask->src_port_mask;
+	info->mask.dst_port_mask = input_mask->dst_port_mask;
+	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+	info->mask.src_ipv6_mask = src_ipv6m;
+	info->mask.dst_ipv6_mask = dst_ipv6m;
+
+	return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
+				 const struct rte_eth_fdir_masks *input_mask)
+{
+	struct ixgbe_hw_fdir_info *info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+
+	memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+	info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+	info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
+	info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
+
+	return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
+			    const struct rte_eth_fdir_masks *input_mask)
+{
+	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+	if (mode >= RTE_FDIR_MODE_SIGNATURE &&
+	    mode <= RTE_FDIR_MODE_PERFECT)
+		return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
+	else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+		 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+		return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
+
+	PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+	return -ENOTSUP;
+}
+
+int
+ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
 {
 	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
 
 	if (mode >= RTE_FDIR_MODE_SIGNATURE &&
 	    mode <= RTE_FDIR_MODE_PERFECT)
-		return fdir_set_input_mask_82599(dev, input_mask);
+		return fdir_set_input_mask_82599(dev);
 	else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
 		 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
-		return fdir_set_input_mask_x550(dev, input_mask);
+		return fdir_set_input_mask_x550(dev);
 
 	PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
 	return -ENOTSUP;
 }
 
+static int
+fdir_set_input_mask(struct rte_eth_dev *dev,
+		    const struct rte_eth_fdir_masks *input_mask)
+{
+	int ret;
+
+	ret = ixgbe_fdir_store_input_mask(dev, input_mask);
+	if (ret)
+		return ret;
+
+	return ixgbe_fdir_set_input_mask(dev);
+}
+
 /*
  * ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
  * arguments are valid
@@ -1135,23 +1183,40 @@ ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
 	return 0;
 }
 
-/*
- * ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
- * @dev: pointer to the structure rte_eth_dev
- * @fdir_filter: fdir filter entry
- * @del: 1 - delete, 0 - add
- * @update: 1 - update
- */
 static int
-ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
-			  const struct rte_eth_fdir_filter *fdir_filter,
+ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
+			    const struct rte_eth_fdir_filter *fdir_filter,
+			    struct ixgbe_fdir_rule *rule)
+{
+	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+	int err;
+
+	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+
+	err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
+					     &rule->ixgbe_fdir,
+					     fdir_mode);
+	if (err)
+		return err;
+
+	rule->mode = fdir_mode;
+	if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
+		rule->fdirflags = IXGBE_FDIRCMD_DROP;
+	rule->queue = fdir_filter->action.rx_queue;
+	rule->soft_id = fdir_filter->soft_id;
+
+	return 0;
+}
+
+int
+ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+			  struct ixgbe_fdir_rule *rule,
 			  bool del,
 			  bool update)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t fdircmd_flags;
 	uint32_t fdirhash;
-	union ixgbe_atr_input input;
 	uint8_t queue;
 	bool is_perfect = FALSE;
 	int err;
@@ -1161,7 +1226,8 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 	struct ixgbe_fdir_filter *node;
 	bool add_node = FALSE;
 
-	if (fdir_mode == RTE_FDIR_MODE_NONE)
+	if (fdir_mode == RTE_FDIR_MODE_NONE ||
+	    fdir_mode != rule->mode)
 		return -ENOTSUP;
 
 	/*
@@ -1174,7 +1240,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 	    (hw->mac.type == ixgbe_mac_X550 ||
 	     hw->mac.type == ixgbe_mac_X550EM_x ||
 	     hw->mac.type == ixgbe_mac_X550EM_a) &&
-	    (fdir_filter->input.flow_type ==
+	    (rule->ixgbe_fdir.formatted.flow_type ==
 	     RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
 	    (info->mask.src_port_mask != 0 ||
 	     info->mask.dst_port_mask != 0)) {
@@ -1188,29 +1254,23 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
 		is_perfect = TRUE;
 
-	memset(&input, 0, sizeof(input));
-
-	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
-					     fdir_mode);
-	if (err)
-		return err;
-
 	if (is_perfect) {
-		if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+		if (rule->ixgbe_fdir.formatted.flow_type &
+		    IXGBE_ATR_L4TYPE_IPV6_MASK) {
 			PMD_DRV_LOG(ERR, "IPv6 is not supported in"
 				    " perfect mode!");
 			return -ENOTSUP;
 		}
-		fdirhash = atr_compute_perfect_hash_82599(&input,
+		fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
 							  dev->data->dev_conf.fdir_conf.pballoc);
-		fdirhash |= fdir_filter->soft_id <<
+		fdirhash |= rule->soft_id <<
 			IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
 	} else
-		fdirhash = atr_compute_sig_hash_82599(&input,
+		fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
 						      dev->data->dev_conf.fdir_conf.pballoc);
 
 	if (del) {
-		err = ixgbe_remove_fdir_filter(info, &input);
+		err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
 		if (err < 0)
 			return err;
 
@@ -1223,7 +1283,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 	}
 	/* add or update an fdir filter*/
 	fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
-	if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
+	if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
 		if (is_perfect) {
 			queue = dev->data->dev_conf.fdir_conf.drop_queue;
 			fdircmd_flags |= IXGBE_FDIRCMD_DROP;
@@ -1232,13 +1292,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 				    " signature mode.");
 			return -EINVAL;
 		}
-	} else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
-		   fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
-		queue = (uint8_t)fdir_filter->action.rx_queue;
+	} else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
+		queue = (uint8_t)rule->queue;
 	else
 		return -EINVAL;
 
-	node = ixgbe_fdir_filter_lookup(info, &input);
+	node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
 	if (node) {
 		if (update) {
 			node->fdirflags = fdircmd_flags;
@@ -1256,7 +1315,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 		if (!node)
 			return -ENOMEM;
 		(void)rte_memcpy(&node->ixgbe_fdir,
-				 &input,
+				 &rule->ixgbe_fdir,
 				 sizeof(union ixgbe_atr_input));
 		node->fdirflags = fdircmd_flags;
 		node->fdirhash = fdirhash;
@@ -1270,18 +1329,19 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 	}
 
 	if (is_perfect) {
-		err = fdir_write_perfect_filter_82599(hw, &input, queue,
-						      fdircmd_flags, fdirhash,
-						      fdir_mode);
+		err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
+						      queue, fdircmd_flags,
+						      fdirhash, fdir_mode);
 	} else {
-		err = fdir_add_signature_filter_82599(hw, &input, queue,
-						      fdircmd_flags, fdirhash);
+		err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
+						      queue, fdircmd_flags,
+						      fdirhash);
 	}
 	if (err < 0) {
 		PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
 
 		if (add_node)
-			(void)ixgbe_remove_fdir_filter(info, &input);
+			(void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
 	} else {
 		PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
 	}
@@ -1289,6 +1349,29 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 	return err;
 }
 
+/* ixgbe_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the structure rte_eth_dev
+ * @fdir_filter: fdir filter entry
+ * @del: 1 - delete, 0 - add
+ * @update: 1 - update
+ */
+static int
+ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+			  const struct rte_eth_fdir_filter *fdir_filter,
+			  bool del,
+			  bool update)
+{
+	struct ixgbe_fdir_rule rule;
+	int err;
+
+	err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
+
+	if (err)
+		return err;
+
+	return ixgbe_fdir_filter_program(dev, &rule, del, update);
+}
+
 static int
 ixgbe_fdir_flush(struct rte_eth_dev *dev)
 {
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 16/18] net/ixgbe: create consistent filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (14 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 15/18] net/ixgbe: parse flow director filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-20 17:25   ` Ferruh Yigit
  2016-12-02 10:43 ` [PATCH 17/18] net/ixgbe: destroy " Wei Zhao
  2016-12-02 10:43 ` [PATCH 18/18] net/ixgbe: flush " Wei Zhao
  17 siblings, 1 reply; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

This patch adds a function to create the flow director filter.

Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 222 ++++++++++++++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h |   6 ++
 2 files changed, 227 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 75fadb0..b93c81d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -450,6 +450,11 @@ int ixgbe_flow_validate(struct rte_eth_dev *dev,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error);
+struct ixgbe_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -837,11 +842,48 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
 
 static const struct rte_flow_ops ixgbe_flow_ops = {
 	ixgbe_flow_validate,
+	(void *)ixgbe_flow_create,
 	NULL,
 	NULL,
 	NULL,
-	NULL,
 };
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+	struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+	struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+	struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+	struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+	struct rte_eth_l2_tunnel_conf filter_info;
+};
+
+
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+struct ixgbe_ntuple_filter_list filter_ntuple_list;
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+struct ixgbe_ethertype_filter_list filter_ethertype_list;
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+struct ixgbe_syn_filter_list filter_syn_list;
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
 
 /**
  * Atomically reads the link status information from global
@@ -1428,6 +1470,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	l2_tn_info->e_tag_en = FALSE;
 	l2_tn_info->e_tag_fwd_en = FALSE;
 	l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
+	TAILQ_INIT(&filter_ntuple_list);
+	TAILQ_INIT(&filter_ethertype_list);
+	TAILQ_INIT(&filter_syn_list);
+	TAILQ_INIT(&filter_fdir_list);
+	TAILQ_INIT(&filter_l2_tunnel_list);
 
 	return 0;
 }
@@ -9601,6 +9648,179 @@ ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
 
 	return ret;
 }
+/**
+ * Create or destroy a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the filter which it hits first.
+ * So, the sequence matters.
+ */
+struct ixgbe_flow *
+ixgbe_flow_create(struct rte_eth_dev *dev,
+		  const struct rte_flow_attr *attr,
+		  const struct rte_flow_item pattern[],
+		  const struct rte_flow_action actions[],
+		  struct rte_flow_error *error)
+{
+	int ret;
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct ixgbe_fdir_rule fdir_rule;
+	struct rte_eth_l2_tunnel_conf l2_tn_filter;
+	struct ixgbe_hw_fdir_info *fdir_info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	struct ixgbe_flow *flow = NULL;
+	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+
+	flow = rte_zmalloc("ixgbe_flow", sizeof(struct ixgbe_flow), 0);
+	if (!flow) {
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+		return flow;
+	}
+
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = ixgbe_parse_ntuple_filter(attr, pattern,
+			actions, &ntuple_filter);
+	error->type = ret;
+	if (!ret) {
+
+		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+		if (!ret) {
+			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
+				sizeof(struct ixgbe_ntuple_filter_ele), 0);
+			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+				&ntuple_filter,
+				sizeof(struct rte_eth_ntuple_filter));
+			TAILQ_INSERT_TAIL(&filter_ntuple_list,
+				ntuple_filter_ptr, entries);
+			flow->rule = ntuple_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+		}
+		return flow;
+	}
+
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = ixgbe_parse_ethertype_filter(attr, pattern,
+				actions, &ethertype_filter);
+	error->type = ret;
+	if (!ret) {
+		ret = ixgbe_add_del_ethertype_filter(dev,
+				&ethertype_filter, TRUE);
+		if (!ret) {
+			ethertype_filter_ptr = rte_zmalloc(
+				"ixgbe_ethertype_filter",
+				sizeof(struct ixgbe_ethertype_filter_ele), 0);
+			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+				&ethertype_filter,
+				sizeof(struct rte_eth_ethertype_filter));
+			TAILQ_INSERT_TAIL(&filter_ethertype_list,
+				ethertype_filter_ptr, entries);
+			flow->rule = ethertype_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+		}
+		return flow;
+	}
+
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter);
+	error->type = ret;
+	if (!ret) {
+		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
+		if (!ret) {
+			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
+				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
+			(void)rte_memcpy(&syn_filter_ptr->filter_info,
+				&syn_filter,
+				sizeof(struct rte_eth_syn_filter));
+			TAILQ_INSERT_TAIL(&filter_syn_list,
+				syn_filter_ptr,
+				entries);
+			flow->rule = syn_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_SYN;
+		}
+		return flow;
+	}
+
+	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+	ret = ixgbe_parse_fdir_filter(attr, pattern, actions, &fdir_rule);
+	error->type = ret;
+	if (!ret) {
+		/* A mask cannot be deleted. */
+		if (fdir_rule.b_mask) {
+			if (!fdir_info->mask_added) {
+				/* It's the first time the mask is set. */
+				rte_memcpy(&fdir_info->mask,
+					&fdir_rule.mask,
+					sizeof(struct ixgbe_hw_fdir_mask));
+				ret = ixgbe_fdir_set_input_mask(dev);
+				if (ret)
+					return NULL;
+
+				fdir_info->mask_added = TRUE;
+			} else {
+				/**
+				 * Only support one global mask,
+				 * all the masks should be the same.
+				 */
+				ret = memcmp(&fdir_info->mask,
+					&fdir_rule.mask,
+					sizeof(struct ixgbe_hw_fdir_mask));
+				if (ret)
+					return NULL;
+			}
+		}
+
+		if (fdir_rule.b_spec) {
+			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
+					FALSE, FALSE);
+			if (!ret) {
+				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
+					sizeof(struct ixgbe_fdir_rule_ele), 0);
+				(void)rte_memcpy(&fdir_rule_ptr->filter_info,
+					&fdir_rule,
+					sizeof(struct ixgbe_fdir_rule));
+				TAILQ_INSERT_TAIL(&filter_fdir_list,
+					fdir_rule_ptr, entries);
+				flow->rule = fdir_rule_ptr;
+				flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+				return flow;
+			}
+
+			if (ret)
+				return NULL;
+		}
+
+		return NULL;
+	}
+
+	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+	ret = cons_parse_l2_tn_filter(attr, pattern, actions, &l2_tn_filter);
+	error->type = ret;
+	if (!ret) {
+		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
+		if (!ret) {
+			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
+				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
+			(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
+				&l2_tn_filter,
+				sizeof(struct rte_eth_l2_tunnel_conf));
+			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
+				l2_tn_filter_ptr, entries);
+			flow->rule = l2_tn_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+
+			return flow;
+		}
+	}
+
+	rte_free(flow);
+	return NULL;
+}
 
 /* Check whether a flow rule can be created on ixgbe. */
 int
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index d23ad67..da0fab5 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -329,6 +329,12 @@ struct ixgbe_l2_tn_info {
 	bool e_tag_ether_type; /* ether type for e-tag */
 };
 
+struct ixgbe_flow {
+	enum rte_filter_type filter_type;
+	void *rule;
+};
+
+
 /*
  * Structure to store private data for each driver instance (for each port).
  */
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 17/18] net/ixgbe: destroy consistent filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (15 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 16/18] net/ixgbe: create consistent filter Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  2016-12-02 10:43 ` [PATCH 18/18] net/ixgbe: flush " Wei Zhao
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

This patch adds a function to destroy the flow director filter.

Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 110 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 109 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index b93c81d..23efc57 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -455,6 +455,9 @@ struct ixgbe_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error);
+int ixgbe_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -843,7 +846,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
 static const struct rte_flow_ops ixgbe_flow_ops = {
 	ixgbe_flow_validate,
 	(void *)ixgbe_flow_create,
-	NULL,
+	ixgbe_flow_destroy,
 	NULL,
 	NULL,
 };
@@ -9838,6 +9841,111 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
 
 }
 
+/* Destroy a flow rule on ixgbe. */
+int
+ixgbe_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	int ret;
+	struct ixgbe_flow *pmd_flow = (struct ixgbe_flow *)flow;
+	enum rte_filter_type filter_type = pmd_flow->filter_type;
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct ixgbe_fdir_rule fdir_rule;
+	struct rte_eth_l2_tunnel_conf l2_tn_filter;
+	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_NTUPLE:
+		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
+					pmd_flow->rule;
+		(void)rte_memcpy(&ntuple_filter,
+			&ntuple_filter_ptr->filter_info,
+			sizeof(struct rte_eth_ntuple_filter));
+		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_ntuple_list,
+			ntuple_filter_ptr,
+			entries);
+			rte_free(ntuple_filter_ptr);
+			rte_free(flow);
+		}
+		break;
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
+					pmd_flow->rule;
+		(void)rte_memcpy(&ethertype_filter,
+			&ethertype_filter_ptr->filter_info,
+			sizeof(struct rte_eth_ethertype_filter));
+		ret = ixgbe_add_del_ethertype_filter(dev,
+				&ethertype_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_ethertype_list,
+				ethertype_filter_ptr, entries);
+			rte_free(ethertype_filter_ptr);
+			rte_free(flow);
+		}
+		break;
+	case RTE_ETH_FILTER_SYN:
+		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
+				pmd_flow->rule;
+		(void)rte_memcpy(&syn_filter,
+			&syn_filter_ptr->filter_info,
+			sizeof(struct rte_eth_syn_filter));
+		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_syn_list,
+				syn_filter_ptr, entries);
+			rte_free(syn_filter_ptr);
+			rte_free(flow);
+		}
+		break;
+	case RTE_ETH_FILTER_FDIR:
+		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
+		(void)rte_memcpy(&fdir_rule,
+			&fdir_rule_ptr->filter_info,
+			sizeof(struct ixgbe_fdir_rule));
+		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_fdir_list,
+				fdir_rule_ptr, entries);
+			rte_free(fdir_rule_ptr);
+			rte_free(flow);
+		}
+		break;
+	case RTE_ETH_FILTER_L2_TUNNEL:
+		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
+				pmd_flow->rule;
+		(void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+			sizeof(struct rte_eth_l2_tunnel_conf));
+		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_l2_tunnel_list,
+				l2_tn_filter_ptr, entries);
+			rte_free(l2_tn_filter_ptr);
+			rte_free(flow);
+		}
+
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+			    filter_type);
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		error->type = RTE_FLOW_ERROR_TYPE_HANDLE;
+
+	return ret;
+
+
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 18/18] net/ixgbe: flush consistent filter
  2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
                   ` (16 preceding siblings ...)
  2016-12-02 10:43 ` [PATCH 17/18] net/ixgbe: destroy " Wei Zhao
@ 2016-12-02 10:43 ` Wei Zhao
  17 siblings, 0 replies; 36+ messages in thread
From: Wei Zhao @ 2016-12-02 10:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei zhao1

From: wei zhao1 <wei.zhao1@intel.com>

This patch adds a function to flush all the flow director
filters on a port.

Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 23efc57..e420a3f 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -458,6 +458,8 @@ struct ixgbe_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
 int ixgbe_flow_destroy(struct rte_eth_dev *dev,
 		struct rte_flow *flow,
 		struct rte_flow_error *error);
+int ixgbe_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -847,7 +849,7 @@ static const struct rte_flow_ops ixgbe_flow_ops = {
 	ixgbe_flow_validate,
 	(void *)ixgbe_flow_create,
 	ixgbe_flow_destroy,
-	NULL,
+	ixgbe_flow_flush,
 	NULL,
 };
 /* ntuple filter list structure */
@@ -9946,6 +9948,22 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 
 }
 
+/*  Destroy all flow rules associated with a port on ixgbe. */
+int
+ixgbe_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+
+	error->type = RTE_FLOW_ERROR_TYPE_NONE;
+	if((ret = ixgbe_flush_all_filter(dev)) != 0) {
+		error->type = RTE_FLOW_ERROR_TYPE_HANDLE;
+		return ret;
+	}
+
+	return ret;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* Re: [PATCH 01/18] net/ixgbe: store SYN filter
  2016-12-02 10:42 ` [PATCH 01/18] net/ixgbe: store SYN filter Wei Zhao
@ 2016-12-20 16:55   ` Ferruh Yigit
  2016-12-22  9:48     ` Zhao1, Wei
  2016-12-26  1:47     ` Zhao1, Wei
  0 siblings, 2 replies; 36+ messages in thread
From: Ferruh Yigit @ 2016-12-20 16:55 UTC (permalink / raw)
  To: Wei Zhao, dev; +Cc: wenzhuo.lu

On 12/2/2016 10:42 AM, Wei Zhao wrote:
> From: wei zhao1 <wei.zhao1@intel.com>
> 
> Add support for storing SYN filter in SW.

Do you think does it makes more clear to refer as TCP SYN filter? Or SYN
filter is clear enough?

> 
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>

Can you please update sign-off to your actual name?

> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c | 12 ++++++++++--
>  drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
>  2 files changed, 12 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index edc9b22..7f10cca 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -1287,6 +1287,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
>  	memset(filter_info->fivetuple_mask, 0,
>  	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
>  
> +	/* initialize SYN filter */
> +	filter_info->syn_info = 0;

can it be an option to memset all filter_info? (and of course move list
init after memset)

>  	return 0;
>  }
>  
> @@ -5509,15 +5511,19 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
>  			bool add)
>  {
>  	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct ixgbe_filter_info *filter_info =
> +		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
> +	uint32_t syn_info;
>  	uint32_t synqf;
>  
>  	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
>  		return -EINVAL;
>  
> +	syn_info = filter_info->syn_info;
>  	synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
>  
>  	if (add) {
> -		if (synqf & IXGBE_SYN_FILTER_ENABLE)
> +		if (syn_info & IXGBE_SYN_FILTER_ENABLE)

If these checks will be done on syn_info, shouldn't syn_info be assigned
to synqf before this. Specially for first usage, synqf may be different
than hw register.

Or perhaps can keep continue to use synqf. Since synqf assigned to
filter_info->syn_info after updated.

>  			return -EINVAL;
>  		synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
>  			IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
> @@ -5527,10 +5533,12 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
>  		else
>  			synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
>  	} else {
> -		if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
> +		if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
>  			return -ENOENT;
>  		synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
>  	}
> +
> +	filter_info->syn_info = synqf;
>  	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
>  	IXGBE_WRITE_FLUSH(hw);
>  	return 0;
<...>

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 02/18] net/ixgbe: store flow director filter
  2016-12-02 10:42 ` [PATCH 02/18] net/ixgbe: store flow director filter Wei Zhao
@ 2016-12-20 16:58   ` Ferruh Yigit
  2016-12-26  2:50     ` Zhao1, Wei
  0 siblings, 1 reply; 36+ messages in thread
From: Ferruh Yigit @ 2016-12-20 16:58 UTC (permalink / raw)
  To: Wei Zhao, dev; +Cc: wenzhuo.lu

On 12/2/2016 10:42 AM, Wei Zhao wrote:
> From: wei zhao1 <wei.zhao1@intel.com>
> 
> Add support for storing flow director filter in SW.
> 
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c |  48 ++++++++++++++++++
>  drivers/net/ixgbe/ixgbe_ethdev.h |  19 ++++++-
>  drivers/net/ixgbe/ixgbe_fdir.c   | 105 ++++++++++++++++++++++++++++++++++++++-
>  3 files changed, 169 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 7f10cca..f8e5fe1 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
<...>
> @@ -1289,6 +1302,25 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
>  
>  	/* initialize SYN filter */
>  	filter_info->syn_info = 0;
> +	/* initialize flow director filter list & hash */
> +	TAILQ_INIT(&fdir_info->fdir_list);
> +	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
> +		 "fdir_%s", eth_dev->data->name);
> +	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);

Do we really create a hash table in device init? Is there a way to do
this if we know user will use it?

> +	if (!fdir_info->hash_handle) {
> +		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
> +		return -EINVAL;

And should we exit here? What if user will not use flow director at all?

> +	}
> +	fdir_info->hash_map = rte_zmalloc("ixgbe",
> +					  sizeof(struct ixgbe_fdir_filter *) *
> +					  IXGBE_MAX_FDIR_FILTER_NUM,
> +					  0);
> +	if (!fdir_info->hash_map) {
> +		PMD_INIT_LOG(ERR,
> +			     "Failed to allocate memory for fdir hash map!");
> +		return -ENOMEM;
> +	}
> +

Can you please extract these into functions, to have more clear
init/uninit functions?

>  	return 0;
>  }
>  
> @@ -1297,6 +1329,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
>  {
>  	struct rte_pci_device *pci_dev;
>  	struct ixgbe_hw *hw;
> +	struct ixgbe_hw_fdir_info *fdir_info =
> +		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
> +	struct ixgbe_fdir_filter *fdir_filter;
>  
>  	PMD_INIT_FUNC_TRACE();
>  
> @@ -1330,6 +1365,19 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
>  	rte_free(eth_dev->data->hash_mac_addrs);
>  	eth_dev->data->hash_mac_addrs = NULL;
>  
> +	/* remove all the fdir filters & hash */
> +	if (fdir_info->hash_map)
> +		rte_free(fdir_info->hash_map);
> +	if (fdir_info->hash_handle)
> +		rte_hash_free(fdir_info->hash_handle);

All rte_hash_xxx() APIs gives build error for shared library build, [1]
needs to be added into Makefile.

But, this makes hash library a dependency to the PMD, do you think is
there a way to escape from this?

[1]
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net lib/librte_hash

> +
> +	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
> +		TAILQ_REMOVE(&fdir_info->fdir_list,
> +			     fdir_filter,
> +			     entries);
> +		rte_free(fdir_filter);
> +	}
> +
>  	return 0;
>  }
>  
<...>

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 04/18] net/ixgbe: restore n-tuple filter
  2016-12-02 10:43 ` [PATCH 04/18] net/ixgbe: restore n-tuple filter Wei Zhao
@ 2016-12-20 16:58   ` Ferruh Yigit
  2016-12-26  3:32     ` Zhao1, Wei
  0 siblings, 1 reply; 36+ messages in thread
From: Ferruh Yigit @ 2016-12-20 16:58 UTC (permalink / raw)
  To: Zhao1, Wei, dev; +Cc: Lu, Wenzhuo

On 12/2/2016 10:43 AM, Wei Zhao wrote:
> From: wei zhao1 <wei.zhao1@intel.com>
> 
> Add support for restoring n-tuple filter in SW.
> 
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c | 131 +++++++++++++++++++++++++--------------
>  1 file changed, 83 insertions(+), 48 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
<...>
> @@ -2482,6 +2496,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
>  
>  	/* resume enabled intr since hw reset */
>  	ixgbe_enable_intr(dev);
> +	ixgbe_filter_restore(dev);

Just to double check, when you stop the device does 5tuple_filter reset?
If not reset, will adding same filters cause any problem?

>  
>  	return 0;
>  
<...>

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 15/18] net/ixgbe: parse flow director filter
  2016-12-02 10:43 ` [PATCH 15/18] net/ixgbe: parse flow director filter Wei Zhao
@ 2016-12-20 17:00   ` Ferruh Yigit
  2016-12-22  9:19     ` Zhao1, Wei
  0 siblings, 1 reply; 36+ messages in thread
From: Ferruh Yigit @ 2016-12-20 17:00 UTC (permalink / raw)
  To: Wei Zhao, dev; +Cc: wenzhuo.lu

On 12/2/2016 10:43 AM, Wei Zhao wrote:
> From: wei zhao1 <wei.zhao1@intel.com>
> 
> check if the rule is a flow director rule, and get the flow director info.
> 
> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> ---

<...>

> +	PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
> +			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
> +	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
> +	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
> +	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
> +	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
> +	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
> +	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {

This gives build error [1], there are a few more same usage:

.../drivers/net/ixgbe/ixgbe_ethdev.c:9238:17: error: comparison of
constant 241 with expression of type 'const enum rte_flow_item_type' is
always true [-Werror,-Wtautological-constant-out-of-range-compare]
            item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 11/18] net/ixgbe: parse n-tuple filter
  2016-12-02 10:43 ` [PATCH 11/18] net/ixgbe: parse n-tuple filter Wei Zhao
@ 2016-12-20 17:23   ` Ferruh Yigit
  0 siblings, 0 replies; 36+ messages in thread
From: Ferruh Yigit @ 2016-12-20 17:23 UTC (permalink / raw)
  To: Wei Zhao, dev; +Cc: wenzhuo.lu

On 12/2/2016 10:43 AM, Wei Zhao wrote:
> From: wei zhao1 <wei.zhao1@intel.com>
> 
> Add rule validate function and check if the rule is a n-tuple rule,
> and get the n-tuple info.
> 
> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c | 349 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 349 insertions(+)

<...>

> + */
> +static enum rte_flow_error_type
> +cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
> +			 const struct rte_flow_item pattern[],
> +			 const struct rte_flow_action actions[],
> +			 struct rte_eth_ntuple_filter *filter)
> +{

<...>

> +
> +	return 0;

This gives build error [1] with ICC, there are a few same usage, return
type is enum but returning an int.

[1]
/tmp/dpdk_maintain/ixgbe_flow/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c(8432):
error #188: enumerated type mixed with another type
        return 0;
               ^

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 16/18] net/ixgbe: create consistent filter
  2016-12-02 10:43 ` [PATCH 16/18] net/ixgbe: create consistent filter Wei Zhao
@ 2016-12-20 17:25   ` Ferruh Yigit
  2016-12-23  6:26     ` Zhao1, Wei
  0 siblings, 1 reply; 36+ messages in thread
From: Ferruh Yigit @ 2016-12-20 17:25 UTC (permalink / raw)
  To: Wei Zhao, dev; +Cc: wenzhuo.lu

On 12/2/2016 10:43 AM, Wei Zhao wrote:
> From: wei zhao1 <wei.zhao1@intel.com>
> 
> This patch adds a function to create the flow directory filter.
> 
> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>

<...>

> +/**
> + * Create or destroy a flow rule.
> + * Theorically one rule can match more than one filters.
> + * We will let it use the filter which it hitt first.
> + * So, the sequence matters.
> + */
> +struct ixgbe_flow *
> +ixgbe_flow_create(struct rte_eth_dev *dev,
> +		  const struct rte_flow_attr *attr,
> +		  const struct rte_flow_item pattern[],
> +		  const struct rte_flow_action actions[],
> +		  struct rte_flow_error *error)
> +{
> +	int ret;
> +	struct rte_eth_ntuple_filter ntuple_filter;
<...>
> +	error->type = ret;

This also returns same ICC error, there are a few more same usage:

.../drivers/net/ixgbe/ixgbe_ethdev.c(9764): error #188: enumerated type
mixed with another type
        error->type = ret;
                    ^

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 15/18] net/ixgbe: parse flow director filter
  2016-12-20 17:00   ` Ferruh Yigit
@ 2016-12-22  9:19     ` Zhao1, Wei
  2016-12-22 10:44       ` Ferruh Yigit
  0 siblings, 1 reply; 36+ messages in thread
From: Zhao1, Wei @ 2016-12-22  9:19 UTC (permalink / raw)
  To: Yigit, Ferruh, dev; +Cc: Lu, Wenzhuo

Hi, Yigit

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, December 21, 2016 1:01 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 15/18] net/ixgbe: parse flow director filter
> 
> On 12/2/2016 10:43 AM, Wei Zhao wrote:
> > From: wei zhao1 <wei.zhao1@intel.com>
> >
> > check if the rule is a flow director rule, and get the flow director info.
> >
> > Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > ---
> 
> <...>
> 
> > +	PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
> > +			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
> > +	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
> > +	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
> > +	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
> > +	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
> > +	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
> > +	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
> 
> This gives build error [1], there are a few more same usage:
> 
> .../drivers/net/ixgbe/ixgbe_ethdev.c:9238:17: error: comparison of constant
> 241 with expression of type 'const enum rte_flow_item_type' is always true
> [-Werror,-Wtautological-constant-out-of-range-compare]
>             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
> 
> 
> 

Ok, I will add two type definitions, RTE_FLOW_ITEM_TYPE_NVGRE and RTE_FLOW_ITEM_TYPE_E_TAG, into the enum rte_flow_item_type to eliminate this problem.
Thank you.

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 01/18] net/ixgbe: store SYN filter
  2016-12-20 16:55   ` Ferruh Yigit
@ 2016-12-22  9:48     ` Zhao1, Wei
  2017-01-06 16:26       ` Ferruh Yigit
  2016-12-26  1:47     ` Zhao1, Wei
  1 sibling, 1 reply; 36+ messages in thread
From: Zhao1, Wei @ 2016-12-22  9:48 UTC (permalink / raw)
  To: Yigit, Ferruh, dev; +Cc: Lu, Wenzhuo

Hi, Yigit

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, December 21, 2016 12:56 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 01/18] net/ixgbe: store SYN filter
> 
> On 12/2/2016 10:42 AM, Wei Zhao wrote:
> > From: wei zhao1 <wei.zhao1@intel.com>
> >
> > Add support for storing SYN filter in SW.
> 
> Do you think does it makes more clear to refer as TCP SYN filter? Or SYN filter
> is clear enough?
> 

Ok, I will change it to "TCP SYN filter" to make it clearer.

> >
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> 
> Can you please update sign-off to your actual name?
>

Ok, I will change to " Signed-off-by: Wei Zhao <wei.zhao1@intel.com>"

> > ---
> >  drivers/net/ixgbe/ixgbe_ethdev.c | 12 ++++++++++--
> > drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
> >  2 files changed, 12 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c
> > b/drivers/net/ixgbe/ixgbe_ethdev.c
> > index edc9b22..7f10cca 100644
> > --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> > +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> > @@ -1287,6 +1287,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
> >  	memset(filter_info->fivetuple_mask, 0,
> >  	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
> >
> > +	/* initialize SYN filter */
> > +	filter_info->syn_info = 0;
> 
> can it be an option to memset all filter_info? (and of course move list init
> after memset)
> 

Maybe, change to the following code?

memset(filter_info, 0, sizeof(struct ixgbe_filter_info)); 
TAILQ_INIT(&filter_info->fivetuple_list);

But that will mix the /* initialize ether type filter */ and /* initialize 5tuple filter list */ steps together,
because struct ixgbe_filter_info stores both the ethertype and the 5tuple filter info.
So, should I leave it unchanged?

struct ixgbe_filter_info {
	uint8_t ethertype_mask;  /* Bit mask for every used ethertype filter */
	/* store used ethertype filters*/
	struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
	/* Bit mask for every used 5tuple filter */
	uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
	struct ixgbe_5tuple_filter_list fivetuple_list;
	/* store the SYN filter info */
	uint32_t syn_info;
};


> >  	return 0;
> >  }
> >
> > @@ -5509,15 +5511,19 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
> >  			bool add)
> >  {
> >  	struct ixgbe_hw *hw =
> > IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +	struct ixgbe_filter_info *filter_info =
> > +		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data-
> >dev_private);
> > +	uint32_t syn_info;
> >  	uint32_t synqf;
> >
> >  	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
> >  		return -EINVAL;
> >
> > +	syn_info = filter_info->syn_info;
> >  	synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
> >
> >  	if (add) {
> > -		if (synqf & IXGBE_SYN_FILTER_ENABLE)
> > +		if (syn_info & IXGBE_SYN_FILTER_ENABLE)
> 
> If these checks will be done on syn_info, shouldn't syn_info be assigned to
> synqf before this. Specially for first usage, synqf may be different than hw
> register.
> 
> Or perhaps can keep continue to use synqf. Since synqf assigned to
> filter_info->syn_info after updated.
> 

Let me have a deeper think of this to reply you.


> >  			return -EINVAL;
> >  		synqf = (uint32_t)(((filter->queue <<
> IXGBE_SYN_FILTER_QUEUE_SHIFT) &
> >  			IXGBE_SYN_FILTER_QUEUE) |
> IXGBE_SYN_FILTER_ENABLE); @@ -5527,10
> > +5533,12 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
> >  		else
> >  			synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
> >  	} else {
> > -		if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
> > +		if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
> >  			return -ENOENT;
> >  		synqf &= ~(IXGBE_SYN_FILTER_QUEUE |
> IXGBE_SYN_FILTER_ENABLE);
> >  	}
> > +
> > +	filter_info->syn_info = synqf;
> >  	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
> >  	IXGBE_WRITE_FLUSH(hw);
> >  	return 0;
> <...>
> 

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 15/18] net/ixgbe: parse flow director filter
  2016-12-22  9:19     ` Zhao1, Wei
@ 2016-12-22 10:44       ` Ferruh Yigit
  2016-12-23  8:13         ` Adrien Mazarguil
  0 siblings, 1 reply; 36+ messages in thread
From: Ferruh Yigit @ 2016-12-22 10:44 UTC (permalink / raw)
  To: Zhao1, Wei, dev; +Cc: Lu, Wenzhuo, Adrien Mazarguil

On 12/22/2016 9:19 AM, Zhao1, Wei wrote:
> Hi, Yigit
> 
>> -----Original Message-----
>> From: Yigit, Ferruh
>> Sent: Wednesday, December 21, 2016 1:01 AM
>> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
>> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
>> Subject: Re: [dpdk-dev] [PATCH 15/18] net/ixgbe: parse flow director filter
>>
>> On 12/2/2016 10:43 AM, Wei Zhao wrote:
>>> From: wei zhao1 <wei.zhao1@intel.com>
>>>
>>> check if the rule is a flow director rule, and get the flow director info.
>>>
>>> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
>>> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
>>> ---
>>
>> <...>
>>
>>> +	PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
>>> +			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
>>> +	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
>>> +	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
>>> +	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
>>> +	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
>>> +	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
>>> +	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
>>
>> This gives build error [1], there are a few more same usage:
>>
>> .../drivers/net/ixgbe/ixgbe_ethdev.c:9238:17: error: comparison of constant
>> 241 with expression of type 'const enum rte_flow_item_type' is always true
>> [-Werror,-Wtautological-constant-out-of-range-compare]
>>             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
>>
>>
>>
> 
> Ok, I will add two type definition RTE_FLOW_ITEM_TYPE_NVGRE and RTE_FLOW_ITEM_TYPE_E_TAG  into const enum rte_flow_item_type to eliminate this problem.
> Thank you.
> 

CC: Adrien Mazarguil <adrien.mazarguil@6wind.com>

Yes, that is what right thing to do, since rte_flow patchset not merged
yet, perhaps Adrien may want to include this as next version of his
patchset?

What do you think Adrien?

Thanks,
ferruh

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 16/18] net/ixgbe: create consistent filter
  2016-12-20 17:25   ` Ferruh Yigit
@ 2016-12-23  6:26     ` Zhao1, Wei
  0 siblings, 0 replies; 36+ messages in thread
From: Zhao1, Wei @ 2016-12-23  6:26 UTC (permalink / raw)
  To: Yigit, Ferruh, dev; +Cc: Lu, Wenzhuo

Hi, Yigit

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, December 21, 2016 1:25 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 16/18] net/ixgbe: create consistent filter
> 
> On 12/2/2016 10:43 AM, Wei Zhao wrote:
> > From: wei zhao1 <wei.zhao1@intel.com>
> >
> > This patch adds a function to create the flow directory filter.
> >
> > Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> 
> <...>
> 
> > +/**
> > + * Create or destroy a flow rule.
> > + * Theorically one rule can match more than one filters.
> > + * We will let it use the filter which it hitt first.
> > + * So, the sequence matters.
> > + */
> > +struct ixgbe_flow *
> > +ixgbe_flow_create(struct rte_eth_dev *dev,
> > +		  const struct rte_flow_attr *attr,
> > +		  const struct rte_flow_item pattern[],
> > +		  const struct rte_flow_action actions[],
> > +		  struct rte_flow_error *error)
> > +{
> > +	int ret;
> > +	struct rte_eth_ntuple_filter ntuple_filter;
> <...>
> > +	error->type = ret;
> 
> This also returns same ICC error, there are a few more same usage:
> 
> .../drivers/net/ixgbe/ixgbe_ethdev.c(9764): error #188: enumerated type
> mixed with another type
>         error->type = ret;
>                     ^
> 
> 
> 
Thank you for the warning. I will build the v2 patch with both ICC and GCC before sending it later.

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 15/18] net/ixgbe: parse flow director filter
  2016-12-22 10:44       ` Ferruh Yigit
@ 2016-12-23  8:13         ` Adrien Mazarguil
  2016-12-27  3:31           ` Zhao1, Wei
  0 siblings, 1 reply; 36+ messages in thread
From: Adrien Mazarguil @ 2016-12-23  8:13 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: Zhao1, Wei, dev, Lu, Wenzhuo

Hi,

On Thu, Dec 22, 2016 at 10:44:32AM +0000, Ferruh Yigit wrote:
> On 12/22/2016 9:19 AM, Zhao1, Wei wrote:
> > Hi, Yigit
> > 
> >> -----Original Message-----
> >> From: Yigit, Ferruh
> >> Sent: Wednesday, December 21, 2016 1:01 AM
> >> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> >> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> >> Subject: Re: [dpdk-dev] [PATCH 15/18] net/ixgbe: parse flow director filter
> >>
> >> On 12/2/2016 10:43 AM, Wei Zhao wrote:
> >>> From: wei zhao1 <wei.zhao1@intel.com>
> >>>
> >>> check if the rule is a flow director rule, and get the flow director info.
> >>>
> >>> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> >>> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> >>> ---
> >>
> >> <...>
> >>
> >>> +	PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
> >>> +			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
> >>> +	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
> >>> +	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
> >>> +	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
> >>> +	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
> >>> +	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
> >>> +	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
> >>
> >> This gives build error [1], there are a few more same usage:
> >>
> >> .../drivers/net/ixgbe/ixgbe_ethdev.c:9238:17: error: comparison of constant
> >> 241 with expression of type 'const enum rte_flow_item_type' is always true
> >> [-Werror,-Wtautological-constant-out-of-range-compare]
> >>             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
> >>
> >>
> >>
> > 
> > Ok, I will add two type definition RTE_FLOW_ITEM_TYPE_NVGRE and RTE_FLOW_ITEM_TYPE_E_TAG  into const enum rte_flow_item_type to eliminate this problem.
> > Thank you.
> > 
> 
> CC: Adrien Mazarguil <adrien.mazarguil@6wind.com>

Thanks, the original message did not catch my attention.

> Yes, that is what right thing to do, since rte_flow patchset not merged
> yet, perhaps Adrien may want to include this as next version of his
> patchset?
> 
> What do you think Adrien?

I think by now the rte_flow series is automatically categorized as spam by
half the community already, and that new items such as these can be added
subsequently on their own, ideally before the entire ixgbe/i40e series.

I have a few comments regarding these new items if we want to make them part
of rte_flow.h (see definitions below).

Unfortunately, even though they are super convenient to use and expose in the
testpmd flow command, we need to avoid C-style bit-field definitions such as
these for protocol header matching because they are not endian-safe,
particularly multi-byte fields. Only meta-data that should be interpreted
with host endianness can use them (e.g. struct rte_flow_attr, struct
rte_flow_action_vf, etc):

 struct rte_flow_item_nvgre {
        uint32_t flags0:1; /**< 0 */
        uint32_t rsvd1:1; /**< 1 bit not defined */
        uint32_t flags1:2; /**< 2 bits, 1 0 */
        uint32_t rsvd0:9; /**< Reserved0 */
        uint32_t ver:3; /**< version */
        uint32_t protocol:16; /**< protocol type, 0x6558 */
        uint32_t tni:24; /**< tenant network ID or virtual subnet ID */
        uint32_t flow_id:8; /**< flow ID or Reserved */
 };

For an example how to avoid them, see struct ipv6_hdr definition in
rte_ip.h, where field vtc_flow is 32 bit but covers three protocol fields
and is considered big-endian (Nelio's endianness series [1] would be really
handy to eliminate confusion here). Also see struct rte_flow_item_vxlan,
which covers 24-bit fields using uint8_t arrays.

 struct rte_flow_item_e_tag {
        struct ether_addr dst; /**< Destination MAC. */
        struct ether_addr src; /**< Source MAC. */
        uint16_t e_tag_ethertype; /**< E-tag EtherType, 0x893F. */
        uint16_t e_pcp:3; /**<  E-PCP */
        uint16_t dei:1; /**< DEI */
        uint16_t in_e_cid_base:12; /**< Ingress E-CID base */
        uint16_t rsv:2; /**< reserved */
        uint16_t grp:2; /**< GRP */
        uint16_t e_cid_base:12; /**< E-CID base */
        uint16_t in_e_cid_ext:8; /**< Ingress E-CID extend */
        uint16_t e_cid_ext:8; /**< E-CID extend */
        uint16_t type; /**< MAC type. */
        unsigned int tags; /**< Number of 802.1Q/ad tags defined. */
        struct {
                uint16_t tpid; /**< Tag protocol identifier. */
                uint16_t tci; /**< Tag control information. */
        } tag[]; /**< 802.1Q/ad tag definitions, outermost first. */
 };

Besides the bit-field issue for this one, looks like it should be split, at
least the initial part is already covered by rte_flow_item_eth.

I do not know much about E-Tag (IEEE 802.1BR right?) but it sort of looks
like a 2.5 layer protocol reminiscent of VLAN.

tags and tag[] fields seem based on the VLAN definition of the original
rte_flow RFC that has since been replaced with stacked rte_flow_item_vlan
items, much easier to program for. Perhaps this can be relied on instead of
having e_tag implement its own variant.

As a protocol-matching item and E-Tag TCI being 6 bytes according to IEEE
802.1BR, sizeof(struct rte_flow_item_e_tag) should ideally return 6 as well
and would likely comprise three big-endian uint16_t fields. See how
PCP/DEI/VID VLAN fields are interpreted in testpmd [2].

Again, these concerns only stand if you intend to include these definitions
into rte_flow.h.

[1] http://dpdk.org/ml/archives/dev/2016-November/050060.html
[2] http://dpdk.org/ml/archives/dev/2016-December/052976.html

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 01/18] net/ixgbe: store SYN filter
  2016-12-20 16:55   ` Ferruh Yigit
  2016-12-22  9:48     ` Zhao1, Wei
@ 2016-12-26  1:47     ` Zhao1, Wei
  1 sibling, 0 replies; 36+ messages in thread
From: Zhao1, Wei @ 2016-12-26  1:47 UTC (permalink / raw)
  To: Yigit, Ferruh, dev; +Cc: Lu, Wenzhuo

Hi, Ferruh

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, December 21, 2016 12:56 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 01/18] net/ixgbe: store SYN filter
> 
> On 12/2/2016 10:42 AM, Wei Zhao wrote:
> > From: wei zhao1 <wei.zhao1@intel.com>
> >
> > Add support for storing SYN filter in SW.
> 
> Do you think does it makes more clear to refer as TCP SYN filter? Or SYN filter
> is clear enough?
> 
> >
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> 
> Can you please update sign-off to your actual name?
> 
> > ---
> >  drivers/net/ixgbe/ixgbe_ethdev.c | 12 ++++++++++--
> > drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
> >  2 files changed, 12 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c
> > b/drivers/net/ixgbe/ixgbe_ethdev.c
> > index edc9b22..7f10cca 100644
> > --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> > +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> > @@ -1287,6 +1287,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
> >  	memset(filter_info->fivetuple_mask, 0,
> >  	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
> >
> > +	/* initialize SYN filter */
> > +	filter_info->syn_info = 0;
> 
> can it be an option to memset all filter_info? (and of course move list init
> after memset)
> 
> >  	return 0;
> >  }
> >
> > @@ -5509,15 +5511,19 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
> >  			bool add)
> >  {
> >  	struct ixgbe_hw *hw =
> > IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +	struct ixgbe_filter_info *filter_info =
> > +		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data-
> >dev_private);
> > +	uint32_t syn_info;
> >  	uint32_t synqf;
> >
> >  	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
> >  		return -EINVAL;
> >
> > +	syn_info = filter_info->syn_info;
> >  	synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
> >
> >  	if (add) {
> > -		if (synqf & IXGBE_SYN_FILTER_ENABLE)
> > +		if (syn_info & IXGBE_SYN_FILTER_ENABLE)
> 
> If these checks will be done on syn_info, shouldn't syn_info be assigned to
> synqf before this. Specially for first usage, synqf may be different than hw
> register.
> 
> Or perhaps can keep continue to use synqf. Since synqf assigned to
> filter_info->syn_info after updated.
> 

Ok, this code is a little vague: in the "add" branch synqf is assigned a new value, so "synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF)" is useless there.
The synqf value read from HW is only used in the "else" branch, so I will make a small code change here.
Thank you for your suggestion.  

> >  			return -EINVAL;
> >  		synqf = (uint32_t)(((filter->queue <<
> IXGBE_SYN_FILTER_QUEUE_SHIFT) &
> >  			IXGBE_SYN_FILTER_QUEUE) |
> IXGBE_SYN_FILTER_ENABLE); @@ -5527,10
> > +5533,12 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
> >  		else
> >  			synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
> >  	} else {
> > -		if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
> > +		if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
> >  			return -ENOENT;
> >  		synqf &= ~(IXGBE_SYN_FILTER_QUEUE |
> IXGBE_SYN_FILTER_ENABLE);
> >  	}
> > +
> > +	filter_info->syn_info = synqf;
> >  	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
> >  	IXGBE_WRITE_FLUSH(hw);
> >  	return 0;
> <...>
> 

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 02/18] net/ixgbe: store flow director filter
  2016-12-20 16:58   ` Ferruh Yigit
@ 2016-12-26  2:50     ` Zhao1, Wei
  0 siblings, 0 replies; 36+ messages in thread
From: Zhao1, Wei @ 2016-12-26  2:50 UTC (permalink / raw)
  To: Yigit, Ferruh, dev; +Cc: Lu, Wenzhuo

Hi,  Ferruh

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, December 21, 2016 12:58 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 02/18] net/ixgbe: store flow director filter
> 
> On 12/2/2016 10:42 AM, Wei Zhao wrote:
> > From: wei zhao1 <wei.zhao1@intel.com>
> >
> > Add support for storing flow director filter in SW.
> >
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> > ---
> >  drivers/net/ixgbe/ixgbe_ethdev.c |  48 ++++++++++++++++++
> > drivers/net/ixgbe/ixgbe_ethdev.h |  19 ++++++-
> >  drivers/net/ixgbe/ixgbe_fdir.c   | 105
> ++++++++++++++++++++++++++++++++++++++-
> >  3 files changed, 169 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c
> > b/drivers/net/ixgbe/ixgbe_ethdev.c
> > index 7f10cca..f8e5fe1 100644
> > --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> > +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> <...>
> > @@ -1289,6 +1302,25 @@ eth_ixgbe_dev_init(struct rte_eth_dev
> *eth_dev)
> >
> >  	/* initialize SYN filter */
> >  	filter_info->syn_info = 0;
> > +	/* initialize flow director filter list & hash */
> > +	TAILQ_INIT(&fdir_info->fdir_list);
> > +	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
> > +		 "fdir_%s", eth_dev->data->name);
> > +	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
> 
> Do we really create a hash table in device init? Is there a way to do this if we
> know user will use it?

By now, we have no idea whether the user will use the flow director or not.
So, our strategy is to init all types of filters and give the option to users.

> 
> > +	if (!fdir_info->hash_handle) {
> > +		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
> > +		return -EINVAL;
> 
> And should we exit here? What if user will not use flow director at all?

Maybe, we can delete "return -EINVAL;" and only print the error log as a warning.
But some users may not pay attention to a warning error log sometimes, so we chose to
exit if any init fails.

> 
> > +	}
> > +	fdir_info->hash_map = rte_zmalloc("ixgbe",
> > +					  sizeof(struct ixgbe_fdir_filter *) *
> > +					  IXGBE_MAX_FDIR_FILTER_NUM,
> > +					  0);
> > +	if (!fdir_info->hash_map) {
> > +		PMD_INIT_LOG(ERR,
> > +			     "Failed to allocate memory for fdir hash map!");
> > +		return -ENOMEM;
> > +	}
> > +
> 
> Can you please extract these into functions, to have more clear init/uninit
> functions?

ok, that seems like a good idea to create two new functions to do the initialization of the flow director and l2 tunnel filter.
That will make the dev init function clearer. I will add two init functions for this purpose in v2.

> 
> >  	return 0;
> >  }
> >
> > @@ -1297,6 +1329,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev
> > *eth_dev)  {
> >  	struct rte_pci_device *pci_dev;
> >  	struct ixgbe_hw *hw;
> > +	struct ixgbe_hw_fdir_info *fdir_info =
> > +		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data-
> >dev_private);
> > +	struct ixgbe_fdir_filter *fdir_filter;
> >
> >  	PMD_INIT_FUNC_TRACE();
> >
> > @@ -1330,6 +1365,19 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev
> *eth_dev)
> >  	rte_free(eth_dev->data->hash_mac_addrs);
> >  	eth_dev->data->hash_mac_addrs = NULL;
> >
> > +	/* remove all the fdir filters & hash */
> > +	if (fdir_info->hash_map)
> > +		rte_free(fdir_info->hash_map);
> > +	if (fdir_info->hash_handle)
> > +		rte_hash_free(fdir_info->hash_handle);
> 
> All rte_hash_xxx() APIs gives build error for shared library build, [1] needs to
> be added into Makefile.
> 
> But, this makes hash library a dependency to the PMD, do you think is there
> a way to escape from this?
> 
> [1]
> +DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net
> +lib/librte_hash
> 

Ok, I will make the change as you suggested in v2.

> > +
> > +	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
> > +		TAILQ_REMOVE(&fdir_info->fdir_list,
> > +			     fdir_filter,
> > +			     entries);
> > +		rte_free(fdir_filter);
> > +	}
> > +
> >  	return 0;
> >  }
> >
> <...>

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 04/18] net/ixgbe: restore n-tuple filter
  2016-12-20 16:58   ` Ferruh Yigit
@ 2016-12-26  3:32     ` Zhao1, Wei
  0 siblings, 0 replies; 36+ messages in thread
From: Zhao1, Wei @ 2016-12-26  3:32 UTC (permalink / raw)
  To: Yigit, Ferruh, dev; +Cc: Lu, Wenzhuo

Hi,  Ferruh

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, December 21, 2016 12:59 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 04/18] net/ixgbe: restore n-tuple filter
> 
> On 12/2/2016 10:43 AM, Wei Zhao wrote:
> > From: wei zhao1 <wei.zhao1@intel.com>
> >
> > Add support for restoring n-tuple filter in SW.
> >
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> > ---
> >  drivers/net/ixgbe/ixgbe_ethdev.c | 131 +++++++++++++++++++++++++--
> ------------
> >  1 file changed, 83 insertions(+), 48 deletions(-)
> >
> > diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c
> b/drivers/net/ixgbe/ixgbe_ethdev.c
> <...>
> > @@ -2482,6 +2496,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
> >
> >  	/* resume enabled intr since hw reset */
> >  	ixgbe_enable_intr(dev);
> > +	ixgbe_filter_restore(dev);
> 
> Just to double check, when you stop the device does 5tuple_filter reset?
> If not reset, will adding same filters cause any problem?

yes, the NIC reset function ixgbe_pf_reset_hw(hw) is called in the dev stop process.

> 
> >
> >  	return 0;
> >
> <...>

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 15/18] net/ixgbe: parse flow director filter
  2016-12-23  8:13         ` Adrien Mazarguil
@ 2016-12-27  3:31           ` Zhao1, Wei
  0 siblings, 0 replies; 36+ messages in thread
From: Zhao1, Wei @ 2016-12-27  3:31 UTC (permalink / raw)
  To: Adrien Mazarguil, Yigit, Ferruh; +Cc: dev, Lu, Wenzhuo

Hi,  Adrien

> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> Sent: Friday, December 23, 2016 4:13 PM
> To: Yigit, Ferruh <ferruh.yigit@intel.com>
> Cc: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 15/18] net/ixgbe: parse flow director filter
> 
> Hi,
> 
> On Thu, Dec 22, 2016 at 10:44:32AM +0000, Ferruh Yigit wrote:
> > On 12/22/2016 9:19 AM, Zhao1, Wei wrote:
> > > Hi, Yigit
> > >
> > >> -----Original Message-----
> > >> From: Yigit, Ferruh
> > >> Sent: Wednesday, December 21, 2016 1:01 AM
> > >> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> > >> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> > >> Subject: Re: [dpdk-dev] [PATCH 15/18] net/ixgbe: parse flow
> > >> director filter
> > >>
> > >> On 12/2/2016 10:43 AM, Wei Zhao wrote:
> > >>> From: wei zhao1 <wei.zhao1@intel.com>
> > >>>
> > >>> check if the rule is a flow director rule, and get the flow director info.
> > >>>
> > >>> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> > >>> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > >>> ---
> > >>
> > >> <...>
> > >>
> > >>> +	PATTERN_SKIP_VOID(rule, struct ixgbe_fdir_rule,
> > >>> +			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
> > >>> +	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
> > >>> +	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
> > >>> +	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
> > >>> +	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
> > >>> +	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
> > >>> +	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
> > >>
> > >> This gives build error [1], there are a few more same usage:
> > >>
> > >> .../drivers/net/ixgbe/ixgbe_ethdev.c:9238:17: error: comparison of
> > >> constant
> > >> 241 with expression of type 'const enum rte_flow_item_type' is
> > >> always true [-Werror,-Wtautological-constant-out-of-range-compare]
> > >>             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
> > >>
> > >>
> > >>
> > >
> > > Ok, I will add two type definition RTE_FLOW_ITEM_TYPE_NVGRE and
> RTE_FLOW_ITEM_TYPE_E_TAG  into const enum rte_flow_item_type to
> eliminate this problem.
> > > Thank you.
> > >
> >
> > CC: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> 
> Thanks, the original message did not catch my attention.
> 
> > Yes, that is what right thing to do, since rte_flow patchset not
> > merged yet, perhaps Adrien may want to include this as next version of
> > his patchset?
> >
> > What do you think Adrien?
> 
> I think by now the rte_flow series is automatically categorized as spam by
> half the community already, and that new items such as these can be added
> subsequently on their own, ideally before the entire ixgbe/i40e series.
> 
> I have a few comments regarding these new items if we want to make them
> part of rte_flow.h (see definitions below).
> 
OK, I will add these type definitions in my patch in v2.

> Unfortunately, even though they are super convenient to use and expose in
> the testpmd flow command, we need to avoid C-style bit-field definitions
> such as these for protocol header matching because they are not endian-
> safe, particularly multi-byte fields. Only meta-data that should be interpreted
> with host endianness can use them (e.g. struct rte_flow_attr, struct
> rte_flow_action_vf, etc):
> 
>  struct rte_flow_item_nvgre {
>         uint32_t flags0:1; /**< 0 */
>         uint32_t rsvd1:1; /**< 1 bit not defined */
>         uint32_t flags1:2; /**< 2 bits, 1 0 */
>         uint32_t rsvd0:9; /**< Reserved0 */
>         uint32_t ver:3; /**< version */
>         uint32_t protocol:16; /**< protocol type, 0x6558 */
>         uint32_t tni:24; /**< tenant network ID or virtual subnet ID */
>         uint32_t flow_id:8; /**< flow ID or Reserved */  };
> 
> For an example how to avoid them, see struct ipv6_hdr definition in rte_ip.h,
> where field vtc_flow is 32 bit but covers three protocol fields and is
> considered big-endian (Nelio's endianness series [1] would be really handy to
> eliminate confusion here). Also see struct rte_flow_item_vxlan, which
> covers 24-bit fields using uint8_t arrays.
> 
>  struct rte_flow_item_e_tag {
>         struct ether_addr dst; /**< Destination MAC. */
>         struct ether_addr src; /**< Source MAC. */
>         uint16_t e_tag_ethertype; /**< E-tag EtherType, 0x893F. */
>         uint16_t e_pcp:3; /**<  E-PCP */
>         uint16_t dei:1; /**< DEI */
>         uint16_t in_e_cid_base:12; /**< Ingress E-CID base */
>         uint16_t rsv:2; /**< reserved */
>         uint16_t grp:2; /**< GRP */
>         uint16_t e_cid_base:12; /**< E-CID base */
>         uint16_t in_e_cid_ext:8; /**< Ingress E-CID extend */
>         uint16_t e_cid_ext:8; /**< E-CID extend */
>         uint16_t type; /**< MAC type. */
>         unsigned int tags; /**< Number of 802.1Q/ad tags defined. */
>         struct {
>                 uint16_t tpid; /**< Tag protocol identifier. */
>                 uint16_t tci; /**< Tag control information. */
>         } tag[]; /**< 802.1Q/ad tag definitions, outermost first. */  };
> 
> Besides the bit-field issue for this one, looks like it should be split, at least the
> initial part is already covered by rte_flow_item_eth.
> 
> I do not know much about E-Tag (IEEE 802.1BR right?) but it sort of looks like a
> 2.5 layer protocol reminiscent of VLAN.
> 
> tags and tag[] fields seem based on the VLAN definition of the original
> rte_flow RFC that has since been replaced with stacked rte_flow_item_vlan
> items, much easier to program for. Perhaps this can be relied on instead of
> having e_tag implement its own variant.
> 
> As a protocol-matching item and E-Tag TCI being 6 bytes according to IEEE
> 802.1BR, sizeof(struct rte_flow_item_e_tag) should ideally return 6 as well
> and would likely comprise three big-endian uint16_t fields. See how
> PCP/DEI/VID VLAN fields are interpreted in testpmd [2].
> 
> Again, these concerns only stand if you intend to include these definitions
> into rte_flow.h.
> 
> [1] http://dpdk.org/ml/archives/dev/2016-November/050060.html
> [2] http://dpdk.org/ml/archives/dev/2016-December/052976.html
> 
> --
> Adrien Mazarguil
> 6WIND

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 01/18] net/ixgbe: store SYN filter
  2016-12-22  9:48     ` Zhao1, Wei
@ 2017-01-06 16:26       ` Ferruh Yigit
  2017-01-10  6:48         ` Zhao1, Wei
  0 siblings, 1 reply; 36+ messages in thread
From: Ferruh Yigit @ 2017-01-06 16:26 UTC (permalink / raw)
  To: Zhao1, Wei, dev; +Cc: Lu, Wenzhuo

On 12/22/2016 9:48 AM, Zhao1, Wei wrote:
> Hi, Yigit
> 
>> -----Original Message-----
>> From: Yigit, Ferruh
>> Sent: Wednesday, December 21, 2016 12:56 AM
>> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
>> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
>> Subject: Re: [dpdk-dev] [PATCH 01/18] net/ixgbe: store SYN filter
>>
>> On 12/2/2016 10:42 AM, Wei Zhao wrote:
>>> From: wei zhao1 <wei.zhao1@intel.com>
>>>
>>> Add support for storing SYN filter in SW.
>>
>> Do you think does it makes more clear to refer as TCP SYN filter? Or SYN filter
>> is clear enough?
>>
> 
> Ok, I will change to " TCP SYN filter " to make it more clear
> 
>>>
>>> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
>>> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
>>
>> Can you please update sign-off to your actual name?
>>
> 
> Ok, I will change to " Signed-off-by: Wei Zhao <wei.zhao1@intel.com>"
> 
>>> ---
>>>  drivers/net/ixgbe/ixgbe_ethdev.c | 12 ++++++++++--
>>> drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
>>>  2 files changed, 12 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c
>>> b/drivers/net/ixgbe/ixgbe_ethdev.c
>>> index edc9b22..7f10cca 100644
>>> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
>>> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
>>> @@ -1287,6 +1287,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
>>>  	memset(filter_info->fivetuple_mask, 0,
>>>  	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
>>>
>>> +	/* initialize SYN filter */
>>> +	filter_info->syn_info = 0;
>>
>> can it be an option to memset all filter_info? (and of course move list init
>> after memset)
>>
> 
> Maybe, change to the following code?
> 
> memset(filter_info, 0, sizeof(struct ixgbe_filter_info)); 
> TAILQ_INIT(&filter_info->fivetuple_list);
> 
But that will mix the /* initialize ether type filter */ and /* initialize 5tuple filter list */ processes together,
because struct ixgbe_filter_info stores two types of info: ether and 5tuple.

I see filter info consist of different filter types, but as far as I can
see they are not used before this memset, so what is the problem of
cleaning all struct?

Currently memset a sub-set of struct, and assigns zero to other field
explicitly, and rest remains undefined and unused. I am suggesting
memset whole structure and get rid of zero assignment.

> So, not to change ?
> 
> struct ixgbe_filter_info {
> 	uint8_t ethertype_mask;  /* Bit mask for every used ethertype filter */
> 	/* store used ethertype filters*/
> 	struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
> 	/* Bit mask for every used 5tuple filter */
> 	uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
> 	struct ixgbe_5tuple_filter_list fivetuple_list;
> 	/* store the SYN filter info */
> 	uint32_t syn_info;
> };
> 
> 
<...>

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 01/18] net/ixgbe: store SYN filter
  2017-01-06 16:26       ` Ferruh Yigit
@ 2017-01-10  6:48         ` Zhao1, Wei
  0 siblings, 0 replies; 36+ messages in thread
From: Zhao1, Wei @ 2017-01-10  6:48 UTC (permalink / raw)
  To: Yigit, Ferruh, dev; +Cc: Lu, Wenzhuo

Hi, yigit

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Saturday, January 7, 2017 12:26 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 01/18] net/ixgbe: store SYN filter
> 
> On 12/22/2016 9:48 AM, Zhao1, Wei wrote:
> > Hi, Yigit
> >
> >> -----Original Message-----
> >> From: Yigit, Ferruh
> >> Sent: Wednesday, December 21, 2016 12:56 AM
> >> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> >> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> >> Subject: Re: [dpdk-dev] [PATCH 01/18] net/ixgbe: store SYN filter
> >>
> >> On 12/2/2016 10:42 AM, Wei Zhao wrote:
> >>> From: wei zhao1 <wei.zhao1@intel.com>
> >>>
> >>> Add support for storing SYN filter in SW.
> >>
> >> Do you think does it makes more clear to refer as TCP SYN filter? Or
> >> SYN filter is clear enough?
> >>
> >
> > Ok, I will change to " TCP SYN filter " to make it more clear
> >
> >>>
> >>> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> >>> Signed-off-by: wei zhao1 <wei.zhao1@intel.com>
> >>
> >> Can you please update sign-off to your actual name?
> >>
> >
> > Ok, I will change to " Signed-off-by: Wei Zhao <wei.zhao1@intel.com>"
> >
> >>> ---
> >>>  drivers/net/ixgbe/ixgbe_ethdev.c | 12 ++++++++++--
> >>> drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
> >>>  2 files changed, 12 insertions(+), 2 deletions(-)
> >>>
> >>> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c
> >>> b/drivers/net/ixgbe/ixgbe_ethdev.c
> >>> index edc9b22..7f10cca 100644
> >>> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> >>> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> >>> @@ -1287,6 +1287,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev
> *eth_dev)
> >>>  	memset(filter_info->fivetuple_mask, 0,
> >>>  	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
> >>>
> >>> +	/* initialize SYN filter */
> >>> +	filter_info->syn_info = 0;
> >>
> >> can it be an option to memset all filter_info? (and of course move
> >> list init after memset)
> >>
> >
> > Maybe, change to the following code?
> >
> > memset(filter_info, 0, sizeof(struct ixgbe_filter_info));
> > TAILQ_INIT(&filter_info->fivetuple_list);
> >
> > But that wiil mix /* initialize ether type filter */ and /* initialize
> > 5tuple filter list */ two process together, Because  struct ixgbe_filter_info
> store two type info of ether and 5tuple.
> 
> I see filter info consist of different filter types, but as far as I can see they are
> not used before this memset, so what is the problem of cleaning all struct?
> 
> Currently memset a sub-set of struct, and assigns zero to other field explicitly,
> and rest remains undefined and unused. I am suggesting memset whole
> structure and get rid of zero assignment.
> 

Ok, I will do as you suggested in v3.

> > So, not to change ?
> >
> > struct ixgbe_filter_info {
> > 	uint8_t ethertype_mask;  /* Bit mask for every used ethertype filter
> */
> > 	/* store used ethertype filters*/
> > 	struct ixgbe_ethertype_filter
> ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
> > 	/* Bit mask for every used 5tuple filter */
> > 	uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
> > 	struct ixgbe_5tuple_filter_list fivetuple_list;
> > 	/* store the SYN filter info */
> > 	uint32_t syn_info;
> > };
> >
> >
> <...>

^ permalink raw reply	[flat|nested] 36+ messages in thread

end of thread, other threads:[~2017-01-10  6:48 UTC | newest]

Thread overview: 36+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-12-02 10:42 [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
2016-12-02 10:42 ` [PATCH 01/18] net/ixgbe: store SYN filter Wei Zhao
2016-12-20 16:55   ` Ferruh Yigit
2016-12-22  9:48     ` Zhao1, Wei
2017-01-06 16:26       ` Ferruh Yigit
2017-01-10  6:48         ` Zhao1, Wei
2016-12-26  1:47     ` Zhao1, Wei
2016-12-02 10:42 ` [PATCH 02/18] net/ixgbe: store flow director filter Wei Zhao
2016-12-20 16:58   ` Ferruh Yigit
2016-12-26  2:50     ` Zhao1, Wei
2016-12-02 10:42 ` [PATCH 03/18] net/ixgbe: store L2 tunnel filter Wei Zhao
2016-12-02 10:43 ` [PATCH 04/18] net/ixgbe: restore n-tuple filter Wei Zhao
2016-12-20 16:58   ` Ferruh Yigit
2016-12-26  3:32     ` Zhao1, Wei
2016-12-02 10:43 ` [PATCH 05/18] net/ixgbe: restore ether type filter Wei Zhao
2016-12-02 10:43 ` [PATCH 06/18] net/ixgbe: restore SYN filter Wei Zhao
2016-12-02 10:43 ` [PATCH 07/18] net/ixgbe: restore flow director filter Wei Zhao
2016-12-02 10:43 ` [PATCH 08/18] net/ixgbe: restore L2 tunnel filter Wei Zhao
2016-12-02 10:43 ` [PATCH 09/18] net/ixgbe: store and restore L2 tunnel configuration Wei Zhao
2016-12-02 10:43 ` [PATCH 10/18] net/ixgbe: flush all the filters Wei Zhao
2016-12-02 10:43 ` [PATCH 11/18] net/ixgbe: parse n-tuple filter Wei Zhao
2016-12-20 17:23   ` Ferruh Yigit
2016-12-02 10:43 ` [PATCH 12/18] net/ixgbe: parse ethertype filter Wei Zhao
2016-12-02 10:43 ` [PATCH 13/18] net/ixgbe: parse SYN filter Wei Zhao
2016-12-02 10:43 ` [PATCH 14/18] net/ixgbe: parse L2 tunnel filter Wei Zhao
2016-12-02 10:43 ` [PATCH 15/18] net/ixgbe: parse flow director filter Wei Zhao
2016-12-20 17:00   ` Ferruh Yigit
2016-12-22  9:19     ` Zhao1, Wei
2016-12-22 10:44       ` Ferruh Yigit
2016-12-23  8:13         ` Adrien Mazarguil
2016-12-27  3:31           ` Zhao1, Wei
2016-12-02 10:43 ` [PATCH 16/18] net/ixgbe: create consistent filter Wei Zhao
2016-12-20 17:25   ` Ferruh Yigit
2016-12-23  6:26     ` Zhao1, Wei
2016-12-02 10:43 ` [PATCH 17/18] net/ixgbe: destroy " Wei Zhao
2016-12-02 10:43 ` [PATCH 18/18] net/ixgbe: flush " Wei Zhao

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.