* [PATCH 0/7] GTP enabling
@ 2017-08-25  7:50 Beilei Xing
  2017-08-25  7:50 ` [PATCH 1/7] net/i40e: support RSS for GTP-C and GTP-U Beilei Xing
                   ` (6 more replies)
  0 siblings, 7 replies; 116+ messages in thread
From: Beilei Xing @ 2017-08-25  7:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev

This patch set enables RSS, flow director (FDIR) and cloud
filtering for GTP-C and GTP-U.
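
As a rough sketch (not part of the series), once the GTP item from
patch 2 is in place an application could steer GTP-U packets carrying
a given TEID to an Rx queue through the generic flow API roughly as
below. The helper name, port id, TEID value and queue index are
placeholders, and whether the UDP destination port must be matched
explicitly depends on the driver-side parsing added later in the
series:

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    static int
    validate_gtpu_teid_rule(uint8_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_gtp gtp_spec = {
            .teid = rte_cpu_to_be_32(0x12345678),
        };
        struct rte_flow_item_gtp gtp_mask = {
            .teid = RTE_BE32(0xffffffff),
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_GTP,
              .spec = &gtp_spec, .mask = &gtp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        /* rte_flow_create() takes the same arguments. */
        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
    }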

Beilei Xing (7):
  net/i40e: support RSS for GTP-C and GTP-U
  ethdev: add GTP item
  app/testpmd: add GTP fields to flow command
  net/i40e: finish integration FDIR with generic flow API
  net/i40e: add FDIR support for GTP-C and GTP-U
  net/i40e: add cloud filter parsing function for GTP
  net/i40e: enable cloud filter for GTP-C and GTP-U

 app/test-pmd/cmdline_flow.c                 |  22 ++
 app/test-pmd/config.c                       |   1 +
 doc/guides/prog_guide/rte_flow.rst          |  12 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |   4 +
 drivers/net/i40e/i40e_ethdev.c              | 169 +++++++++
 drivers/net/i40e/i40e_ethdev.h              | 118 ++++++-
 drivers/net/i40e/i40e_fdir.c                | 528 +++++++++++++++++++++++++++-
 drivers/net/i40e/i40e_flow.c                | 283 ++++++++++++---
 lib/librte_ether/rte_flow.h                 |  31 ++
 9 files changed, 1104 insertions(+), 64 deletions(-)

-- 
2.5.5

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH 1/7] net/i40e: support RSS for GTP-C and GTP-U
  2017-08-25  7:50 [PATCH 0/7] GTP enabling Beilei Xing
@ 2017-08-25  7:50 ` Beilei Xing
  2017-09-07 11:20   ` [PATCH v2 0/6] GTP-C and GTP-U enabling Beilei Xing
  2017-08-25  7:50 ` [PATCH 2/7] ethdev: add GTP item Beilei Xing
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-08-25  7:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev

GTP-C and GTP-U are supported by a new profile.
Add the new PCTYPEs and enable RSS for GTP-C and GTP-U.
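
For reference, a minimal sketch (not from the patch) of where the two
new PCTYPE bits land in the hash-enable registers: both 23 and 24 are
below bit 32, so they end up in PFQF_HENA(0), while HENA(1) holds
bits 32-63:

    #include <stdint.h>

    /* Illustrative only: split the 64-bit HENA value the same way the
     * driver does before writing I40E_PFQF_HENA(0)/(1). */
    static void
    split_hena(uint32_t *lo, uint32_t *hi)
    {
        uint64_t hena = 0;

        hena |= 1ULL << 23;            /* I40E_FILTER_PCTYPE_GTPC */
        hena |= 1ULL << 24;            /* I40E_FILTER_PCTYPE_GTPU */
        *lo = (uint32_t)hena;          /* -> I40E_PFQF_HENA(0) */
        *hi = (uint32_t)(hena >> 32);  /* -> I40E_PFQF_HENA(1) */
    }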

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 14 ++++++++++++++
 drivers/net/i40e/i40e_ethdev.h | 10 +++++++++-
 2 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4a2e3f2..7c9e5af 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -197,6 +197,8 @@
 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
 /* UDP Tunneling ID, NVGRE/GRE key */
 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
+/* GTP TEID */
+#define I40E_REG_INSET_GTP_TEID                  0x0000000000020000ULL
 /* Last ether type */
 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
 /* Tunneling outer destination IPv4 address */
@@ -6760,6 +6762,11 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
 	else
 		hena &= ~I40E_RSS_HENA_ALL;
 	hena |= i40e_config_hena(rss_hf, hw->mac.type);
+
+	/* Enable GTP-C/U by default */
+	hena |= 1ULL << I40E_FILTER_PCTYPE_GTPC;
+	hena |= 1ULL << I40E_FILTER_PCTYPE_GTPU;
+
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
 	I40E_WRITE_FLUSH(hw);
@@ -8123,6 +8130,12 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
 			I40E_INSET_FLEX_PAYLOAD,
+		[I40E_FILTER_PCTYPE_GTPC] =
+			I40E_INSET_GTP_TEID | I40E_INSET_IPV4_SRC |
+			I40E_INSET_IPV4_DST,
+		[I40E_FILTER_PCTYPE_GTPU] =
+			I40E_INSET_GTP_TEID | I40E_INSET_IPV4_SRC |
+			I40E_INSET_IPV4_DST,
 	};
 
 	/**
@@ -8449,6 +8462,7 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
 		{I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
 		{I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
 		{I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
+		{I40E_INSET_GTP_TEID, I40E_REG_INSET_GTP_TEID},
 	};
 
     /* some different registers map in x722*/
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 48abc05..ab2a5cd 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -119,6 +119,10 @@ enum i40e_flxpld_layer_idx {
 #define I40E_FDIR_MAX_FLEX_LEN      16 /* len in bytes of flex payload */
 #define I40E_INSET_MASK_NUM_REG     2  /* number of input set mask registers */
 
+/* New PCTYPEs for GTP-C and GTP-U */
+#define I40E_FILTER_PCTYPE_GTPC		23
+#define I40E_FILTER_PCTYPE_GTPU		24
+
 /* i40e flags */
 #define I40E_FLAG_RSS                   (1ULL << 0)
 #define I40E_FLAG_DCB                   (1ULL << 1)
@@ -234,6 +238,8 @@ enum i40e_flxpld_layer_idx {
 #define I40E_INSET_TUNNEL_DST_PORT       0x0000001000000000ULL
 #define I40E_INSET_TUNNEL_ID             0x0000002000000000ULL
 
+#define I40E_INSET_GTP_TEID              0x0000004000000000ULL
+
 /* bit 48 ~ bit 55 */
 #define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
 
@@ -1131,7 +1137,9 @@ i40e_calc_itr_interval(int16_t interval)
 	(pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \
 	(pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \
 	(pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \
-	(pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD)
+	(pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD || \
+	(pctype) == I40E_FILTER_PCTYPE_GTPC || \
+	(pctype) == I40E_FILTER_PCTYPE_GTPU)
 
 #define I40E_PHY_TYPE_SUPPORT_40G(phy_type) \
 	(((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_KR4) || \
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH 2/7] ethdev: add GTP item
  2017-08-25  7:50 [PATCH 0/7] GTP enabling Beilei Xing
  2017-08-25  7:50 ` [PATCH 1/7] net/i40e: support RSS for GTP-C and GTP-U Beilei Xing
@ 2017-08-25  7:50 ` Beilei Xing
  2017-09-06 16:02   ` Adrien Mazarguil
  2017-08-25  7:50 ` [PATCH 3/7] app/testpmd: add GTP fields to flow command Beilei Xing
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-08-25  7:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev

This patch adds a GTP item to the generic rte_flow API.
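
To make the flag byte concrete, here is a small sketch (illustrative,
not part of the patch) of how v_pt_rsv_flags packs the fields listed
in the header comment below: version in the top three bits, then PT,
reserved, E, S and PN. GTPv1 with PT=1 and no optional fields gives
0x30:

    #include <stdint.h>

    /* Compose the single flag octet of a GTP header. */
    static inline uint8_t
    gtp_flags(uint8_t version, uint8_t pt, uint8_t e, uint8_t s, uint8_t pn)
    {
        return (uint8_t)((version << 5) | (pt << 4) |
                         (e << 2) | (s << 1) | pn);
    }

    /* gtp_flags(1, 1, 0, 0, 0) == 0x30 */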

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 doc/guides/prog_guide/rte_flow.rst | 12 ++++++++++++
 lib/librte_ether/rte_flow.h        | 31 +++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+)

diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 662a912..2843b71 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -955,6 +955,18 @@ Usage example, fuzzy match a TCPv4 packets:
    | 4     | END      |
    +-------+----------+
 
+Item: ``GTP``
+^^^^^^^^^^^^^^
+
+Matches a GTP header.
+
+- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
+  extension header flag (1b), sequence number flag (1b), N-PDU number flag(1b).
+- ``msg_type``: message type.
+- ``msg_len``: message length.
+- ``teid``: TEID.
+- Default ``mask`` matches teid only.
+
 Actions
 ~~~~~~~
 
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index bba6169..73305aa 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -309,6 +309,13 @@ enum rte_flow_item_type {
 	 * See struct rte_flow_item_fuzzy.
 	 */
 	RTE_FLOW_ITEM_TYPE_FUZZY,
+
+	/**
+	 * Matches a GTP header
+	 *
+	 * See struct rte_flow_item.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTP,
 };
 
 /**
@@ -735,6 +742,30 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
 #endif
 
 /**
+ * RTE_FLOW_ITEM_TYPE_GTP.
+ *
+ * Matches a GTP header.
+ */
+struct rte_flow_item_gtp {
+	/**
+	 * Version(3b), Protocol type(1b), Reserved(1b),
+	 * Extension header flag(1b),
+	 * Sequence number flag(1b),
+	 * N-PDU number flag(1b).
+	 */
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type; /**< Message type */
+	rte_be16_t msg_len; /**< Message length */
+	rte_be32_t teid;
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
+	.teid = RTE_BE32(0xffffffff),
+};
+#endif
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH 3/7] app/testpmd: add GTP fields to flow command
  2017-08-25  7:50 [PATCH 0/7] GTP enabling Beilei Xing
  2017-08-25  7:50 ` [PATCH 1/7] net/i40e: support RSS for GTP-C and GTP-U Beilei Xing
  2017-08-25  7:50 ` [PATCH 2/7] ethdev: add GTP item Beilei Xing
@ 2017-08-25  7:50 ` Beilei Xing
  2017-09-06 16:03   ` Adrien Mazarguil
  2017-08-25  7:50 ` [PATCH 4/7] net/i40e: finish integration FDIR with generic flow API Beilei Xing
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-08-25  7:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev

This patch exposes the following item fields through the flow command:

- GTP TEID
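
With this in place a rule such as "flow create 0 ingress pattern eth /
ipv4 / udp dst is 2152 / gtp teid is 0x12345678 / end actions queue
index 3 / end" becomes expressible from the testpmd prompt
(illustrative command; the exact pattern a given driver accepts
depends on the rest of this series).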

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 app/test-pmd/cmdline_flow.c                 | 22 ++++++++++++++++++++++
 app/test-pmd/config.c                       |  1 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 ++++
 3 files changed, 27 insertions(+)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a17a004..4ab5bcc 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -171,6 +171,8 @@ enum index {
 	ITEM_GRE_PROTO,
 	ITEM_FUZZY,
 	ITEM_FUZZY_THRESH,
+	ITEM_GTP,
+	ITEM_GTP_TEID,
 
 	/* Validate/create actions. */
 	ACTIONS,
@@ -451,6 +453,7 @@ static const enum index next_item[] = {
 	ITEM_MPLS,
 	ITEM_GRE,
 	ITEM_FUZZY,
+	ITEM_GTP,
 	ZERO,
 };
 
@@ -588,6 +591,12 @@ static const enum index item_gre[] = {
 	ZERO,
 };
 
+static const enum index item_gtp[] = {
+	ITEM_GTP_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
 static const enum index next_action[] = {
 	ACTION_END,
 	ACTION_VOID,
@@ -1421,6 +1430,19 @@ static const struct token token_list[] = {
 		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
 					thresh)),
 	},
+	[ITEM_GTP] = {
+		.name = "gtp",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTP_TEID] = {
+		.name = "teid",
+		.help = "GTP TEID",
+		.next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
+		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
+	},
 
 	/* Validate/create actions. */
 	[ACTIONS] = {
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 3ae3e1c..1f320a0 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -947,6 +947,7 @@ static const struct {
 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
+	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
 };
 
 /** Compute storage space needed by item specification. */
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 2ed62f5..6ec463e 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2696,6 +2696,10 @@ This section lists supported pattern items and their attributes, if any.
 
   - ``thresh {unsigned}``: accuracy threshold.
 
+- ``gtp``: match GTP header.
+
+    - ``teid {unsigned}``: GTP TEID.
+
 Actions list
 ^^^^^^^^^^^^
 
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH 4/7] net/i40e: finish integration FDIR with generic flow API
  2017-08-25  7:50 [PATCH 0/7] GTP enabling Beilei Xing
                   ` (2 preceding siblings ...)
  2017-08-25  7:50 ` [PATCH 3/7] app/testpmd: add GTP fields to flow command Beilei Xing
@ 2017-08-25  7:50 ` Beilei Xing
  2017-08-25  7:50 ` [PATCH 5/7] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-08-25  7:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev

rte_eth_fdir_* structures are still used in the FDIR functions.
This patch adds i40e private FDIR-related structures and
functions to finish integrating FDIR with the generic flow API.
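
As a minimal sketch (not part of the patch; driver-internal code that
assumes i40e_ethdev.h), the new entry could be filled like this for a
plain IPv4/UDP flow, with addresses, ports and queue as placeholders:

    struct i40e_fdir_filter_conf conf = {
        .soft_id = 1,
        .input = {
            .pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
            .flow.udp4_flow = {
                .ip.src_ip = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
                .ip.dst_ip = rte_cpu_to_be_32(0xC0A80002), /* 192.168.0.2 */
                .src_port = rte_cpu_to_be_16(1024),
                .dst_port = rte_cpu_to_be_16(4096),
            },
        },
        .action = {
            .rx_queue = 3,
            .behavior = I40E_FDIR_ACCEPT,
            .report_status = I40E_FDIR_REPORT_ID,
        },
    };

    /* ret = i40e_flow_add_del_fdir_filter(dev, &conf, 1); */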

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  94 +++++++-
 drivers/net/i40e/i40e_fdir.c   | 490 +++++++++++++++++++++++++++++++++++++++--
 drivers/net/i40e/i40e_flow.c   |  76 +++----
 3 files changed, 597 insertions(+), 63 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ab2a5cd..6d871e4 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -466,6 +466,91 @@ struct i40e_vmdq_info {
 #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
+/**
+ * A union that contains the inputs for all flow types.
+ * Items in a flow need to be in big endian.
+ */
+union i40e_fdir_flow {
+	struct rte_eth_l2_flow     l2_flow;
+	struct rte_eth_udpv4_flow  udp4_flow;
+	struct rte_eth_tcpv4_flow  tcp4_flow;
+	struct rte_eth_sctpv4_flow sctp4_flow;
+	struct rte_eth_ipv4_flow   ip4_flow;
+	struct rte_eth_udpv6_flow  udp6_flow;
+	struct rte_eth_tcpv6_flow  tcp6_flow;
+	struct rte_eth_sctpv6_flow sctp6_flow;
+	struct rte_eth_ipv6_flow   ipv6_flow;
+};
+
+/**
+ * A structure used to contain the extended input of a flow
+ */
+struct i40e_fdir_flow_ext {
+	uint16_t vlan_tci;
+	uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/**< It is filled by the flexible payload to match. */
+	uint8_t is_vf;   /**< 1 for VF, 0 for port dev */
+	uint16_t dst_id; /**< VF ID, available when is_vf is 1*/
+};
+
+/**
+ * A structure used to define the input for a flow director filter entry
+ */
+struct i40e_fdir_input {
+	enum i40e_filter_pctype pctype;
+	union i40e_fdir_flow flow;
+	/**< Flow fields to match, dependent on flow_type */
+	struct i40e_fdir_flow_ext flow_ext;
+	/**< Additional fields to match */
+};
+
+/**
+ * Behavior to be taken if an FDIR filter matches
+ */
+enum i40e_fdir_behavior {
+	I40E_FDIR_ACCEPT = 0,
+	I40E_FDIR_REJECT,
+	I40E_FDIR_PASSTHRU,
+};
+
+/**
+ * Flow director report status
+ * It defines what will be reported if FDIR entry is matched.
+ */
+enum i40e_fdir_status {
+	I40E_FDIR_NO_REPORT_STATUS = 0, /**< Report nothing. */
+	I40E_FDIR_REPORT_ID,            /**< Only report FD ID. */
+	I40E_FDIR_REPORT_ID_FLEX_4,     /**< Report FD ID and 4 flex bytes. */
+	I40E_FDIR_REPORT_FLEX_8,        /**< Report 8 flex bytes. */
+};
+
+/**
+ * A structure used to define the action taken when an FDIR packet filter matches.
+ */
+struct i40e_fdir_action {
+	uint16_t rx_queue;        /**< Queue assigned to if FDIR match. */
+	enum i40e_fdir_behavior behavior;     /**< Behavior will be taken */
+	enum i40e_fdir_status report_status;  /**< Status report option */
+	/**
+	 * If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or
+	 * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+	 * flex bytes start from in flexible payload.
+	 */
+	uint8_t flex_off;
+};
+
+/**
+ * A structure used to define the flow director filter entry by filter_ctrl API
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct i40e_fdir_filter_conf {
+	uint32_t soft_id;
+	/**< ID, a unique value is required when dealing with an FDIR entry */
+	struct i40e_fdir_input input;    /**< Input set */
+	struct i40e_fdir_action action;  /**< Action taken when match */
+};
+
 /*
  * Structure to store flex pit for flow diretor.
  */
@@ -489,7 +574,7 @@ struct i40e_fdir_flex_mask {
 
 struct i40e_fdir_filter {
 	TAILQ_ENTRY(i40e_fdir_filter) rules;
-	struct rte_eth_fdir_filter fdir;
+	struct i40e_fdir_filter_conf fdir;
 };
 
 TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
@@ -893,7 +978,7 @@ extern const struct rte_flow_ops i40e_flow_ops;
 
 union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_fdir_filter fdir_filter;
+	struct i40e_fdir_filter_conf fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
@@ -967,7 +1052,7 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
 int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
 				 struct i40e_ethertype_filter_input *input);
 int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
-			    struct rte_eth_fdir_input *input);
+			    struct i40e_fdir_input *input);
 struct i40e_tunnel_filter *
 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
 			     const struct i40e_tunnel_filter_input *input);
@@ -980,6 +1065,9 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf,
 int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 			     const struct rte_eth_fdir_filter *filter,
 			     bool add);
+int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 8013add..b0ba819 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -100,13 +100,18 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
 			const struct rte_eth_fdir_filter *filter,
 			bool add);
-static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter);
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input);
+			const struct i40e_fdir_input *input);
 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
 				   struct i40e_fdir_filter *filter);
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 
 static int
 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -934,6 +939,263 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static inline int
+i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+				unsigned char *raw_pkt,
+				bool vlan)
+{
+	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+	uint16_t *ether_type;
+	uint8_t len = 2 * sizeof(struct ether_addr);
+	struct ipv4_hdr *ip;
+	struct ipv6_hdr *ip6;
+	static const uint8_t next_proto[] = {
+		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+	};
+
+	raw_pkt += 2 * sizeof(struct ether_addr);
+	if (vlan && fdir_input->flow_ext.vlan_tci) {
+		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+		rte_memcpy(raw_pkt + sizeof(uint16_t),
+			   &fdir_input->flow_ext.vlan_tci,
+			   sizeof(uint16_t));
+		raw_pkt += sizeof(vlan_frame);
+		len += sizeof(vlan_frame);
+	}
+	ether_type = (uint16_t *)raw_pkt;
+	raw_pkt += sizeof(uint16_t);
+	len += sizeof(uint16_t);
+
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		*ether_type = fdir_input->flow.l2_flow.ether_type;
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		ip = (struct ipv4_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+		/* set len to by default */
+		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+					fdir_input->flow.ip4_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+					fdir_input->flow.ip4_flow.ttl :
+					I40E_FDIR_IP_DEFAULT_TTL;
+		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+		len += sizeof(struct ipv4_hdr);
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		ip6 = (struct ipv6_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		ip6->vtc_flow =
+			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					 (fdir_input->flow.ipv6_flow.tc <<
+					  I40E_FDIR_IPv6_TC_OFFSET));
+		ip6->payload_len =
+			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+					fdir_input->flow.ipv6_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+					fdir_input->flow.ipv6_flow.hop_limits :
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		rte_memcpy(&ip6->src_addr,
+			   &fdir_input->flow.ipv6_flow.dst_ip,
+			   IPV6_ADDR_LEN);
+		rte_memcpy(&ip6->dst_addr,
+			   &fdir_input->flow.ipv6_flow.src_ip,
+			   IPV6_ADDR_LEN);
+		len += sizeof(struct ipv6_hdr);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
+	}
+	return len;
+}
+
+/**
+ * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+			     const struct i40e_fdir_input *fdir_input,
+			     unsigned char *raw_pkt)
+{
+	unsigned char *payload, *ptr;
+	struct udp_hdr *udp;
+	struct tcp_hdr *tcp;
+	struct sctp_hdr *sctp;
+	uint8_t size, dst = 0;
+	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+	int len;
+
+	/* fill the ethernet and IP head */
+	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+					      !!fdir_input->flow_ext.vlan_tci);
+	if (len < 0)
+		return -EINVAL;
+
+	/* fill the L4 head */
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		payload = raw_pkt + len;
+		/**
+		 * ARP packet is a special case on which the payload
+		 * starts after the whole ARP header
+		 */
+		if (fdir_input->flow.l2_flow.ether_type ==
+				rte_cpu_to_be_16(ETHER_TYPE_ARP))
+			payload += sizeof(struct arp_hdr);
+		set_idx = I40E_FLXPLD_L2_IDX;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
+		return -EINVAL;
+	}
+
+	/* fill the flexbytes to payload */
+	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+		size = pf->fdir.flex_set[pit_idx].size;
+		if (size == 0)
+			continue;
+		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+		ptr = payload +
+		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+		(void)rte_memcpy(ptr,
+				 &fdir_input->flow_ext.flexbytes[dst],
+				 size * sizeof(uint16_t));
+	}
+
+	return 0;
+}
+
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -1007,17 +1269,17 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
 }
 
 static int
-i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter)
 {
-	rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
 	return 0;
 }
 
 /* Check if there exists the flow director filter */
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input)
+			const struct i40e_fdir_input *input)
 {
 	int ret;
 
@@ -1052,7 +1314,7 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
 
 /* Delete a flow director filter from the SW list */
 int
-i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
 {
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	struct i40e_fdir_filter *filter;
@@ -1082,16 +1344,13 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
  */
 int
 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
-			    const struct rte_eth_fdir_filter *filter,
-			    bool add)
+			 const struct rte_eth_fdir_filter *filter,
+			 bool add)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
 	enum i40e_filter_pctype pctype;
-	struct i40e_fdir_info *fdir_info = &pf->fdir;
-	struct i40e_fdir_filter *fdir_filter, *node;
-	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
 	int ret = 0;
 
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1114,6 +1373,69 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+		return ret;
+	}
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		/* get translated pctype value in fd pctype register */
+		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+			hw, I40E_GLQF_FD_PCTYPES(
+			(int)i40e_flowtype_to_pctype(
+			filter->input.flow_type)));
+	} else
+		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
+	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+			    pctype);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
+ * @pf: board private structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct i40e_fdir_filter_conf *filter,
+			      bool add)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	enum i40e_filter_pctype pctype;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_fdir_filter *fdir_filter, *node;
+	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+	int ret = 0;
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
+			    " check the mode in fdir_conf.");
+		return -ENOTSUP;
+	}
+
+	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "Invalid queue ID");
+		return -EINVAL;
+	}
+	if (filter->input.flow_ext.is_vf &&
+	    filter->input.flow_ext.dst_id >= pf->vf_num) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID");
+		return -EINVAL;
+	}
+
 	/* Check if there is the filter in SW list */
 	memset(&check_filter, 0, sizeof(check_filter));
 	i40e_fdir_filter_convert(filter, &check_filter);
@@ -1132,7 +1454,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 
 	memset(pkt, 0, I40E_FDIR_PKT_LEN);
 
-	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
 		return ret;
@@ -1142,12 +1464,11 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		/* get translated pctype value in fd pctype register */
 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
 			hw, I40E_GLQF_FD_PCTYPES(
-			(int)i40e_flowtype_to_pctype(
-			filter->input.flow_type)));
+			(int)filter->input.pctype));
 	} else
-		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+		pctype = filter->input.pctype;
 
-	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
 			    pctype);
@@ -1302,6 +1623,141 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
 }
 
 /*
+ * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
+ * Is done by Flow Director Programming Descriptor followed by packet
+ * structure that contains the filter fields need to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add)
+{
+	struct i40e_tx_queue *txq = pf->fdir.txq;
+	struct i40e_rx_queue *rxq = pf->fdir.rxq;
+	const struct i40e_fdir_action *fdir_action = &filter->action;
+	volatile struct i40e_tx_desc *txdp;
+	volatile struct i40e_filter_program_desc *fdirdp;
+	uint32_t td_cmd;
+	uint16_t vsi_id, i;
+	uint8_t dest;
+
+	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+	fdirdp = (volatile struct i40e_filter_program_desc *)
+				(&txq->tx_ring[txq->tx_tail]);
+
+	fdirdp->qindex_flex_ptype_vsi =
+			rte_cpu_to_le_32((fdir_action->rx_queue <<
+					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((fdir_action->flex_off <<
+					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((pctype <<
+					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+	if (filter->input.flow_ext.is_vf)
+		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+	else
+		/* Use LAN VSI Id by default */
+		vsi_id = pf->main_vsi->vsi_id;
+	fdirdp->qindex_flex_ptype_vsi |=
+		rte_cpu_to_le_32(((uint32_t)vsi_id <<
+				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+	fdirdp->dtype_cmd_cntindex =
+			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+	if (add)
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+	else
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	if (fdir_action->behavior == I40E_FDIR_REJECT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+	else {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: "
+			    "unsupported fdir behavior.");
+		return -EINVAL;
+	}
+
+	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+				I40E_TXD_FLTR_QW1_DEST_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+		rte_cpu_to_le_32((fdir_action->report_status <<
+				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(
+			((uint32_t)pf->fdir.match_counter_index <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+	txdp = &txq->tx_ring[txq->tx_tail + 1];
+	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+	td_cmd = I40E_TX_DESC_CMD_EOP |
+		 I40E_TX_DESC_CMD_RS  |
+		 I40E_TX_DESC_CMD_DUMMY;
+
+	txdp->cmd_type_offset_bsz =
+		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+	/* Update the tx tail register */
+	rte_wmb();
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if ((txdp->cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+			break;
+		rte_delay_us(1);
+	}
+	if (i >= I40E_FDIR_MAX_WAIT_US) {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: "
+			    "time out to get DD on tx queue.");
+		return -ETIMEDOUT;
+	}
+	/* totally delay 10 ms to check programming status*/
+	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if (i40e_check_fdir_programming_status(rxq) >= 0)
+			return 0;
+		rte_delay_us(1);
+	}
+	PMD_DRV_LOG(ERR,
+		 "Failed to program FDIR filter: programming status reported.");
+	return -ETIMEDOUT;
+}
+
+/*
  * i40e_fdir_flush - clear all filters of Flow Director table
  * @pf: board private structure
  */
@@ -1580,7 +2036,7 @@ i40e_fdir_filter_restore(struct i40e_pf *pf)
 	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
 
 	TAILQ_FOREACH(f, fdir_list, rules)
-		i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
 
 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
 	guarant_cnt =
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b92719a..73af7fd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -84,11 +84,11 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					const struct rte_flow_item *pattern,
 					struct rte_flow_error *error,
-					struct rte_eth_fdir_filter *filter);
+					struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 				       const struct rte_flow_action *actions,
 				       struct rte_flow_error *error,
-				       struct rte_eth_fdir_filter *filter);
+				       struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				 const struct rte_flow_action *actions,
 				 struct rte_flow_error *error,
@@ -2315,7 +2315,7 @@ static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			     const struct rte_flow_item *pattern,
 			     struct rte_flow_error *error,
-			     struct rte_eth_fdir_filter *filter)
+			     struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_item *item = pattern;
@@ -2329,8 +2329,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
-	enum i40e_filter_pctype pctype;
+	enum i40e_filter_pctype pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
@@ -2402,7 +2401,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2420,7 +2419,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2457,13 +2456,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					input_set |= I40E_INSET_IPV4_PROTO;
 
 				/* Get filter info */
-				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+				pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 				/* Check if it is fragment. */
 				frag_off = ipv4_spec->hdr.fragment_offset;
 				frag_off = rte_be_to_cpu_16(frag_off);
 				if (frag_off & IPV4_HDR_OFFSET_MASK ||
 				    frag_off & IPV4_HDR_MF_FLAG)
-					flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
 
 				/* Get the filter info */
 				filter->input.flow.ip4_flow.proto =
@@ -2535,11 +2534,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				/* Check if it is fragment. */
 				if (ipv6_spec->hdr.proto ==
 				    I40E_IPV6_FRAG_HEADER)
-					flow_type =
-						RTE_ETH_FLOW_FRAG_IPV6;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
 				else
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+					pctype =
+					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
 			}
 
 			layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2572,11 +2570,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.tcp4_flow.src_port =
@@ -2616,11 +2614,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.udp4_flow.src_port =
@@ -2663,11 +2661,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.sctp4_flow.src_port =
@@ -2776,14 +2774,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	pctype = i40e_flowtype_to_pctype(flow_type);
-	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Unsupported flow type");
-		return -rte_errno;
-	}
-
 	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
 	if (ret == -1) {
 		rte_flow_error_set(error, EINVAL,
@@ -2797,7 +2787,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->input.flow_type = flow_type;
+	filter->input.pctype = pctype;
 
 	/* Store flex mask to SW */
 	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
@@ -2832,7 +2822,7 @@ static int
 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 			    const struct rte_flow_action *actions,
 			    struct rte_flow_error *error,
-			    struct rte_eth_fdir_filter *filter)
+			    struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
@@ -2855,13 +2845,13 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 					   "Invalid queue ID for FDIR.");
 			return -rte_errno;
 		}
-		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		filter->action.behavior = I40E_FDIR_ACCEPT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_DROP:
-		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+		filter->action.behavior = I40E_FDIR_REJECT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
-		filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+		filter->action.behavior = I40E_FDIR_PASSTHRU;
 		break;
 	default:
 		rte_flow_error_set(error, EINVAL,
@@ -2876,11 +2866,11 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_MARK:
 		mark_spec = (const struct rte_flow_action_mark *)act->conf;
-		filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+		filter->action.report_status = I40E_FDIR_REPORT_ID;
 		filter->soft_id = mark_spec->id;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
-		filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
 		break;
 	case RTE_FLOW_ACTION_TYPE_END:
 		return 0;
@@ -2911,7 +2901,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error,
 			    union i40e_filter_t *filter)
 {
-	struct rte_eth_fdir_filter *fdir_filter =
+	struct i40e_fdir_filter_conf *fdir_filter =
 		&filter->fdir_filter;
 	int ret;
 
@@ -3877,7 +3867,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_ethertype_filter_list);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 				       &cons_filter.fdir_filter, 1);
 		if (ret)
 			goto free_flow;
@@ -3927,7 +3917,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 			      (struct i40e_tunnel_filter *)flow->rule);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
 		break;
 	default:
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH 5/7] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-08-25  7:50 [PATCH 0/7] GTP enabling Beilei Xing
                   ` (3 preceding siblings ...)
  2017-08-25  7:50 ` [PATCH 4/7] net/i40e: finish integration FDIR with generic flow API Beilei Xing
@ 2017-08-25  7:50 ` Beilei Xing
  2017-08-25  7:50 ` [PATCH 6/7] net/i40e: add cloud filter parsing function for GTP Beilei Xing
  2017-08-25  7:50 ` [PATCH 7/7] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
  6 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-08-25  7:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev

This patch adds FDIR support for GTP-C and GTP-U.
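
One detail worth calling out: the GTP item itself cannot tell GTP-C
from GTP-U, so the parser added below keys off the UDP destination
port (2123 for GTP-C, 2152 for GTP-U, per 3GPP). A sketch of that
dispatch (illustrative helper, not part of the patch):

    static int
    gtp_pctype_from_udp_dst(uint16_t udp_dst_be)
    {
        if (udp_dst_be == rte_cpu_to_be_16(2123))
            return I40E_FILTER_PCTYPE_GTPC;
        if (udp_dst_be == rte_cpu_to_be_16(2152))
            return I40E_FILTER_PCTYPE_GTPU;
        return -1; /* neither GTP-C nor GTP-U */
    }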

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c |  2 ++
 drivers/net/i40e/i40e_ethdev.h |  9 ++++++++
 drivers/net/i40e/i40e_fdir.c   | 42 ++++++++++++++++++++++++++++++++++--
 drivers/net/i40e/i40e_flow.c   | 48 +++++++++++++++++++++++++++++++++++++++++-
 4 files changed, 98 insertions(+), 3 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7c9e5af..d7ef782 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -8228,6 +8228,8 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
 		I40E_INSET_LAST_ETHER_TYPE,
+		[I40E_FILTER_PCTYPE_GTPC] = I40E_INSET_GTP_TEID,
+		[I40E_FILTER_PCTYPE_GTPU] = I40E_INSET_GTP_TEID,
 	};
 
 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6d871e4..1681bad 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -467,6 +467,14 @@ struct i40e_vmdq_info {
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
 /**
+ * A structure used to define the input for IPV4 GTP flow
+ */
+struct i40e_gtpv4_flow {
+	struct rte_eth_udpv4_flow udp; /**< IPv4 UDP fields to match. */
+	uint32_t teid;                 /**< TEID in big endian. */
+};
+
+/**
  * A union contains the inputs for all types of flow
  * Items in flows need to be in big endian
  */
@@ -480,6 +488,7 @@ union i40e_fdir_flow {
 	struct rte_eth_tcpv6_flow  tcp6_flow;
 	struct rte_eth_sctpv6_flow sctp6_flow;
 	struct rte_eth_ipv6_flow   ipv6_flow;
+	struct i40e_gtpv4_flow     gtpv4_flow;
 };
 
 /**
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index b0ba819..707ab4d 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -71,6 +71,7 @@
 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
 #define I40E_FDIR_UDP_DEFAULT_LEN           400
+#define I40E_FDIR_GTP_DEFAULT_LEN           384
 
 /* Wait time for fdir filter programming */
 #define I40E_FDIR_MAX_WAIT_US 10000
@@ -949,6 +950,7 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 	uint8_t len = 2 * sizeof(struct ether_addr);
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
+	uint8_t pctype = fdir_input->pctype;
 	static const uint8_t next_proto[] = {
 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
@@ -960,6 +962,8 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+		[I40E_FILTER_PCTYPE_GTPC] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_GTPU] = IPPROTO_UDP,
 	};
 
 	raw_pkt += 2 * sizeof(struct ether_addr);
@@ -975,7 +979,7 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 	raw_pkt += sizeof(uint16_t);
 	len += sizeof(uint16_t);
 
-	switch (fdir_input->pctype) {
+	switch (pctype) {
 	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
 		*ether_type = fdir_input->flow.l2_flow.ether_type;
 		break;
@@ -984,6 +988,8 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
 	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
 	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	case I40E_FILTER_PCTYPE_GTPC:
+	case I40E_FILTER_PCTYPE_GTPU:
 		ip = (struct ipv4_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
@@ -1062,9 +1068,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 	struct udp_hdr *udp;
 	struct tcp_hdr *tcp;
 	struct sctp_hdr *sctp;
+	struct rte_flow_item_gtp *gtp;
 	uint8_t size, dst = 0;
 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
 	int len;
+	uint8_t pctype = fdir_input->pctype;
 
 	/* fill the ethernet and IP head */
 	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
@@ -1073,7 +1081,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		return -EINVAL;
 
 	/* fill the L4 head */
-	switch (fdir_input->pctype) {
+	switch (pctype) {
 	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
@@ -1174,6 +1182,36 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 			payload += sizeof(struct arp_hdr);
 		set_idx = I40E_FLXPLD_L2_IDX;
 		break;
+	case I40E_FILTER_PCTYPE_GTPC:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		udp->dst_port = rte_cpu_to_be_16(2123);
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+		gtp = (struct rte_flow_item_gtp *)
+			((unsigned char *)udp + sizeof(struct udp_hdr));
+		gtp->v_pt_rsv_flags = 0x30;
+		gtp->msg_len = rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+		gtp->teid = fdir_input->flow.gtpv4_flow.teid;
+		gtp->msg_type = 0xff;
+
+		payload = (unsigned char *)gtp +
+			   sizeof(struct rte_flow_item_gtp);
+		break;
+	case I40E_FILTER_PCTYPE_GTPU:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		udp->dst_port = rte_cpu_to_be_16(2152);
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+		gtp = (struct rte_flow_item_gtp *)
+			((unsigned char *)udp + sizeof(struct udp_hdr));
+		gtp->v_pt_rsv_flags = 0x30;
+		gtp->msg_len = rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+		gtp->teid = fdir_input->flow.gtpv4_flow.teid;
+		gtp->msg_type = 0xff;
+
+		payload = (unsigned char *)gtp +
+			   sizeof(struct rte_flow_item_gtp);
+		break;
 	default:
 		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
 		return -EINVAL;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 73af7fd..698368e 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -189,6 +189,14 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv4_gtp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
@@ -1576,6 +1584,7 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
@@ -2326,10 +2335,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	enum i40e_filter_pctype pctype = 0;
+	uint8_t pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
@@ -2351,6 +2361,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	uint16_t outer_tpid;
 	uint16_t ether_type;
 	uint32_t vtc_flow_cpu;
+	uint16_t udp_dst_port = 0;
 	int ret;
 
 	memset(off_arr, 0, sizeof(off_arr));
@@ -2631,11 +2642,46 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					filter->input.flow.udp6_flow.dst_port =
 						udp_spec->hdr.dst_port;
 				}
+
+				udp_dst_port = udp_spec->hdr.dst_port;
 			}
 
 			layer_idx = I40E_FLXPLD_L4_IDX;
 
 			break;
+		case RTE_FLOW_ITEM_TYPE_GTP:
+			gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+				    gtp_mask->msg_type ||
+				    gtp_mask->msg_len ||
+				    gtp_mask->teid != UINT32_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				input_set |= I40E_INSET_GTP_TEID;
+				filter->input.flow.gtpv4_flow.teid =
+					gtp_spec->teid;
+
+				if (udp_dst_port == rte_cpu_to_be_16(2123))
+					pctype = I40E_FILTER_PCTYPE_GTPC;
+				else if (udp_dst_port == rte_cpu_to_be_16(2152))
+					pctype = I40E_FILTER_PCTYPE_GTPU;
+				else {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP flow");
+					return -rte_errno;
+				}
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
 			sctp_spec =
 				(const struct rte_flow_item_sctp *)item->spec;
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH 6/7] net/i40e: add cloud filter parsing function for GTP
  2017-08-25  7:50 [PATCH 0/7] GTP enabling Beilei Xing
                   ` (4 preceding siblings ...)
  2017-08-25  7:50 ` [PATCH 5/7] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-08-25  7:50 ` Beilei Xing
  2017-08-25  7:50 ` [PATCH 7/7] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
  6 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-08-25  7:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev

This patch adds the i40e_flow_parse_gtp_filter parsing
function for GTP-C and GTP-U.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |   2 +
 drivers/net/i40e/i40e_flow.c   | 161 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 163 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 1681bad..3b7a837 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -699,6 +699,8 @@ enum i40e_tunnel_type {
 	I40E_TUNNEL_TYPE_MPLSoUDP,
 	I40E_TUNNEL_TYPE_MPLSoGRE,
 	I40E_TUNNEL_TYPE_QINQ,
+	I40E_TUNNEL_TYPE_GTPC,
+	I40E_TUNNEL_TYPE_GTPU,
 	I40E_TUNNEL_TYPE_MAX,
 };
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 698368e..583c1b2 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -125,6 +125,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 				       const struct rte_flow_action actions[],
 				       struct rte_flow_error *error,
 				       union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+				      const struct rte_flow_attr *attr,
+				      const struct rte_flow_item pattern[],
+				      const struct rte_flow_action actions[],
+				      struct rte_flow_error *error,
+				      union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
 				      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -1741,6 +1747,8 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+	/* GTP-C & GTP-U */
+	{ pattern_fdir_ipv4_gtp, i40e_flow_parse_gtp_filter },
 	/* QINQ */
 	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
 };
@@ -3682,6 +3690,159 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 }
 
 /* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ *    filled with 0.
+ */
+static int
+i40e_flow_parse_gtp_pattern(__rte_unused struct rte_eth_dev *dev,
+			    const struct rte_flow_item *pattern,
+			    struct rte_flow_error *error,
+			    struct i40e_tunnel_filter_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+	const struct rte_flow_item_gtp *gtp_spec;
+	const struct rte_flow_item_gtp *gtp_mask;
+	enum rte_flow_item_type item_type;
+	uint16_t udp_dst_port = 0;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+			/* IPv4 is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = (const struct rte_flow_item_udp *)item->spec;
+			udp_mask = (const struct rte_flow_item_udp *)item->mask;
+			if (!udp_spec || !udp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Need UDP dst spec and mask to distinguish gtp-c/gtp-u");
+				return -rte_errno;
+			}
+			if (udp_mask->hdr.src_port ||
+			    udp_mask->hdr.dst_port ||
+			    udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP item mask");
+				return -rte_errno;
+			}
+			udp_dst_port = udp_spec->hdr.dst_port;
+			break;
+		case RTE_FLOW_ITEM_TYPE_GTP:
+			gtp_spec =
+				(const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask =
+				(const struct rte_flow_item_gtp *)item->mask;
+
+			if (!gtp_spec || !gtp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP item");
+				return -rte_errno;
+			}
+
+			if (gtp_mask->v_pt_rsv_flags ||
+			    gtp_mask->msg_type ||
+			    gtp_mask->msg_len ||
+			    gtp_mask->teid != UINT32_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+				return -rte_errno;
+			}
+
+			if (udp_dst_port == rte_cpu_to_be_16(2123))
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+			else if (udp_dst_port == rte_cpu_to_be_16(2152))
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+			else {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP flow");
+				return -rte_errno;
+			}
+
+			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+			   const struct rte_flow_attr *attr,
+			   const struct rte_flow_item pattern[],
+			   const struct rte_flow_action actions[],
+			   struct rte_flow_error *error,
+			   union i40e_filter_t *filter)
+{
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
+	int ret;
+
+	ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+					  error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+	return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
  * 2. Supported filter types: QINQ.
  * 3. Mask of fields which need to be matched should be
  *    filled with 1.
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH 7/7] net/i40e: enable cloud filter for GTP-C and GTP-U
  2017-08-25  7:50 [PATCH 0/7] GTP enabling Beilei Xing
                   ` (5 preceding siblings ...)
  2017-08-25  7:50 ` [PATCH 6/7] net/i40e: add cloud filter parsing function for GTP Beilei Xing
@ 2017-08-25  7:50 ` Beilei Xing
  6 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-08-25  7:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev

GTP-C and GTP-U are not natively supported by the cloud
filter due to limited HW resources, so this patch enables
GTP-C and GTP-U cloud filters by replacing the inner_mac
and TUNNEL_KEY filter types.
The replacement is configured when the first GTP-C or
GTP-U filter rule is added, and it is only invalidated by
a NIC core reset.
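
As a worked example of how the 32-bit TEID is carried in the big-buffer
cloud filter below, a standalone sketch (the TEID value is illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t teid_le = 0x12345678;		/* example TEID, LE order */
	uint16_t word0 = teid_le >> 16;		/* 0x1234 -> FV_FLU_0X10_WORD0 */
	uint16_t word1 = teid_le & 0xFFFF;	/* 0x5678 -> FV_FLU_0X10_WORD1 */

	printf("word0=0x%04" PRIx16 " word1=0x%04" PRIx16 "\n", word0, word1);
	return 0;
}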

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 153 +++++++++++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_ethdev.h |   3 +
 2 files changed, 156 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index d7ef782..974dc8d 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -7170,6 +7170,123 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 	return status;
 }
 
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* For GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_GTPC;
+	filter_replace.tr_bit = 22 | 0x80;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_GTPU;
+	filter_replace.tr_bit = 21 | 0x80;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* for GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_TEID_GTPC;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_TEID_GTPU;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
 int
 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		      struct i40e_tunnel_filter_conf *tunnel_filter,
@@ -7270,6 +7387,36 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		big_buffer = 1;
 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
 		break;
+	case I40E_TUNNEL_TYPE_GTPC:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
+			teid_le >> 16;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
+	case I40E_TUNNEL_TYPE_GTPU:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
+			teid_le >> 16;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
 	case I40E_TUNNEL_TYPE_QINQ:
 		if (!pf->qinq_replace_flag) {
 			ret = i40e_cloud_filter_qinq_create(pf);
@@ -7300,6 +7447,12 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
 		pfilter->element.flags =
 			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
 		pfilter->element.flags |=
 			I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 3b7a837..5bd0fb7 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -652,6 +652,8 @@ struct i40e_ethertype_rule {
 #define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
 #define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
 #define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+#define I40E_AQC_ADD_L1_FILTER_TEID_GTPC 0x12
+#define I40E_AQC_ADD_L1_FILTER_TEID_GTPU 0x13
 
 enum i40e_tunnel_iptype {
 	I40E_TUNNEL_IPTYPE_IPV4,
@@ -886,6 +888,7 @@ struct i40e_pf {
 	bool floating_veb_list[I40E_MAX_VF];
 	struct i40e_flow_list flow_list;
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
+	bool gtp_replace_flag; /* 1 - GTP-C/U filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
 };
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* Re: [PATCH 2/7] ethdev: add GTP item
  2017-08-25  7:50 ` [PATCH 2/7] ethdev: add GTP item Beilei Xing
@ 2017-09-06 16:02   ` Adrien Mazarguil
  2017-09-07  6:31     ` Xing, Beilei
  0 siblings, 1 reply; 116+ messages in thread
From: Adrien Mazarguil @ 2017-09-06 16:02 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, dev

Hi Beilei,

On Fri, Aug 25, 2017 at 03:50:25PM +0800, Beilei Xing wrote:
> This patch adds GTP items to generic rte flow.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>

Thanks, several comments below.

> ---
>  doc/guides/prog_guide/rte_flow.rst | 12 ++++++++++++
>  lib/librte_ether/rte_flow.h        | 31 +++++++++++++++++++++++++++++++
>  2 files changed, 43 insertions(+)
> 
> diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
> index 662a912..2843b71 100644
> --- a/doc/guides/prog_guide/rte_flow.rst
> +++ b/doc/guides/prog_guide/rte_flow.rst
> @@ -955,6 +955,18 @@ Usage example, fuzzy match a TCPv4 packets:
>     | 4     | END      |
>     +-------+----------+
>  
> +Item: ``GTP``
> +^^^^^^^^^^^^^^
> +
> +Matches a GTP header.
> +
> +- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
> +  extension header flag (1b), sequence number flag (1b), N-PDU number flag(1b).

Missing space after "flag". This line is also too long; you should add a
line break after the last comma.

> +- ``msg_type``: message type.
> +- ``msg_len``: message length.
> +- ``teid``: TEID.
> +- Default ``mask`` matches teid only.
> +
>  Actions
>  ~~~~~~~
>  
> diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
> index bba6169..73305aa 100644
> --- a/lib/librte_ether/rte_flow.h
> +++ b/lib/librte_ether/rte_flow.h
> @@ -309,6 +309,13 @@ enum rte_flow_item_type {
>  	 * See struct rte_flow_item_fuzzy.
>  	 */
>  	RTE_FLOW_ITEM_TYPE_FUZZY,
> +
> +	/**
> +	 * Matches a GTP header

Missing "." (pedantic mode enabled).

> +	 *
> +	 * See struct rte_flow_item.

Wrong structure, should be rte_flow_item_gtp.

> +	 */
> +	RTE_FLOW_ITEM_TYPE_GTP,
>  };
>  
>  /**
> @@ -735,6 +742,30 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
>  #endif
>  
>  /**
> + * RTE_FLOW_ITEM_TYPE_GTP.
> + *
> + * Matches a GTP header.
> + */
> +struct rte_flow_item_gtp {
> +	/**
> +	 * Version(2b), Protocol type(1b), Reserved(1b),
> +	 * Extension header flag(1b),
> +	 * Sequqnce number flag(1b),
> +	 * N-PDU number flag(1b).
> +	 */

No need to capitalize everything, only the first word. Several spaces are
missing between the name and the bit widths ("foo(42b)" => "foo (42b)").

There's also a typo on "Sequqnce".

> +	uint8_t v_pt_rsv_flags;
> +	uint8_t msg_type; /**< Message type */
> +	rte_be16_t msg_len; /**< Message length */

These comments lack ending ".".

> +	rte_be32_t teid;

This field lacks a comment; even if obvious, please add it for
consistency. You can use this opportunity to expand the acronym as well.

> +};
> +
> +/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
> +#ifndef __cplusplus
> +	static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
> +		.teid = RTE_BE32(0xffffffff),
> +	};
> +#endif

Indentation is wrong.

> +/**
>   * Matching pattern item definition.
>   *
>   * A pattern is formed by stacking items starting from the lowest protocol
> -- 
> 2.5.5
> 

It's not a problem if you want to keep them separate; however, note that you
could make the testpmd update part of this commit. The API change can be
considered useless without an implementation counterpart.

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH 3/7] app/testpmd: add GTP fields to flow command
  2017-08-25  7:50 ` [PATCH 3/7] app/testpmd: add GTP fields to flow command Beilei Xing
@ 2017-09-06 16:03   ` Adrien Mazarguil
  0 siblings, 0 replies; 116+ messages in thread
From: Adrien Mazarguil @ 2017-09-06 16:03 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, dev

On Fri, Aug 25, 2017 at 03:50:26PM +0800, Beilei Xing wrote:
> This patch exposes the following item fields through the flow command:
> 
> - GTP TEID
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>

Minor nits, see below.

> ---
>  app/test-pmd/cmdline_flow.c                 | 22 ++++++++++++++++++++++
>  app/test-pmd/config.c                       |  1 +
>  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 ++++
>  3 files changed, 27 insertions(+)
> 
> diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
> index a17a004..4ab5bcc 100644
> --- a/app/test-pmd/cmdline_flow.c
> +++ b/app/test-pmd/cmdline_flow.c
> @@ -171,6 +171,8 @@ enum index {
>  	ITEM_GRE_PROTO,
>  	ITEM_FUZZY,
>  	ITEM_FUZZY_THRESH,
> +	ITEM_GTP,
> +	ITEM_GTP_TEID,
>  
>  	/* Validate/create actions. */
>  	ACTIONS,
> @@ -451,6 +453,7 @@ static const enum index next_item[] = {
>  	ITEM_MPLS,
>  	ITEM_GRE,
>  	ITEM_FUZZY,
> +	ITEM_GTP,
>  	ZERO,
>  };
>  
> @@ -588,6 +591,12 @@ static const enum index item_gre[] = {
>  	ZERO,
>  };
>  
> +static const enum index item_gtp[] = {
> +	ITEM_GTP_TEID,
> +	ITEM_NEXT,
> +	ZERO,
> +};
> +
>  static const enum index next_action[] = {
>  	ACTION_END,
>  	ACTION_VOID,
> @@ -1421,6 +1430,19 @@ static const struct token token_list[] = {
>  		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
>  					thresh)),
>  	},
> +	[ITEM_GTP] = {
> +		.name = "gtp",
> +		.help = "match GTP header",
> +		.priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
> +		.next = NEXT(item_gtp),
> +		.call = parse_vc,
> +	},
> +	[ITEM_GTP_TEID] = {
> +		.name = "teid",
> +		.help = "GTP TEID",

No need to repeat "GTP". You may also expand the TEID acronym.

> +		.next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
> +		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
> +	},
>  
>  	/* Validate/create actions. */
>  	[ACTIONS] = {
> diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
> index 3ae3e1c..1f320a0 100644
> --- a/app/test-pmd/config.c
> +++ b/app/test-pmd/config.c
> @@ -947,6 +947,7 @@ static const struct {
>  	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
>  	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
>  	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
> +	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
>  };
>  
>  /** Compute storage space needed by item specification. */
> diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> index 2ed62f5..6ec463e 100644
> --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> @@ -2696,6 +2696,10 @@ This section lists supported pattern items and their attributes, if any.
>  
>    - ``thresh {unsigned}``: accuracy threshold.
>  
> +- ``gtp``: match GTP header.
> +
> +    - ``teid {unsigned}``: GTP TEID.
> +

Same comment here. Indentation is also wrong.

Thanks.

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH 2/7] ethdev: add GTP item
  2017-09-06 16:02   ` Adrien Mazarguil
@ 2017-09-07  6:31     ` Xing, Beilei
  0 siblings, 0 replies; 116+ messages in thread
From: Xing, Beilei @ 2017-09-07  6:31 UTC (permalink / raw)
  To: Adrien Mazarguil; +Cc: Wu, Jingjing, dev

Hi Adrien,

> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> Sent: Thursday, September 7, 2017 12:03 AM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH 2/7] ethdev: add GTP item
> 
> Hi Beilei,
> 
> On Fri, Aug 25, 2017 at 03:50:25PM +0800, Beilei Xing wrote:
> > This patch adds GTP items to generic rte flow.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> 
> Thanks, several comments below.

Thanks for all the comments, I will address them in v2.

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v2 0/6] GPT-C and GTP-U enabling
  2017-08-25  7:50 ` [PATCH 1/7] net/i40e: support RSS for GTP-C and GTP-U Beilei Xing
@ 2017-09-07 11:20   ` Beilei Xing
  2017-09-07 11:20     ` [PATCH v2 1/6] net/i40e: support RSS for GTP-C and GTP-U Beilei Xing
                       ` (6 more replies)
  0 siblings, 7 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-07 11:20 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch set enables RSS/FDIR/cloud filter for
GTP-C and GTP-U.
It depends on Kirill's patch:
http://www.dpdk.org/dev/patchwork/patch/28294/

v2 changes:
 - Enable RSS/FDIR/cloud filter dynamically by checking the profile
 - Add GTPC and GTPU items to distinguish rules for GTP-C and GTP-U
 - Rework the FDIR/cloud filter enabling functions

Beilei Xing (6):
  net/i40e: support RSS for GTP-C and GTP-U
  ethdev: add GTPC and GTPU items
  net/i40e: finish integration FDIR with generic flow API
  net/i40e: add FDIR support for GTP-C and GTP-U
  net/i40e: add cloud filter parsing function for GTP
  net/i40e: enable cloud filter for GTP-C and GTP-U

 app/test-pmd/cmdline_flow.c                 |  44 +++
 app/test-pmd/config.c                       |   2 +
 doc/guides/prog_guide/rte_flow.rst          |  26 ++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |   8 +
 drivers/net/i40e/i40e_ethdev.c              | 356 +++++++++++++++++-
 drivers/net/i40e/i40e_ethdev.h              | 139 ++++++-
 drivers/net/i40e/i40e_fdir.c                | 544 +++++++++++++++++++++++++++-
 drivers/net/i40e/i40e_flow.c                | 403 ++++++++++++++++-----
 drivers/net/i40e/rte_pmd_i40e.c             |   4 +
 lib/librte_ether/rte_flow.h                 |  44 +++
 10 files changed, 1448 insertions(+), 122 deletions(-)

-- 
2.5.5

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v2 1/6] net/i40e: support RSS for GTP-C and GTP-U
  2017-09-07 11:20   ` [PATCH v2 0/6] GPT-C and GTP-U enabling Beilei Xing
@ 2017-09-07 11:20     ` Beilei Xing
  2017-09-18 14:17       ` Bruce Richardson
  2017-09-07 11:20     ` [PATCH v2 2/6] ethdev: add GTPC and GTPU items Beilei Xing
                       ` (5 subsequent siblings)
  6 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-07 11:20 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

GTP-C and GTP-U are supported by a new DDP profile.
Enable RSS for GTP-C and GTP-U after the profile is
downloaded.
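
For illustration, a standalone sketch of how a pctype learned from the
profile maps to an RSS HENA bit, mirroring what the patch below does; the
pctype value 22 is hypothetical, the real value is read from the DDP
package at load time:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t gtpu_pctype = 22;	/* hypothetical value from the profile */
	bool valid = true;		/* set once the profile exposes the pctype */
	uint64_t hena = 0;

	if (valid)
		hena |= 1ULL << gtpu_pctype;	/* enable RSS for that pctype */

	printf("hena = 0x%016" PRIx64 "\n", hena);
	return 0;
}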

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c  | 168 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_ethdev.h  |  21 +++++
 drivers/net/i40e/rte_pmd_i40e.c |   4 +
 3 files changed, 193 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4a2e3f2..5483622 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -65,6 +65,7 @@
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
 #include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
 
 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
@@ -1034,6 +1035,24 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static void
+i40e_init_customer_pctype(struct i40e_pf *pf)
+{
+	int i;
+
+	for (i = I40E_PERSONALIZED_GTPC; i < I40E_PERSONALIZED_MAX; i++) {
+		pf->new_pctype[i].index = i;
+		pf->new_pctype[i].pctype = I40E_INVALID_PCTYPE;
+		pf->new_pctype[i].valid = false;
+		if (i == I40E_PERSONALIZED_GTPC)
+			rte_memcpy(pf->new_pctype[i].name, "GTPC",
+				   sizeof("GTPC"));
+		else if (i == I40E_PERSONALIZED_GTPU)
+			rte_memcpy(pf->new_pctype[i].name, "GTPU",
+				   sizeof("GTPU"));
+	}
+}
+
 static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
@@ -1299,6 +1318,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	/* initialize Traffic Manager configuration */
 	i40e_tm_conf_init(dev);
 
+	i40e_init_customer_pctype(pf);
+
 	ret = i40e_init_ethtype_filter_list(dev);
 	if (ret < 0)
 		goto err_init_ethtype_filter_list;
@@ -1903,6 +1924,30 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	return i40e_phy_conf_link(hw, abilities, speed, true);
 }
 
+static void
+i40e_config_new_pctype(struct i40e_pf *pf, bool enable)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint64_t hena;
+	int i;
+
+	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+	for (i = 0; i < I40E_PERSONALIZED_MAX; i++) {
+		if (pf->new_pctype[i].valid == true) {
+			if (enable)
+				hena |= 1ULL << pf->new_pctype[i].pctype;
+			else
+				hena &= ~(1ULL << pf->new_pctype[i].pctype);
+		}
+	}
+
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+	I40E_WRITE_FLUSH(hw);
+}
+
 static int
 i40e_dev_start(struct rte_eth_dev *dev)
 {
@@ -2048,6 +2093,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
 			    "please call hierarchy_commit() "
 			    "before starting the port");
 
+	i40e_config_new_pctype(pf, true);
+
 	return I40E_SUCCESS;
 
 err_up:
@@ -2128,6 +2175,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
 	uint32_t reg;
 	int i;
 
+	i40e_config_new_pctype(pf, false);
+
 	PMD_INIT_FUNC_TRACE();
 
 	i40e_dev_stop(dev);
@@ -6739,6 +6788,9 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
 	return 0;
 }
 
+#define I40E_PROFILE_INFO_SIZE sizeof(struct i40e_profile_info)
+#define I40E_MAX_PROFILE_NUM 16
+
 static int
 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
 {
@@ -6760,6 +6812,7 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
 	else
 		hena &= ~I40E_RSS_HENA_ALL;
 	hena |= i40e_config_hena(rss_hf, hw->mac.type);
+
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
 	I40E_WRITE_FLUSH(hw);
@@ -10858,6 +10911,121 @@ is_i40e_supported(struct rte_eth_dev *dev)
 	return is_device_supported(dev, &rte_i40e_pmd);
 }
 
+struct i40e_personalized_pctype*
+i40e_find_personalized_pctype(struct i40e_pf *pf, uint8_t index)
+{
+	int i;
+
+	for (i = 0; i < I40E_PERSONALIZED_MAX; i++) {
+		if (pf->new_pctype[i].index == index)
+			return &pf->new_pctype[i];
+	}
+	return NULL;
+}
+
+void
+i40e_update_personalized_pctype(struct i40e_pf *pf, uint8_t *pkg,
+				uint32_t pkg_size)
+{
+	int proto_num;
+	struct rte_pmd_i40e_proto_info *proto;
+	int pctype_num;
+	struct rte_pmd_i40e_ptype_info *pctype;
+	struct i40e_personalized_pctype *new_pctype = NULL;
+	uint8_t proto_id;
+	uint8_t pctype_value;
+	char *name;
+	int i, j, n;
+	int ret;
+
+	/* get information about protocols */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+		(uint8_t *)&proto_num, sizeof(proto_num),
+		RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol number");
+		return;
+	}
+	if (!proto_num) {
+		PMD_DRV_LOG(INFO, "No new protocol added");
+		return;
+	}
+
+	proto = (struct rte_pmd_i40e_proto_info *)
+		malloc(proto_num * sizeof(struct rte_pmd_i40e_proto_info));
+	if (!proto) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return;
+	}
+
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)proto, proto_num,
+					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol list");
+		free(proto);
+		return;
+	}
+
+	/* get information about packet types */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				     (uint8_t *)&pctype_num, sizeof(pctype_num),
+				     RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype number");
+		free(proto);
+		return;
+	}
+	if (!pctype_num) {
+		PMD_DRV_LOG(INFO, "No new pctype added");
+		free(proto);
+		return;
+	}
+	pctype = (struct rte_pmd_i40e_ptype_info *)
+		malloc(pctype_num * sizeof(struct rte_pmd_i40e_ptype_info));
+	if (!pctype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		free(proto);
+		return;
+	}
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)pctype, pctype_num,
+					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype list");
+		free(pctype);
+		free(proto);
+		return;
+	}
+	for (i = 0; i < pctype_num; i++) {
+		pctype_value = pctype[i].ptype_id;
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = pctype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				name = proto[n].name;
+				if (!memcmp(name, "GTPC", sizeof("GTPC")))
+					new_pctype =
+					       i40e_find_personalized_pctype(pf,
+							I40E_PERSONALIZED_GTPC);
+				else if (!memcmp(name, "GTPU", sizeof("GTPU")))
+					new_pctype =
+					       i40e_find_personalized_pctype(pf,
+							I40E_PERSONALIZED_GTPU);
+				if (new_pctype) {
+					new_pctype->pctype = pctype_value;
+					new_pctype->valid = true;
+				}
+				break;
+			}
+		}
+	}
+	free(pctype);
+	free(proto);
+}
+
 /* Create a QinQ cloud filter
  *
  * The Fortville NIC has limited resources for tunnel filters,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 48abc05..4f0aeda 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -722,6 +722,21 @@ struct i40e_tm_conf {
 	bool committed;
 };
 
+enum i40e_new_proto {
+	I40E_PERSONALIZED_GTPC = 0,
+	I40E_PERSONALIZED_GTPU,
+	I40E_PERSONALIZED_MAX,
+};
+
+#define I40E_INVALID_PCTYPE     0xFF
+#define I40E_NEW_PROTO_NAME_LEN 8
+struct i40e_personalized_pctype {
+	uint8_t index;    /* Indicate which personalized pctype */
+	char name[I40E_NEW_PROTO_NAME_LEN];
+	uint8_t pctype;   /* New pctype value */
+	bool valid;   /* Check if it's valid */
+};
+
 /*
  * Structure to store private data specific for PF instance.
  */
@@ -786,6 +801,8 @@ struct i40e_pf {
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
+	/* customer personalized pctype */
+	struct i40e_personalized_pctype new_pctype[I40E_PERSONALIZED_MAX];
 };
 
 enum pending_msg {
@@ -1003,6 +1020,10 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
 int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void i40e_tm_conf_init(struct rte_eth_dev *dev);
 void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+struct i40e_personalized_pctype*
+i40e_find_personalized_pctype(struct i40e_pf *pf, uint8_t index);
+void i40e_update_personalized_pctype(struct i40e_pf *pf, uint8_t *pkg,
+				     uint32_t pkg_size);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index 157fc12..f9283ce 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1565,6 +1565,7 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
 {
 	struct rte_eth_dev *dev;
 	struct i40e_hw *hw;
+	struct i40e_pf *pf;
 	struct i40e_package_header *pkg_hdr;
 	struct i40e_generic_seg_header *profile_seg_hdr;
 	struct i40e_generic_seg_header *metadata_seg_hdr;
@@ -1588,6 +1589,7 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
 		return -ENOTSUP;
 
 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 
 	if (size < (sizeof(struct i40e_package_header) +
 		    sizeof(struct i40e_metadata_segment) +
@@ -1608,6 +1610,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
 		return -EINVAL;
 	}
 
+	i40e_update_personalized_pctype(pf, buff, size);
+
 	/* Find metadata segment */
 	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
 							pkg_hdr);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v2 2/6] ethdev: add GTPC and GTPU items
  2017-09-07 11:20   ` [PATCH v2 0/6] GPT-C and GTP-U enabling Beilei Xing
  2017-09-07 11:20     ` [PATCH v2 1/6] net/i40e: support RSS for GTP-C and GTP-U Beilei Xing
@ 2017-09-07 11:20     ` Beilei Xing
  2017-09-07 12:19       ` Adrien Mazarguil
  2017-09-07 11:21     ` [PATCH v2 3/6] net/i40e: finish integration FDIR with generic flow API Beilei Xing
                       ` (4 subsequent siblings)
  6 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-07 11:20 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds GTPC and GTPU items to the generic rte_flow
API, and also exposes the following item fields through the
testpmd flow command (a short usage sketch follows the list):

- GTPC TEID
- GTPU TEID
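
A minimal sketch of how an application could match a GTP-C TEID with the
new item; field names follow the structure added below and the TEID value
is illustrative:

#include <rte_byteorder.h>
#include <rte_flow.h>

static const struct rte_flow_item_gtp gtpc_spec = {
	.teid = RTE_BE32(0x1234),		/* example TEID */
};
static const struct rte_flow_item_gtp gtpc_mask = {
	.teid = RTE_BE32(0xffffffff),		/* same as the default mask */
};
static const struct rte_flow_item gtpc_item = {
	.type = RTE_FLOW_ITEM_TYPE_GTPC,
	.spec = &gtpc_spec,
	.mask = &gtpc_mask,
};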

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 app/test-pmd/cmdline_flow.c                 | 44 +++++++++++++++++++++++++++++
 app/test-pmd/config.c                       |  2 ++
 doc/guides/prog_guide/rte_flow.rst          | 26 +++++++++++++++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  8 ++++++
 lib/librte_ether/rte_flow.h                 | 44 +++++++++++++++++++++++++++++
 5 files changed, 124 insertions(+)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a17a004..72d159c 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -171,6 +171,10 @@ enum index {
 	ITEM_GRE_PROTO,
 	ITEM_FUZZY,
 	ITEM_FUZZY_THRESH,
+	ITEM_GTPC,
+	ITEM_GTPC_TEID,
+	ITEM_GTPU,
+	ITEM_GTPU_TEID,
 
 	/* Validate/create actions. */
 	ACTIONS,
@@ -451,6 +455,8 @@ static const enum index next_item[] = {
 	ITEM_MPLS,
 	ITEM_GRE,
 	ITEM_FUZZY,
+	ITEM_GTPC,
+	ITEM_GTPU,
 	ZERO,
 };
 
@@ -588,6 +594,18 @@ static const enum index item_gre[] = {
 	ZERO,
 };
 
+static const enum index item_gtpc[] = {
+	ITEM_GTPC_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
+static const enum index item_gtpu[] = {
+	ITEM_GTPU_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
 static const enum index next_action[] = {
 	ACTION_END,
 	ACTION_VOID,
@@ -1421,6 +1439,32 @@ static const struct token token_list[] = {
 		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
 					thresh)),
 	},
+	[ITEM_GTPC] = {
+		.name = "gtpc",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtpc),
+		.call = parse_vc,
+	},
+	[ITEM_GTPC_TEID] = {
+		.name = "teid",
+		.help = "TUNNEL ENDPOINT IDENTIFIER",
+		.next = NEXT(item_gtpc, NEXT_ENTRY(UNSIGNED), item_param),
+		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
+	},
+	[ITEM_GTPU] = {
+		.name = "gtpu",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtpu),
+		.call = parse_vc,
+	},
+	[ITEM_GTPU_TEID] = {
+		.name = "teid",
+		.help = "TUNNEL ENDPOINT IDENTIFIER",
+		.next = NEXT(item_gtpu, NEXT_ENTRY(UNSIGNED), item_param),
+		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
+	},
 
 	/* Validate/create actions. */
 	[ACTIONS] = {
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 3ae3e1c..be4c3b9 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -947,6 +947,8 @@ static const struct {
 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
+	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
 };
 
 /** Compute storage space needed by item specification. */
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 662a912..9e7179a 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -955,6 +955,32 @@ Usage example, fuzzy match a TCPv4 packets:
    | 4     | END      |
    +-------+----------+
 
+Item: ``GTPC``
+^^^^^^^^^^^^^^
+
+Matches a GTP header.
+
+- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
+  extension header flag (1b), sequence number flag (1b), N-PDU number
+  flag (1b).
+- ``msg_type``: message type.
+- ``msg_len``: message length.
+- ``teid``: TEID.
+- Default ``mask`` matches teid only.
+
+Item: ``GTPU``
+^^^^^^^^^^^^^^
+
+Matches a GTP header.
+
+- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
+  extension header flag (1b), sequence number flag (1b), N-PDU number
+  flag (1b).
+- ``msg_type``: message type.
+- ``msg_len``: message length.
+- ``teid``: TEID.
+- Default ``mask`` matches teid only.
+
 Actions
 ~~~~~~~
 
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 2ed62f5..2ca36ad 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2696,6 +2696,14 @@ This section lists supported pattern items and their attributes, if any.
 
   - ``thresh {unsigned}``: accuracy threshold.
 
+- ``gtpc``: match GTP header.
+
+  - ``teid {unsigned}``: Tunnel endpoint identifier.
+
+- ``gtpu``: match GTP header.
+
+  - ``teid {unsigned}``: Tunnel endpoint identifier.
+
 Actions list
 ^^^^^^^^^^^^
 
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index bba6169..8b24cac 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -309,6 +309,24 @@ enum rte_flow_item_type {
 	 * See struct rte_flow_item_fuzzy.
 	 */
 	RTE_FLOW_ITEM_TYPE_FUZZY,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-C packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPC,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-U packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPU,
 };
 
 /**
@@ -735,6 +753,32 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
 #endif
 
 /**
+ * RTE_FLOW_ITEM_TYPE_GTP.
+ *
+ * Matches a GTP header.
+ */
+struct rte_flow_item_gtp {
+	/**
+	 * Version (3b), protocol type (1b), reserved (1b),
+	 * extension header flag (1b),
+	 * sequence number flag (1b),
+	 * N-PDU number flag (1b).
+	 */
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type; /**< Message type. */
+	rte_be16_t msg_len; /**< Message length. */
+	rte_be32_t teid; /**< Tunnel endpoint identifier. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
+	.msg_type = 0x00,
+	.teid = RTE_BE32(0xffffffff),
+};
+#endif
+
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v2 3/6] net/i40e: finish integration FDIR with generic flow API
  2017-09-07 11:20   ` [PATCH v2 0/6] GPT-C and GTP-U enabling Beilei Xing
  2017-09-07 11:20     ` [PATCH v2 1/6] net/i40e: support RSS for GTP-C and GTP-U Beilei Xing
  2017-09-07 11:20     ` [PATCH v2 2/6] ethdev: add GTPC and GTPU items Beilei Xing
@ 2017-09-07 11:21     ` Beilei Xing
  2017-09-07 11:21     ` [PATCH v2 4/6] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
                       ` (3 subsequent siblings)
  6 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-07 11:21 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

rte_eth_fdir_* structures are still used in the FDIR functions.
This patch adds i40e-private FDIR-related structures and
functions to finish integrating FDIR with the generic flow API.
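
For orientation, a sketch of an entry built with the new i40e-private
structures and consumed by i40e_flow_add_del_fdir_filter(); these are
driver-internal types, so the snippet only builds inside the PMD, and the
values are illustrative:

#include "i40e_ethdev.h"	/* driver-internal header */

/* Illustrative FDIR entry: match IPv4/UDP, send matches to queue 4. */
static struct i40e_fdir_filter_conf conf = {
	.soft_id = 1,
	.input = {
		.pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
		/* .flow and .flow_ext are filled from the rte_flow pattern */
	},
	.action = {
		.rx_queue = 4,
		.behavior = I40E_FDIR_ACCEPT,
		.report_status = I40E_FDIR_REPORT_ID,
	},
};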

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  94 +++++++-
 drivers/net/i40e/i40e_fdir.c   | 490 +++++++++++++++++++++++++++++++++++++++--
 drivers/net/i40e/i40e_flow.c   |  76 +++----
 3 files changed, 597 insertions(+), 63 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 4f0aeda..ca8d201 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -460,6 +460,91 @@ struct i40e_vmdq_info {
 #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
+/**
+ * A union contains the inputs for all types of flow
+ * Items in flows need to be in big endian
+ */
+union i40e_fdir_flow {
+	struct rte_eth_l2_flow     l2_flow;
+	struct rte_eth_udpv4_flow  udp4_flow;
+	struct rte_eth_tcpv4_flow  tcp4_flow;
+	struct rte_eth_sctpv4_flow sctp4_flow;
+	struct rte_eth_ipv4_flow   ip4_flow;
+	struct rte_eth_udpv6_flow  udp6_flow;
+	struct rte_eth_tcpv6_flow  tcp6_flow;
+	struct rte_eth_sctpv6_flow sctp6_flow;
+	struct rte_eth_ipv6_flow   ipv6_flow;
+};
+
+/**
+ * A structure used to contain extend input of flow
+ */
+struct i40e_fdir_flow_ext {
+	uint16_t vlan_tci;
+	uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/**< It is filled by the flexible payload to match. */
+	uint8_t is_vf;   /**< 1 for VF, 0 for port dev */
+	uint16_t dst_id; /**< VF ID, available when is_vf is 1*/
+};
+
+/**
+ * A structure used to define the input for a flow director filter entry
+ */
+struct i40e_fdir_input {
+	enum i40e_filter_pctype pctype;
+	union i40e_fdir_flow flow;
+	/**< Flow fields to match, dependent on flow_type */
+	struct i40e_fdir_flow_ext flow_ext;
+	/**< Additional fields to match */
+};
+
+/**
+ * Behavior will be taken if FDIR match
+ */
+enum i40e_fdir_behavior {
+	I40E_FDIR_ACCEPT = 0,
+	I40E_FDIR_REJECT,
+	I40E_FDIR_PASSTHRU,
+};
+
+/**
+ * Flow director report status
+ * It defines what will be reported if FDIR entry is matched.
+ */
+enum i40e_fdir_status {
+	I40E_FDIR_NO_REPORT_STATUS = 0, /**< Report nothing. */
+	I40E_FDIR_REPORT_ID,            /**< Only report FD ID. */
+	I40E_FDIR_REPORT_ID_FLEX_4,     /**< Report FD ID and 4 flex bytes. */
+	I40E_FDIR_REPORT_FLEX_8,        /**< Report 8 flex bytes. */
+};
+
+/**
+ * A structure used to define an action when match FDIR packet filter.
+ */
+struct i40e_fdir_action {
+	uint16_t rx_queue;        /**< Queue assigned to if FDIR match. */
+	enum i40e_fdir_behavior behavior;     /**< Behavior will be taken */
+	enum i40e_fdir_status report_status;  /**< Status report option */
+	/**
+	 * If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or
+	 * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+	 * flex bytes start from in flexible payload.
+	 */
+	uint8_t flex_off;
+};
+
+/**
+ * A structure used to define the flow director filter entry by filter_ctrl API
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct i40e_fdir_filter_conf {
+	uint32_t soft_id;
+	/**< ID, an unique value is required when deal with FDIR entry */
+	struct i40e_fdir_input input;    /**< Input set */
+	struct i40e_fdir_action action;  /**< Action taken when match */
+};
+
 /*
  * Structure to store flex pit for flow diretor.
  */
@@ -483,7 +568,7 @@ struct i40e_fdir_flex_mask {
 
 struct i40e_fdir_filter {
 	TAILQ_ENTRY(i40e_fdir_filter) rules;
-	struct rte_eth_fdir_filter fdir;
+	struct i40e_fdir_filter_conf fdir;
 };
 
 TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
@@ -904,7 +989,7 @@ extern const struct rte_flow_ops i40e_flow_ops;
 
 union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_fdir_filter fdir_filter;
+	struct i40e_fdir_filter_conf fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
@@ -978,7 +1063,7 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
 int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
 				 struct i40e_ethertype_filter_input *input);
 int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
-			    struct rte_eth_fdir_input *input);
+			    struct i40e_fdir_input *input);
 struct i40e_tunnel_filter *
 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
 			     const struct i40e_tunnel_filter_input *input);
@@ -991,6 +1076,9 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf,
 int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 			     const struct rte_eth_fdir_filter *filter,
 			     bool add);
+int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 8013add..b0ba819 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -100,13 +100,18 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
 			const struct rte_eth_fdir_filter *filter,
 			bool add);
-static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter);
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input);
+			const struct i40e_fdir_input *input);
 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
 				   struct i40e_fdir_filter *filter);
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 
 static int
 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -934,6 +939,263 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static inline int
+i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+				unsigned char *raw_pkt,
+				bool vlan)
+{
+	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+	uint16_t *ether_type;
+	uint8_t len = 2 * sizeof(struct ether_addr);
+	struct ipv4_hdr *ip;
+	struct ipv6_hdr *ip6;
+	static const uint8_t next_proto[] = {
+		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+	};
+
+	raw_pkt += 2 * sizeof(struct ether_addr);
+	if (vlan && fdir_input->flow_ext.vlan_tci) {
+		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+		rte_memcpy(raw_pkt + sizeof(uint16_t),
+			   &fdir_input->flow_ext.vlan_tci,
+			   sizeof(uint16_t));
+		raw_pkt += sizeof(vlan_frame);
+		len += sizeof(vlan_frame);
+	}
+	ether_type = (uint16_t *)raw_pkt;
+	raw_pkt += sizeof(uint16_t);
+	len += sizeof(uint16_t);
+
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		*ether_type = fdir_input->flow.l2_flow.ether_type;
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		ip = (struct ipv4_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+		/* set len to by default */
+		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+					fdir_input->flow.ip4_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+					fdir_input->flow.ip4_flow.ttl :
+					I40E_FDIR_IP_DEFAULT_TTL;
+		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+		len += sizeof(struct ipv4_hdr);
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		ip6 = (struct ipv6_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		ip6->vtc_flow =
+			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					 (fdir_input->flow.ipv6_flow.tc <<
+					  I40E_FDIR_IPv6_TC_OFFSET));
+		ip6->payload_len =
+			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+					fdir_input->flow.ipv6_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+					fdir_input->flow.ipv6_flow.hop_limits :
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		rte_memcpy(&ip6->src_addr,
+			   &fdir_input->flow.ipv6_flow.dst_ip,
+			   IPV6_ADDR_LEN);
+		rte_memcpy(&ip6->dst_addr,
+			   &fdir_input->flow.ipv6_flow.src_ip,
+			   IPV6_ADDR_LEN);
+		len += sizeof(struct ipv6_hdr);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
+	}
+	return len;
+}
+
+/**
+ * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+			     const struct i40e_fdir_input *fdir_input,
+			     unsigned char *raw_pkt)
+{
+	unsigned char *payload, *ptr;
+	struct udp_hdr *udp;
+	struct tcp_hdr *tcp;
+	struct sctp_hdr *sctp;
+	uint8_t size, dst = 0;
+	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+	int len;
+
+	/* fill the ethernet and IP head */
+	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+					      !!fdir_input->flow_ext.vlan_tci);
+	if (len < 0)
+		return -EINVAL;
+
+	/* fill the L4 head */
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		payload = raw_pkt + len;
+		/**
+		 * ARP packet is a special case on which the payload
+		 * starts after the whole ARP header
+		 */
+		if (fdir_input->flow.l2_flow.ether_type ==
+				rte_cpu_to_be_16(ETHER_TYPE_ARP))
+			payload += sizeof(struct arp_hdr);
+		set_idx = I40E_FLXPLD_L2_IDX;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
+		return -EINVAL;
+	}
+
+	/* fill the flexbytes to payload */
+	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+		size = pf->fdir.flex_set[pit_idx].size;
+		if (size == 0)
+			continue;
+		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+		ptr = payload +
+		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+		(void)rte_memcpy(ptr,
+				 &fdir_input->flow_ext.flexbytes[dst],
+				 size * sizeof(uint16_t));
+	}
+
+	return 0;
+}
+
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -1007,17 +1269,17 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
 }
 
 static int
-i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter)
 {
-	rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
 	return 0;
 }
 
 /* Check if there exists the flow director filter */
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input)
+			const struct i40e_fdir_input *input)
 {
 	int ret;
 
@@ -1052,7 +1314,7 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
 
 /* Delete a flow director filter from the SW list */
 int
-i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
 {
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	struct i40e_fdir_filter *filter;
@@ -1082,16 +1344,13 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
  */
 int
 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
-			    const struct rte_eth_fdir_filter *filter,
-			    bool add)
+			 const struct rte_eth_fdir_filter *filter,
+			 bool add)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
 	enum i40e_filter_pctype pctype;
-	struct i40e_fdir_info *fdir_info = &pf->fdir;
-	struct i40e_fdir_filter *fdir_filter, *node;
-	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
 	int ret = 0;
 
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1114,6 +1373,69 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+		return ret;
+	}
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		/* get translated pctype value in fd pctype register */
+		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+			hw, I40E_GLQF_FD_PCTYPES(
+			(int)i40e_flowtype_to_pctype(
+			filter->input.flow_type)));
+	} else
+		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
+	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+			    pctype);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
+ * @pf: board private structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct i40e_fdir_filter_conf *filter,
+			      bool add)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	enum i40e_filter_pctype pctype;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_fdir_filter *fdir_filter, *node;
+	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+	int ret = 0;
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
+			    " check the mode in fdir_conf.");
+		return -ENOTSUP;
+	}
+
+	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "Invalid queue ID");
+		return -EINVAL;
+	}
+	if (filter->input.flow_ext.is_vf &&
+	    filter->input.flow_ext.dst_id >= pf->vf_num) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID");
+		return -EINVAL;
+	}
+
 	/* Check if there is the filter in SW list */
 	memset(&check_filter, 0, sizeof(check_filter));
 	i40e_fdir_filter_convert(filter, &check_filter);
@@ -1132,7 +1454,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 
 	memset(pkt, 0, I40E_FDIR_PKT_LEN);
 
-	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
 		return ret;
@@ -1142,12 +1464,11 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		/* get translated pctype value in fd pctype register */
 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
 			hw, I40E_GLQF_FD_PCTYPES(
-			(int)i40e_flowtype_to_pctype(
-			filter->input.flow_type)));
+			(int)filter->input.pctype));
 	} else
-		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+		pctype = filter->input.pctype;
 
-	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
 			    pctype);
@@ -1302,6 +1623,141 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
 }
 
 /*
+ * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
+ * Is done by Flow Director Programming Descriptor followed by packet
+ * structure that contains the filter fields need to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add)
+{
+	struct i40e_tx_queue *txq = pf->fdir.txq;
+	struct i40e_rx_queue *rxq = pf->fdir.rxq;
+	const struct i40e_fdir_action *fdir_action = &filter->action;
+	volatile struct i40e_tx_desc *txdp;
+	volatile struct i40e_filter_program_desc *fdirdp;
+	uint32_t td_cmd;
+	uint16_t vsi_id, i;
+	uint8_t dest;
+
+	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+	fdirdp = (volatile struct i40e_filter_program_desc *)
+				(&txq->tx_ring[txq->tx_tail]);
+
+	fdirdp->qindex_flex_ptype_vsi =
+			rte_cpu_to_le_32((fdir_action->rx_queue <<
+					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((fdir_action->flex_off <<
+					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((pctype <<
+					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+	if (filter->input.flow_ext.is_vf)
+		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+	else
+		/* Use LAN VSI Id by default */
+		vsi_id = pf->main_vsi->vsi_id;
+	fdirdp->qindex_flex_ptype_vsi |=
+		rte_cpu_to_le_32(((uint32_t)vsi_id <<
+				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+	fdirdp->dtype_cmd_cntindex =
+			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+	if (add)
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+	else
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	if (fdir_action->behavior == I40E_FDIR_REJECT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+	else {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: "
+			    "unsupported fdir behavior.");
+		return -EINVAL;
+	}
+
+	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+				I40E_TXD_FLTR_QW1_DEST_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+		rte_cpu_to_le_32((fdir_action->report_status <<
+				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(
+			((uint32_t)pf->fdir.match_counter_index <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+	txdp = &txq->tx_ring[txq->tx_tail + 1];
+	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+	td_cmd = I40E_TX_DESC_CMD_EOP |
+		 I40E_TX_DESC_CMD_RS  |
+		 I40E_TX_DESC_CMD_DUMMY;
+
+	txdp->cmd_type_offset_bsz =
+		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+	/* Update the tx tail register */
+	rte_wmb();
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if ((txdp->cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+			break;
+		rte_delay_us(1);
+	}
+	if (i >= I40E_FDIR_MAX_WAIT_US) {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: "
+			    "time out to get DD on tx queue.");
+		return -ETIMEDOUT;
+	}
+	/* totally delay 10 ms to check programming status*/
+	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if (i40e_check_fdir_programming_status(rxq) >= 0)
+			return 0;
+		rte_delay_us(1);
+	}
+	PMD_DRV_LOG(ERR,
+		 "Failed to program FDIR filter: programming status reported.");
+	return -ETIMEDOUT;
+}
+
+/*
  * i40e_fdir_flush - clear all filters of Flow Director table
  * @pf: board private structure
  */
@@ -1580,7 +2036,7 @@ i40e_fdir_filter_restore(struct i40e_pf *pf)
 	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
 
 	TAILQ_FOREACH(f, fdir_list, rules)
-		i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
 
 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
 	guarant_cnt =
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b92719a..73af7fd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -84,11 +84,11 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					const struct rte_flow_item *pattern,
 					struct rte_flow_error *error,
-					struct rte_eth_fdir_filter *filter);
+					struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 				       const struct rte_flow_action *actions,
 				       struct rte_flow_error *error,
-				       struct rte_eth_fdir_filter *filter);
+				       struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				 const struct rte_flow_action *actions,
 				 struct rte_flow_error *error,
@@ -2315,7 +2315,7 @@ static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			     const struct rte_flow_item *pattern,
 			     struct rte_flow_error *error,
-			     struct rte_eth_fdir_filter *filter)
+			     struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_item *item = pattern;
@@ -2329,8 +2329,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
-	enum i40e_filter_pctype pctype;
+	enum i40e_filter_pctype pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
@@ -2402,7 +2401,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2420,7 +2419,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2457,13 +2456,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					input_set |= I40E_INSET_IPV4_PROTO;
 
 				/* Get filter info */
-				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+				pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 				/* Check if it is fragment. */
 				frag_off = ipv4_spec->hdr.fragment_offset;
 				frag_off = rte_be_to_cpu_16(frag_off);
 				if (frag_off & IPV4_HDR_OFFSET_MASK ||
 				    frag_off & IPV4_HDR_MF_FLAG)
-					flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
 
 				/* Get the filter info */
 				filter->input.flow.ip4_flow.proto =
@@ -2535,11 +2534,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				/* Check if it is fragment. */
 				if (ipv6_spec->hdr.proto ==
 				    I40E_IPV6_FRAG_HEADER)
-					flow_type =
-						RTE_ETH_FLOW_FRAG_IPV6;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
 				else
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+					pctype =
+					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
 			}
 
 			layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2572,11 +2570,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.tcp4_flow.src_port =
@@ -2616,11 +2614,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.udp4_flow.src_port =
@@ -2663,11 +2661,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.sctp4_flow.src_port =
@@ -2776,14 +2774,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	pctype = i40e_flowtype_to_pctype(flow_type);
-	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Unsupported flow type");
-		return -rte_errno;
-	}
-
 	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
 	if (ret == -1) {
 		rte_flow_error_set(error, EINVAL,
@@ -2797,7 +2787,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->input.flow_type = flow_type;
+	filter->input.pctype = pctype;
 
 	/* Store flex mask to SW */
 	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
@@ -2832,7 +2822,7 @@ static int
 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 			    const struct rte_flow_action *actions,
 			    struct rte_flow_error *error,
-			    struct rte_eth_fdir_filter *filter)
+			    struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
@@ -2855,13 +2845,13 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 					   "Invalid queue ID for FDIR.");
 			return -rte_errno;
 		}
-		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		filter->action.behavior = I40E_FDIR_ACCEPT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_DROP:
-		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+		filter->action.behavior = I40E_FDIR_REJECT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
-		filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+		filter->action.behavior = I40E_FDIR_PASSTHRU;
 		break;
 	default:
 		rte_flow_error_set(error, EINVAL,
@@ -2876,11 +2866,11 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_MARK:
 		mark_spec = (const struct rte_flow_action_mark *)act->conf;
-		filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+		filter->action.report_status = I40E_FDIR_REPORT_ID;
 		filter->soft_id = mark_spec->id;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
-		filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
 		break;
 	case RTE_FLOW_ACTION_TYPE_END:
 		return 0;
@@ -2911,7 +2901,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error,
 			    union i40e_filter_t *filter)
 {
-	struct rte_eth_fdir_filter *fdir_filter =
+	struct i40e_fdir_filter_conf *fdir_filter =
 		&filter->fdir_filter;
 	int ret;
 
@@ -3877,7 +3867,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_ethertype_filter_list);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 				       &cons_filter.fdir_filter, 1);
 		if (ret)
 			goto free_flow;
@@ -3927,7 +3917,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 			      (struct i40e_tunnel_filter *)flow->rule);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
 		break;
 	default:
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v2 4/6] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-07 11:20   ` [PATCH v2 0/6] GPT-C and GTP-U enabling Beilei Xing
                       ` (2 preceding siblings ...)
  2017-09-07 11:21     ` [PATCH v2 3/6] net/i40e: finish integration FDIR with generic flow API Beilei Xing
@ 2017-09-07 11:21     ` Beilei Xing
  2017-09-07 11:21     ` [PATCH v2 5/6] net/i40e: add cloud filter parsing function for GTP Beilei Xing
                       ` (2 subsequent siblings)
  6 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-07 11:21 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds FDIR support for GTP-C and GTP-U.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c |   1 +
 drivers/net/i40e/i40e_ethdev.h |  11 +++
 drivers/net/i40e/i40e_fdir.c   | 170 +++++++++++++++++++++++++++--------------
 drivers/net/i40e/i40e_flow.c   | 165 ++++++++++++++++++++++++++++++---------
 4 files changed, 255 insertions(+), 92 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5483622..18b3d8c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1051,6 +1051,7 @@ i40e_init_customer_pctype(struct i40e_pf *pf)
 			rte_memcpy(pf->new_pctype[i].name, "GTPU",
 				   sizeof("GTPU"));
 	}
+	pf->new_pctype_used = false;
 }
 
 static int
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ca8d201..9fff85f 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -233,6 +233,7 @@ enum i40e_flxpld_layer_idx {
 #define I40E_INSET_TUNNEL_SRC_PORT       0x0000000800000000ULL
 #define I40E_INSET_TUNNEL_DST_PORT       0x0000001000000000ULL
 #define I40E_INSET_TUNNEL_ID             0x0000002000000000ULL
+#define I40E_INSET_GTP_TEID              0x0000004000000000ULL
 
 /* bit 48 ~ bit 55 */
 #define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
@@ -461,6 +462,14 @@ struct i40e_vmdq_info {
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
 /**
+ * A structure used to define the input for IPV4 GTP flow
+ */
+struct i40e_gtpv4_flow {
+	struct rte_eth_udpv4_flow udp; /**< IPv4 UDP fields to match. */
+	uint32_t teid;                 /**< TEID in big endian. */
+};
+
+/**
  * A union contains the inputs for all types of flow
  * Items in flows need to be in big endian
  */
@@ -474,6 +483,7 @@ union i40e_fdir_flow {
 	struct rte_eth_tcpv6_flow  tcp6_flow;
 	struct rte_eth_sctpv6_flow sctp6_flow;
 	struct rte_eth_ipv6_flow   ipv6_flow;
+	struct i40e_gtpv4_flow     gtpv4_flow;
 };
 
 /**
@@ -888,6 +898,7 @@ struct i40e_pf {
 	struct i40e_tm_conf tm_conf;
 	/* customer personalized pctype */
 	struct i40e_personalized_pctype new_pctype[I40E_PERSONALIZED_MAX];
+	bool new_pctype_used; /* Check if new PCTYPE is used for FDIR */
 };
 
 enum pending_msg {
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index b0ba819..25a9c7d 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -71,6 +71,7 @@
 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
 #define I40E_FDIR_UDP_DEFAULT_LEN           400
+#define I40E_FDIR_GTP_DEFAULT_LEN           384
 
 /* Wait time for fdir filter programming */
 #define I40E_FDIR_MAX_WAIT_US 10000
@@ -939,16 +940,33 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static struct i40e_personalized_pctype *
+i40e_flow_fdir_check_new_pctype(struct i40e_pf *pf, uint8_t pctype)
+{
+	struct i40e_personalized_pctype *cus_pctype;
+	enum i40e_new_proto i = I40E_PERSONALIZED_GTPC;
+
+	for (; i < I40E_PERSONALIZED_MAX; i++) {
+		cus_pctype = &pf->new_pctype[i];
+		if (pctype == cus_pctype->pctype)
+			return cus_pctype;
+	}
+	return NULL;
+}
+
 static inline int
-i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
+				const struct i40e_fdir_input *fdir_input,
 				unsigned char *raw_pkt,
 				bool vlan)
 {
+	struct i40e_personalized_pctype *cus_pctype;
 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
 	uint16_t *ether_type;
 	uint8_t len = 2 * sizeof(struct ether_addr);
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
+	uint8_t pctype = fdir_input->pctype;
 	static const uint8_t next_proto[] = {
 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
@@ -975,15 +993,13 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 	raw_pkt += sizeof(uint16_t);
 	len += sizeof(uint16_t);
 
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
 		*ether_type = fdir_input->flow.l2_flow.ether_type;
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
 		ip = (struct ipv4_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
@@ -991,11 +1007,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		/* set len to by default */
 		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
 		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
-					fdir_input->flow.ip4_flow.proto :
-					next_proto[fdir_input->pctype];
+			fdir_input->flow.ip4_flow.proto :
+			next_proto[fdir_input->pctype];
 		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
-					fdir_input->flow.ip4_flow.ttl :
-					I40E_FDIR_IP_DEFAULT_TTL;
+			fdir_input->flow.ip4_flow.ttl :
+			I40E_FDIR_IP_DEFAULT_TTL;
 		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
 		/**
 		 * The source and destination fields in the transmitted packet
@@ -1005,12 +1021,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
 		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
 		len += sizeof(struct ipv4_hdr);
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		ip6 = (struct ipv6_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
@@ -1021,11 +1036,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		ip6->payload_len =
 			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
 		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
-					fdir_input->flow.ipv6_flow.proto :
-					next_proto[fdir_input->pctype];
+			fdir_input->flow.ipv6_flow.proto :
+			next_proto[fdir_input->pctype];
 		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
-					fdir_input->flow.ipv6_flow.hop_limits :
-					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+			fdir_input->flow.ipv6_flow.hop_limits :
+			I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
 		/**
 		 * The source and destination fields in the transmitted packet
 		 * need to be presented in a reversed order with respect
@@ -1038,12 +1053,39 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 			   &fdir_input->flow.ipv6_flow.src_ip,
 			   IPV6_ADDR_LEN);
 		len += sizeof(struct ipv6_hdr);
-		break;
-	default:
+	} else if (pf->new_pctype_used) {
+		cus_pctype = i40e_flow_fdir_check_new_pctype(pf, pctype);
+		ip = (struct ipv4_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+		/* set len to by default */
+		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+			fdir_input->flow.ip4_flow.proto :
+			next_proto[fdir_input->pctype];
+		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+			fdir_input->flow.ip4_flow.ttl :
+			I40E_FDIR_IP_DEFAULT_TTL;
+		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+
+		if (!memcmp(cus_pctype->name, "GTPC", sizeof("GTPC")) ||
+		    !memcmp(cus_pctype->name, "GTPU", sizeof("GTPU")))
+			ip->next_proto_id = IPPROTO_UDP;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+		len += sizeof(struct ipv4_hdr);
+	} else {
 		PMD_DRV_LOG(ERR, "unknown pctype %u.",
 			    fdir_input->pctype);
 		return -1;
 	}
+
 	return len;
 }
 
@@ -1058,23 +1100,26 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 			     const struct i40e_fdir_input *fdir_input,
 			     unsigned char *raw_pkt)
 {
-	unsigned char *payload, *ptr;
+	unsigned char *payload = NULL;
+	unsigned char *ptr;
 	struct udp_hdr *udp;
 	struct tcp_hdr *tcp;
 	struct sctp_hdr *sctp;
+	struct rte_flow_item_gtp *gtp;
 	uint8_t size, dst = 0;
 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
 	int len;
+	uint8_t pctype = fdir_input->pctype;
+	struct i40e_personalized_pctype *cus_pctype;
 
 	/* fill the ethernet and IP head */
-	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
 					      !!fdir_input->flow_ext.vlan_tci);
 	if (len < 0)
 		return -EINVAL;
 
 	/* fill the L4 head */
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1085,9 +1130,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1098,9 +1141,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1111,15 +1152,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1130,9 +1167,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1143,9 +1178,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
 		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1156,14 +1189,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	} else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
 		payload = raw_pkt + len;
 		/**
 		 * ARP packet is a special case on which the payload
@@ -1173,10 +1203,34 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 				rte_cpu_to_be_16(ETHER_TYPE_ARP))
 			payload += sizeof(struct arp_hdr);
 		set_idx = I40E_FLXPLD_L2_IDX;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
-		return -EINVAL;
+	} else if (pf->new_pctype_used) {
+		cus_pctype = i40e_flow_fdir_check_new_pctype(pf, pctype);
+		if (!memcmp(cus_pctype->name, "GTPC", sizeof("GTPC")) ||
+		    !memcmp(cus_pctype->name, "GTPU", sizeof("GTPU"))) {
+			udp = (struct udp_hdr *)(raw_pkt + len);
+			udp->dgram_len =
+				rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+			gtp = (struct rte_flow_item_gtp *)
+				((unsigned char *)udp + sizeof(struct udp_hdr));
+			gtp->v_pt_rsv_flags = 0x30;
+			gtp->msg_len =
+				rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+			gtp->teid = fdir_input->flow.gtpv4_flow.teid;
+			gtp->msg_type = 0x1;
+
+			if (!memcmp(cus_pctype->name, "GTPC", sizeof("GTPC")))
+				udp->dst_port = rte_cpu_to_be_16(2123);
+			else
+				udp->dst_port = rte_cpu_to_be_16(2152);
+
+			payload = (unsigned char *)gtp +
+				sizeof(struct rte_flow_item_gtp);
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
 	}
 
 	/* fill the flexbytes to payload */
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 73af7fd..6716855 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -189,6 +189,22 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
@@ -216,6 +232,22 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_RAW,
@@ -1576,10 +1608,14 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
 	/* FDIR - support default flow type with flexible payload */
 	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -2302,6 +2338,32 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf,
 	return 0;
 }
 
+static int
+i40e_flow_find_new_pctype(struct i40e_pf *pf,
+			  enum rte_flow_item_type item_type)
+{
+	struct i40e_personalized_pctype *cus_pctype;
+
+	switch (item_type) {
+	case RTE_FLOW_ITEM_TYPE_GTPC:
+		cus_pctype = i40e_find_personalized_pctype(pf,
+						   I40E_PERSONALIZED_GTPC);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GTPU:
+		cus_pctype = i40e_find_personalized_pctype(pf,
+						   I40E_PERSONALIZED_GTPU);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported item type");
+		break;
+	}
+
+	if (cus_pctype)
+		return cus_pctype->pctype;
+
+	return I40E_INVALID_PCTYPE;
+}
+
 /* 1. Last in item should be NULL as range is not supported.
  * 2. Supported patterns: refer to array i40e_supported_patterns.
  * 3. Supported flow type and input set: refer to array
@@ -2326,10 +2388,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	enum i40e_filter_pctype pctype = 0;
+	int pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
@@ -2636,6 +2699,38 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			layer_idx = I40E_FLXPLD_L4_IDX;
 
 			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+
+			pctype = i40e_flow_find_new_pctype(pf, item_type);
+			if (pctype == I40E_INVALID_PCTYPE) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported protocol");
+				return -rte_errno;
+			}
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+				    gtp_mask->msg_type ||
+				    gtp_mask->msg_len ||
+				    gtp_mask->teid != UINT32_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				pf->new_pctype_used = true;
+				input_set |= I40E_INSET_GTP_TEID;
+				filter->input.flow.gtpv4_flow.teid =
+					gtp_spec->teid;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
 			sctp_spec =
 				(const struct rte_flow_item_sctp *)item->spec;
@@ -2774,43 +2869,45 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Conflict with the first rule's input set.");
-		return -rte_errno;
-	} else if (ret == -EINVAL) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Invalid pattern mask.");
-		return -rte_errno;
-	}
+	if (!pf->new_pctype_used) {
+		ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Conflict with the first rule's input set.");
+			return -rte_errno;
+		} else if (ret == -EINVAL) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Invalid pattern mask.");
+			return -rte_errno;
+		}
 
-	filter->input.pctype = pctype;
+		/* Store flex mask to SW */
+		ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Exceed maximal number of bitmasks");
+			return -rte_errno;
+		} else if (ret == -2) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Conflict with the first flexible rule");
+			return -rte_errno;
+		} else if (ret > 0)
+			cfg_flex_msk = false;
 
-	/* Store flex mask to SW */
-	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Exceed maximal number of bitmasks");
-		return -rte_errno;
-	} else if (ret == -2) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Conflict with the first flexible rule");
-		return -rte_errno;
-	} else if (ret > 0)
-		cfg_flex_msk = false;
+		if (cfg_flex_pit)
+			i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
 
-	if (cfg_flex_pit)
-		i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+		if (cfg_flex_msk)
+			i40e_flow_set_fdir_flex_msk(pf, pctype);
+	}
 
-	if (cfg_flex_msk)
-		i40e_flow_set_fdir_flex_msk(pf, pctype);
+	filter->input.pctype = pctype;
 
 	return 0;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v2 5/6] net/i40e: add cloud filter parsing function for GTP
  2017-09-07 11:20   ` [PATCH v2 0/6] GPT-C and GTP-U enabling Beilei Xing
                       ` (3 preceding siblings ...)
  2017-09-07 11:21     ` [PATCH v2 4/6] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-09-07 11:21     ` Beilei Xing
  2017-09-07 11:21     ` [PATCH v2 6/6] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
  2017-09-22 22:35     ` [PATCH v3 0/8] GPT-C and GTP-U enabling Beilei Xing
  6 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-07 11:21 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds the i40e_flow_parse_gtp_filter parsing
function for GTP-C and GTP-U.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |   2 +
 drivers/net/i40e/i40e_flow.c   | 142 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 144 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 9fff85f..f252ff3 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -694,6 +694,8 @@ enum i40e_tunnel_type {
 	I40E_TUNNEL_TYPE_MPLSoUDP,
 	I40E_TUNNEL_TYPE_MPLSoGRE,
 	I40E_TUNNEL_TYPE_QINQ,
+	I40E_TUNNEL_TYPE_GTPC,
+	I40E_TUNNEL_TYPE_GTPU,
 	I40E_TUNNEL_TYPE_MAX,
 };
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 6716855..be44abb 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -125,6 +125,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 				       const struct rte_flow_action actions[],
 				       struct rte_flow_error *error,
 				       union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+				      const struct rte_flow_attr *attr,
+				      const struct rte_flow_item pattern[],
+				      const struct rte_flow_action actions[],
+				      struct rte_flow_error *error,
+				      union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
 				      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -1768,6 +1774,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+	/* GTP-C & GTP-U */
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
 	/* QINQ */
 	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
 };
@@ -3733,6 +3744,137 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 }
 
 /* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ *    filled with 0.
+ */
+static int
+i40e_flow_parse_gtp_pattern(__rte_unused struct rte_eth_dev *dev,
+			    const struct rte_flow_item *pattern,
+			    struct rte_flow_error *error,
+			    struct i40e_tunnel_filter_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_gtp *gtp_spec;
+	const struct rte_flow_item_gtp *gtp_mask;
+	enum rte_flow_item_type item_type;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+			/* IPv4 is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec =
+				(const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask =
+				(const struct rte_flow_item_gtp *)item->mask;
+
+			if (!gtp_spec || !gtp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP item");
+				return -rte_errno;
+			}
+
+			if (gtp_mask->v_pt_rsv_flags ||
+			    gtp_mask->msg_type ||
+			    gtp_mask->msg_len ||
+			    gtp_mask->teid != UINT32_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+				return -rte_errno;
+			}
+
+			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+
+			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+			   const struct rte_flow_attr *attr,
+			   const struct rte_flow_item pattern[],
+			   const struct rte_flow_action actions[],
+			   struct rte_flow_error *error,
+			   union i40e_filter_t *filter)
+{
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
+	int ret;
+
+	ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+					  error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+	return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
  * 2. Supported filter types: QINQ.
  * 3. Mask of fields which need to be matched should be
  *    filled with 1.
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v2 6/6] net/i40e: enable cloud filter for GTP-C and GTP-U
  2017-09-07 11:20   ` [PATCH v2 0/6] GPT-C and GTP-U enabling Beilei Xing
                       ` (4 preceding siblings ...)
  2017-09-07 11:21     ` [PATCH v2 5/6] net/i40e: add cloud filter parsing function for GTP Beilei Xing
@ 2017-09-07 11:21     ` Beilei Xing
  2017-09-22 22:35     ` [PATCH v3 0/8] GPT-C and GTP-U enabling Beilei Xing
  6 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-07 11:21 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

GTP-C and GTP-U are not supported by the cloud filter
due to limited HW resources, so this patch enables
GTP-C and GTP-U cloud filters by replacing the
inner_mac and TUNNEL_KEY filter types.
The replacement is configured when the first GTP-C or
GTP-U filter rule is added, and it is only invalidated
by a NIC core reset.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 187 +++++++++++++++++++++++++++++++++++++----
 drivers/net/i40e/i40e_ethdev.h |  11 ++-
 drivers/net/i40e/i40e_flow.c   |  26 ++++--
 3 files changed, 196 insertions(+), 28 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 18b3d8c..06e6ee0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -7132,7 +7132,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
-	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 3 entries */
@@ -7180,12 +7180,12 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
@@ -7203,12 +7203,129 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* For GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace.tr_bit = 22 | 0x80;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace.tr_bit = 21 | 0x80;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* for GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 
@@ -7317,6 +7434,36 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		big_buffer = 1;
 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
 		break;
+	case I40E_TUNNEL_TYPE_GTPC:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
+	case I40E_TUNNEL_TYPE_GTPU:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
 	case I40E_TUNNEL_TYPE_QINQ:
 		if (!pf->qinq_replace_flag) {
 			ret = i40e_cloud_filter_qinq_create(pf);
@@ -7343,13 +7490,19 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 
 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
 		pfilter->element.flags |=
-			I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+			I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	else {
 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
 						&pfilter->element.flags);
@@ -10870,14 +11023,14 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
 			   sizeof(f->input.general_fields));
 
 		if (((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
 			big_buffer = 1;
 
 		if (big_buffer)
@@ -11087,7 +11240,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 2 entries */
@@ -11118,13 +11271,13 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L2 filter, input for L2 filter will be L1 filter  */
 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index f252ff3..087ff4d 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -643,10 +643,12 @@ struct i40e_ethertype_rule {
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
-#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
-#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X10 0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_0X11 0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X12 0x12
+#define I40E_AQC_ADD_L1_FILTER_0X11 0x11
+#define I40E_AQC_ADD_L1_FILTER_0X12 0x12
+#define I40E_AQC_ADD_L1_FILTER_0X13 0x13
 
 enum i40e_tunnel_iptype {
 	I40E_TUNNEL_IPTYPE_IPV4,
@@ -896,6 +898,7 @@ struct i40e_pf {
 	bool floating_veb_list[I40E_MAX_VF];
 	struct i40e_flow_list flow_list;
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
+	bool gtp_replace_flag; /* 1 - GTP-C/U filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
 	/* customer personalized pctype */
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index be44abb..7eb02fd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -3751,15 +3751,17 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
  *    filled with 0.
  */
 static int
-i40e_flow_parse_gtp_pattern(__rte_unused struct rte_eth_dev *dev,
+i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
 			    const struct rte_flow_item *pattern,
 			    struct rte_flow_error *error,
 			    struct i40e_tunnel_filter_conf *filter)
 {
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_item *item = pattern;
 	const struct rte_flow_item_gtp *gtp_spec;
 	const struct rte_flow_item_gtp *gtp_mask;
 	enum rte_flow_item_type item_type;
+	int pctype = 0;
 
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
@@ -3809,6 +3811,16 @@ i40e_flow_parse_gtp_pattern(__rte_unused struct rte_eth_dev *dev,
 			gtp_mask =
 				(const struct rte_flow_item_gtp *)item->mask;
 
+			pctype = i40e_flow_find_new_pctype(pf,
+							   item_type);
+			if (pctype == I40E_INVALID_PCTYPE) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported protocol");
+				return -rte_errno;
+			}
+
 			if (!gtp_spec || !gtp_mask) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -4245,12 +4257,12 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 		vsi = vf->vsi;
 	}
 
-	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X10))
 		big_buffer = 1;
 
 	if (big_buffer)
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* Re: [PATCH v2 2/6] ethdev: add GTPC and GTPU items
  2017-09-07 11:20     ` [PATCH v2 2/6] ethdev: add GTPC and GTPU items Beilei Xing
@ 2017-09-07 12:19       ` Adrien Mazarguil
  2017-09-12  6:40         ` Xing, Beilei
  0 siblings, 1 reply; 116+ messages in thread
From: Adrien Mazarguil @ 2017-09-07 12:19 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, andrey.chilikin, dev

Hi Beilei,

I assume this patch supersedes [1]?

[1] http://dpdk.org/ml/archives/dev/2017-August/073501.html

Thanks for merging testpmd and the API change as a single patch, I still
have a few comments, see below.

(please add "flow API" somewhere in the title by the way)

On Thu, Sep 07, 2017 at 07:20:59PM +0800, Beilei Xing wrote:
> This patch adds GTPC and GTPU items to generic rte
> flow, and also exposes the following item fields
> through the flow command:
> 
> - GTPC TEID
> - GTPU TEID
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>

Won't there be a need to match nonspecific GTP traffic as well (both GTP-C
and GTP-U at once), since they use the same structure?

I'm not familiar with the protocol at all so I wonder if you should maybe
leave the GTP item in addition to those two.

> ---
>  app/test-pmd/cmdline_flow.c                 | 44 +++++++++++++++++++++++++++++
>  app/test-pmd/config.c                       |  2 ++
>  doc/guides/prog_guide/rte_flow.rst          | 26 +++++++++++++++++
>  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  8 ++++++
>  lib/librte_ether/rte_flow.h                 | 44 +++++++++++++++++++++++++++++
>  5 files changed, 124 insertions(+)
> 
> diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
> index a17a004..72d159c 100644
> --- a/app/test-pmd/cmdline_flow.c
> +++ b/app/test-pmd/cmdline_flow.c
> @@ -171,6 +171,10 @@ enum index {
>  	ITEM_GRE_PROTO,
>  	ITEM_FUZZY,
>  	ITEM_FUZZY_THRESH,
> +	ITEM_GTPC,
> +	ITEM_GTPC_TEID,
> +	ITEM_GTPU,
> +	ITEM_GTPU_TEID,

You could refactor the TEID parameter since they use the same
structure. Might be useful if you add nonspecific GTP:

 ITEM_GTP,
 ITEM_GTP_TEID,
 ITEM_GTPC,
 ITEM_GTPU,

>  
>  	/* Validate/create actions. */
>  	ACTIONS,
> @@ -451,6 +455,8 @@ static const enum index next_item[] = {
>  	ITEM_MPLS,
>  	ITEM_GRE,
>  	ITEM_FUZZY,
> +	ITEM_GTPC,
> +	ITEM_GTPU,
>  	ZERO,
>  };
>  
> @@ -588,6 +594,18 @@ static const enum index item_gre[] = {
>  	ZERO,
>  };
>  
> +static const enum index item_gtpc[] = {
> +	ITEM_GTPC_TEID,
> +	ITEM_NEXT,
> +	ZERO,
> +};
> +
> +static const enum index item_gtpu[] = {
> +	ITEM_GTPU_TEID,
> +	ITEM_NEXT,
> +	ZERO,
> +};

A single array is necessary, item_gtp[].

> +
>  static const enum index next_action[] = {
>  	ACTION_END,
>  	ACTION_VOID,
> @@ -1421,6 +1439,32 @@ static const struct token token_list[] = {
>  		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
>  					thresh)),
>  	},
> +	[ITEM_GTPC] = {
> +		.name = "gtpc",
> +		.help = "match GTP header",
> +		.priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
> +		.next = NEXT(item_gtpc),
> +		.call = parse_vc,
> +	},
> +	[ITEM_GTPC_TEID] = {
> +		.name = "teid",
> +		.help = "TUNNEL ENDPOINT IDENTIFIER",

Please don't shout, "tunnel endpoint identifier" is fine.

> +		.next = NEXT(item_gtpc, NEXT_ENTRY(UNSIGNED), item_param),
> +		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
> +	},
> +	[ITEM_GTPU] = {
> +		.name = "gtpu",
> +		.help = "match GTP header",
> +		.priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
> +		.next = NEXT(item_gtpu),
> +		.call = parse_vc,
> +	},
> +	[ITEM_GTPU_TEID] = {
> +		.name = "teid",
> +		.help = "TUNNEL ENDPOINT IDENTIFIER",

Same comment here; however, only a single TEID entry is necessary, as
previously described.

> +		.next = NEXT(item_gtpu, NEXT_ENTRY(UNSIGNED), item_param),
> +		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
> +	},
>  
>  	/* Validate/create actions. */
>  	[ACTIONS] = {
> diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
> index 3ae3e1c..be4c3b9 100644
> --- a/app/test-pmd/config.c
> +++ b/app/test-pmd/config.c
> @@ -947,6 +947,8 @@ static const struct {
>  	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
>  	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
>  	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),

Remember to add GTP here assuming it makes sense.

> +	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
> +	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
>  };
>  
>  /** Compute storage space needed by item specification. */
> diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
> index 662a912..9e7179a 100644
> --- a/doc/guides/prog_guide/rte_flow.rst
> +++ b/doc/guides/prog_guide/rte_flow.rst
> @@ -955,6 +955,32 @@ Usage example, fuzzy match a TCPv4 packets:
>     | 4     | END      |
>     +-------+----------+
>  
> +Item: ``GTPC``
> +^^^^^^^^^^^^^^
> +
> +Matches a GTP header.
> +
> +- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
> +  extension header flag (1b), sequence number flag (1b), N-PDU number
> +  flag (1b).
> +- ``msg_type``: message type.
> +- ``msg_len``: message length.
> +- ``teid``: TEID.
> +- Default ``mask`` matches teid only.
> +
> +Item: ``GTPU``
> +^^^^^^^^^^^^^^
> +
> +Matches a GTP header.
> +
> +- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
> +  extension header flag (1b), sequence number flag (1b), N-PDU number
> +  flag (1b).
> +- ``msg_type``: message type.
> +- ``msg_len``: message length.
> +- ``teid``: TEID.
> +- Default ``mask`` matches teid only.
> +

You can use a single section to describe all three items at once since they
map to a common structure:

 Item: ``GTP``, ``GTPC``, ``GTPU``:
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Then elaborate a bit on the differences between them.

>  Actions
>  ~~~~~~~
>  
> diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> index 2ed62f5..2ca36ad 100644
> --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> @@ -2696,6 +2696,14 @@ This section lists supported pattern items and their attributes, if any.
>  
>    - ``thresh {unsigned}``: accuracy threshold.
>  
> +- ``gtpc``: match GTP header.
> +
> +  - ``teid {unsigned}``: Tunnel endpoint identifier.

Tunnel => tunnel

> +
> +- ``gtpu``: match GTP header.
> +
> +  - ``teid {unsigned}``: Tunnel endpoint identifier.

You could also merge all three items here, e.g.:

 - ``gtp``, ``gtpc``, ``gtpu``: ...

> +
>  Actions list
>  ^^^^^^^^^^^^
>  
> diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
> index bba6169..8b24cac 100644
> --- a/lib/librte_ether/rte_flow.h
> +++ b/lib/librte_ether/rte_flow.h
> @@ -309,6 +309,24 @@ enum rte_flow_item_type {
>  	 * See struct rte_flow_item_fuzzy.
>  	 */
>  	RTE_FLOW_ITEM_TYPE_FUZZY,
> +
> +	/**
> +	 * Matches a GTP header.

Write "GTP-C" to make clear this is not nonspecific "GTP" matching.

> +	 *
> +	 * Configure flow for GTP-C packets.
> +	 *
> +	 * See struct rte_flow_item_gtp.
> +	 */
> +	RTE_FLOW_ITEM_TYPE_GTPC,
> +
> +	/**
> +	 * Matches a GTP header.

"GTP-U" here.

> +	 *
> +	 * Configure flow for GTP-U packets.
> +	 *
> +	 * See struct rte_flow_item_gtp.
> +	 */
> +	RTE_FLOW_ITEM_TYPE_GTPU,
>  };
>  
>  /**
> @@ -735,6 +753,32 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
>  #endif
>  
>  /**
> + * RTE_FLOW_ITEM_TYPE_GTP.

You need to mention the others, something like:

 RTE_FLOW_ITEM_TYPE_GTP, RTE_FLOW_ITEM_TYPE_GTPC and RTE_FLOW_ITEM_TYPE_GTPU.

> + *
> + * Matches a GTP header.

Similarly:

 Matches a nonspecific GTP, a GTP-C or a GTP-U header.

> + */
> +struct rte_flow_item_gtp {
> +	/**
> +	 * Version (2b), protocol type (1b), reserved (1b),
> +	 * Extension header flag (1b),
> +	 * Sequence number flag (1b),

Extension => extension
Sequence => sequence

> +	 * N-PDU number flag (1b).
> +	 */
> +	uint8_t v_pt_rsv_flags;
> +	uint8_t msg_type; /**< Message type. */
> +	rte_be16_t msg_len; /**< Message length. */
> +	rte_be32_t teid; /**< Tunnel endpoint identifier. */
> +};
> +
> +/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
> +#ifndef __cplusplus
> +static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
> +	.msg_type = 0x00,

The above field is not necessary since you're not initializing the entire
structure, the rest is set to 0 by default.

> +	.teid = RTE_BE32(0xffffffff),
> +};
> +#endif
> +
> +/**
>   * Matching pattern item definition.
>   *
>   * A pattern is formed by stacking items starting from the lowest protocol
> -- 
> 2.5.5
> 

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v2 2/6] ethdev: add GTPC and GTPU items
  2017-09-07 12:19       ` Adrien Mazarguil
@ 2017-09-12  6:40         ` Xing, Beilei
  2017-09-12 10:46           ` Adrien Mazarguil
  0 siblings, 1 reply; 116+ messages in thread
From: Xing, Beilei @ 2017-09-12  6:40 UTC (permalink / raw)
  To: Adrien Mazarguil; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

Hi Adrien,

> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> Sent: Thursday, September 7, 2017 8:20 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 2/6] ethdev: add GTPC and GTPU items
> 
> Hi Beilei,
> 
> I assume this patch supersedes [1]?
> 
> [1] http://dpdk.org/ml/archives/dev/2017-August/073501.html
> 
> Thanks for merging testpmd and the API change as a single patch, I still have
> a few comments, see below.
> 
> (please add "flow API" somewhere in the title by the way)

Thanks for all your comments.
Yes, http://dpdk.org/ml/archives/dev/2017-August/073501.html is superseded, and this version merges the testpmd and API changes.
I will update the title in the next version.

> 
> On Thu, Sep 07, 2017 at 07:20:59PM +0800, Beilei Xing wrote:
> > This patch adds GTPC and GTPU items to generic rte flow, and also
> > exposes the following item fields through the flow command:
> >
> > - GTPC TEID
> > - GTPU TEID
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> 
> Won't there be a need to match nonspecific GTP traffic as well (both GTP-C
> and GTP-U a once), since they use the same structure?
> 
> I'm not familiar with the protocol at all so I wonder if you should maybe leave
> the GTP item in addition to those two.
> 

Agree, I will leave the GTP item in next version.

GTP-C and GTP-U use the same structure; the difference between them is the UDP port: 2123 is for GTP-C, and 2152 is for GTP-U.
I added the GTP-C and GTP-U items because I want to design a user-friendly CLI.

For example, if a user wants to add a flow that assigns GTP-U packets with TEID 0x123456 to queue 14,
then the user can use the following CLI:
flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x123456 / end actions queue index 14 / end
instead of the CLI below, which distinguishes GTP-C and GTP-U by UDP port:
flow create 0 ingress pattern eth / ipv4 / udp dst spec 2152 dst mask 0 / gtp teid is 0x123456 / end actions queue index 14 / end

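For reference, the same GTP-U rule expressed through the rte_flow C API might look roughly like the sketch below (not part of this patch set; it assumes the RTE_FLOW_ITEM_TYPE_GTPU item and struct rte_flow_item_gtp proposed in this series):

#include <rte_flow.h>

/* Sketch only: steer GTP-U packets with TEID 0x123456 to queue 14. */
static struct rte_flow *
gtpu_teid_to_queue(uint8_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x123456) };
	struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
	struct rte_flow_action_queue queue = { .index = 14 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
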
And all your other comments will be addressed in next version, thanks.

Beilei

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v2 2/6] ethdev: add GTPC and GTPU items
  2017-09-12  6:40         ` Xing, Beilei
@ 2017-09-12 10:46           ` Adrien Mazarguil
  2017-09-13  3:09             ` Xing, Beilei
  0 siblings, 1 reply; 116+ messages in thread
From: Adrien Mazarguil @ 2017-09-12 10:46 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

Hi Beilei,

On Tue, Sep 12, 2017 at 06:40:50AM +0000, Xing, Beilei wrote:
> Hi Adrien,
> 
> > -----Original Message-----
> > From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> > Sent: Thursday, September 7, 2017 8:20 PM
> > To: Xing, Beilei <beilei.xing@intel.com>
> > Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> > <andrey.chilikin@intel.com>; dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH v2 2/6] ethdev: add GTPC and GTPU items
> > 
> > Hi Beilei,
> > 
> > I assume this patch supersedes [1]?
> > 
> > [1] http://dpdk.org/ml/archives/dev/2017-August/073501.html
> > 
> > Thanks for merging testpmd and the API change as a single patch, I still have
> > a few comments, see below.
> > 
> > (please add "flow API" somewhere in the title by the way)
> 
> Thanks for all your comments.
> Yes, http://dpdk.org/ml/archives/dev/2017-August/073501.html is superseded and did merging testpmd and API change.
> I will update title in next version.

All right, thanks.

> > On Thu, Sep 07, 2017 at 07:20:59PM +0800, Beilei Xing wrote:
> > > This patch adds GTPC and GTPU items to generic rte flow, and also
> > > exposes the following item fields through the flow command:
> > >
> > > - GTPC TEID
> > > - GTPU TEID
> > >
> > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > 
> > Won't there be a need to match nonspecific GTP traffic as well (both GTP-C
> > and GTP-U a once), since they use the same structure?
> > 
> > I'm not familiar with the protocol at all so I wonder if you should maybe leave
> > the GTP item in addition to those two.
> > 
> 
> Agree, I will leave the GTP item in next version.
> 
> GTP-C and GTP-U use the same structure, the difference between them is UDP port, 2123 is for GTP-C, and 2152 is for GTP-U.
> Add GTP-C and GTP -U item since I want to design a user-friendly CLI.
> 
> For example, if user wants to add such flow: assign GTP-U packets with TEID 0x123456 to queue 14. 
> Then use can use following CLI:
> flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x123456 / end actions queue index 14 / end
> instead of below CLI to distinguish GTP-C and GTP-U with UDP port:
> flow create 0 ingress pattern eth / ipv4 / udp dst spec 2125 dst mask 0 / gtp teid is 0x123456 / end actions queue index 14 / end

I agree with you; from a usability standpoint it's much nicer. I have one
question though: could one send GTP-C / GTP-U traffic using nondefault UDP
ports and expect hardware to match it without explicitly specifying a port
in the UDP item? That is, is there some property in GTP-C / GTP-U traffic
outside the UDP port that would theoretically allow a host (even a stateful
one) to tell them apart?

If it really depends on the UDP port only, not specifying one will use
hardware defaults regardless of the item (GTP / GTP-U / GTP-C). However, if,
like VXLAN, this default value can be modified outside of rte_flow, most
users will have to specify it regardless in order to get consistent results
across various vendors/adapters.

In any case I don't mind three items. GTP encompasses both GTP-U and GTP-C
(possibly two different UDP ports at once), while GTP-U and GTP-C match
exactly one. You only have to describe this properly in the documentation.

Thanks.

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v2 2/6] ethdev: add GTPC and GTPU items
  2017-09-12 10:46           ` Adrien Mazarguil
@ 2017-09-13  3:09             ` Xing, Beilei
  0 siblings, 0 replies; 116+ messages in thread
From: Xing, Beilei @ 2017-09-13  3:09 UTC (permalink / raw)
  To: Adrien Mazarguil; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

Hi Adrien,

> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> Sent: Tuesday, September 12, 2017 6:47 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 2/6] ethdev: add GTPC and GTPU items
> 
> Hi Beilei,
> 
> On Tue, Sep 12, 2017 at 06:40:50AM +0000, Xing, Beilei wrote:
> > Hi Adrien,
> >
> > > -----Original Message-----
> > > From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> > > Sent: Thursday, September 7, 2017 8:20 PM
> > > To: Xing, Beilei <beilei.xing@intel.com>
> > > Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> > > <andrey.chilikin@intel.com>; dev@dpdk.org
> > > Subject: Re: [dpdk-dev] [PATCH v2 2/6] ethdev: add GTPC and GTPU
> > > items
> > >
> > > Hi Beilei,
> > >
> > > I assume this patch supersedes [1]?
> > >
> > > [1] http://dpdk.org/ml/archives/dev/2017-August/073501.html
> > >
> > > Thanks for merging testpmd and the API change as a single patch, I
> > > still have a few comments, see below.
> > >
> > > (please add "flow API" somewhere in the title by the way)
> >
> > Thanks for all your comments.
> > Yes, http://dpdk.org/ml/archives/dev/2017-August/073501.html is
> superseded and did merging testpmd and API change.
> > I will update title in next version.
> 
> All right, thanks.
> 
> > > On Thu, Sep 07, 2017 at 07:20:59PM +0800, Beilei Xing wrote:
> > > > This patch adds GTPC and GTPU items to generic rte flow, and also
> > > > exposes the following item fields through the flow command:
> > > >
> > > > - GTPC TEID
> > > > - GTPU TEID
> > > >
> > > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > >
> > > Won't there be a need to match nonspecific GTP traffic as well (both
> > > GTP-C and GTP-U a once), since they use the same structure?
> > >
> > > I'm not familiar with the protocol at all so I wonder if you should
> > > maybe leave the GTP item in addition to those two.
> > >
> >
> > Agree, I will leave the GTP item in next version.
> >
> > GTP-C and GTP-U use the same structure, the difference between them is
> UDP port, 2123 is for GTP-C, and 2152 is for GTP-U.
> > Add GTP-C and GTP -U item since I want to design a user-friendly CLI.
> >
> > For example, if user wants to add such flow: assign GTP-U packets with
> TEID 0x123456 to queue 14.
> > Then use can use following CLI:
> > flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x123456
> > / end actions queue index 14 / end instead of below CLI to distinguish
> GTP-C and GTP-U with UDP port:
> > flow create 0 ingress pattern eth / ipv4 / udp dst spec 2125 dst mask
> > 0 / gtp teid is 0x123456 / end actions queue index 14 / end
> 
> I agree with you from a usability standpoint it's much nicer. I have one
> question though, could one send GTP-C / GTP-U traffic using nondefault UDP
> ports and expect hardware to match it without explicitly specifying a port in
> the UDP item, that is, is there some property in GTP-C / GTP-U traffic outside
> the UDP port that would theoretically allow a host (even a stateful
> one) to tell them apart?
> 
> If it really depends on the UDP port only, not specifying one will use
> hardware defaults regardless of the item (GTP / GTP-U / GTP-C). However if
> like VXLAN, this default value can be modified outside of rte_flow, most
> users will have to specify it regardless in order to get consistent results
> across various vendors/adapters.

As far as I know, there's no other property in GTP-C / GTP-U traffic outside the UDP port to distinguish GTP-C from GTP-U. And I don't think the port value can be modified by the user the way it can for VXLAN.

> 
> In any case I don't mind three items. GTP encompasses both GTP-U and
> GTP-C (possibly two different UDP ports at once), while GTP-U and GTP-C
> match exactly one. You only have to describe this properly in the
> documentation.

OK, I will describe it in the documentation.

Beilei

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v2 1/6] net/i40e: support RSS for GTP-C and GTP-U
  2017-09-07 11:20     ` [PATCH v2 1/6] net/i40e: support RSS for GTP-C and GTP-U Beilei Xing
@ 2017-09-18 14:17       ` Bruce Richardson
  2017-09-18 14:21         ` Bruce Richardson
  0 siblings, 1 reply; 116+ messages in thread
From: Bruce Richardson @ 2017-09-18 14:17 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, andrey.chilikin, dev

On Thu, Sep 07, 2017 at 07:20:58PM +0800, Beilei Xing wrote:
> GTP-C and GTP-U are supported by new profile.
> Enable RSS for GTP-C and GTP-U after downloading
> profile.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
I get compilation errors when applying this patch.

/Bruce

  CC i40e_ethdev.o
/home/bruce/dpdk.org/drivers/net/i40e/i40e_ethdev.c: In function ‘i40e_update_personalized_pctype’:
/home/bruce/dpdk.org/drivers/net/i40e/i40e_ethdev.c:10965:3: error: ‘RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM’ undeclared (first use in this function); did you mean ‘RTE_PMD_I40E_PKG_INFO_DEVID_NUM’?
   RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
   ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   RTE_PMD_I40E_PKG_INFO_DEVID_NUM
compilation terminated due to -Wfatal-errors.

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v2 1/6] net/i40e: support RSS for GTP-C and GTP-U
  2017-09-18 14:17       ` Bruce Richardson
@ 2017-09-18 14:21         ` Bruce Richardson
  0 siblings, 0 replies; 116+ messages in thread
From: Bruce Richardson @ 2017-09-18 14:21 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, andrey.chilikin, dev

On Mon, Sep 18, 2017 at 03:17:43PM +0100, Bruce Richardson wrote:
> On Thu, Sep 07, 2017 at 07:20:58PM +0800, Beilei Xing wrote:
> > GTP-C and GTP-U are supported by new profile.
> > Enable RSS for GTP-C and GTP-U after downloading
> > profile.
> > 
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> I get compilation errors when applying this patch.
> 
> /Bruce
> 
>   CC i40e_ethdev.o
> /home/bruce/dpdk.org/drivers/net/i40e/i40e_ethdev.c: In function ‘i40e_update_personalized_pctype’:
> /home/bruce/dpdk.org/drivers/net/i40e/i40e_ethdev.c:10965:3: error: ‘RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM’ undeclared (first use in this function); did you mean ‘RTE_PMD_I40E_PKG_INFO_DEVID_NUM’?
>    RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
>    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>    RTE_PMD_I40E_PKG_INFO_DEVID_NUM
> compilation terminated due to -Wfatal-errors.
> 
Sorry, my mistake. Missed the dependency on the earlier patch.

/Bruce

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v3 4/8] ethdev: add GTP items to support flow API
  2017-09-22 22:35       ` [PATCH v3 4/8] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-09-22 13:39         ` Adrien Mazarguil
  0 siblings, 0 replies; 116+ messages in thread
From: Adrien Mazarguil @ 2017-09-22 13:39 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, andrey.chilikin, dev

On Sat, Sep 23, 2017 at 06:35:10AM +0800, Beilei Xing wrote:
> This patch adds GTP, GTPC and GTPU items for
> generic flow API, and also exposes item fields
> through the flow command.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
<snip>
> diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
> +static const enum index item_gtp[] = {
> +	ITEM_GTP_TEID,
> +	ITEM_NEXT,
> +	ZERO,
> +};
> +
> +static const enum index item_gtpc[] = {
> +	ITEM_GTP_TEID,
> +	ITEM_NEXT,
> +	ZERO,
> +};
> +
> +static const enum index item_gtpu[] = {
> +	ITEM_GTP_TEID,
> +	ITEM_NEXT,
> +	ZERO,
> +};
<snip>

Only item_gtp[] needs to be defined. GTPC and GTPU should use the same
array. Apart from that:

Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 116+ messages in thread
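To make the comment above concrete, a sketch of the consolidation in app/test-pmd/cmdline_flow.c (a fragment, illustrative only; names follow the patch, and the [ITEM_GTPC]/[ITEM_GTPU] entries live inside the existing token_list[] array):

static const enum index item_gtp[] = {
	ITEM_GTP_TEID,
	ITEM_NEXT,
	ZERO,
};

	/* The GTPC/GTPU tokens can then reuse item_gtp[] instead of
	 * defining identical item_gtpc[]/item_gtpu[] arrays. */
	[ITEM_GTPC] = {
		.name = "gtpc",
		.help = "match GTP header",
		.priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
		.next = NEXT(item_gtp),
		.call = parse_vc,
	},
	[ITEM_GTPU] = {
		.name = "gtpu",
		.help = "match GTP header",
		.priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
		.next = NEXT(item_gtp),
		.call = parse_vc,
	},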

* [PATCH v3 0/8] GTP-C and GTP-U enabling
  2017-09-07 11:20   ` [PATCH v2 0/6] GTP-C and GTP-U enabling Beilei Xing
                       ` (5 preceding siblings ...)
  2017-09-07 11:21     ` [PATCH v2 6/6] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
@ 2017-09-22 22:35     ` Beilei Xing
  2017-09-22 22:35       ` [PATCH v3 1/8] mbuf: support GTP in software packet type parser Beilei Xing
                         ` (7 more replies)
  6 siblings, 8 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-22 22:35 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch set enables RSS/FDIR/cloud filter for GTP-C and GTP-U.
It depends on Kirill's patch:
http://dpdk.org/ml/archives/dev/2017-September/076035.html

v3 changes:
 - Rework implementation to support the new profile.
 - Add GTPC and GTPU tunnel types in the software packet type parser.
 - Update ptype info when loading a profile.
 - Fix a bug in updating pctype info.


v2 changes:
 - Enable RSS/FDIR/cloud filter dynamically by checking the profile
 - Add GTPC and GTPU items to distinguish rules for GTP-C or GTP-U
 - Rework FDIR/cloud filter enabling function
   

Beilei Xing (8):
  net: support GTP in software packet type parser
  net/i40e: update ptype and pctype info
  net/i40e: support RSS for new pctype
  ethdev: add GTP items to support flow API
  net/i40e: finish integration FDIR with generic flow API
  net/i40e: add FDIR support for GTP-C and GTP-U
  net/i40e: add cloud filter parsing function for GTP
  net/i40e: enable cloud filter for GTP-C and GTP-U

 app/test-pmd/cmdline_flow.c                 |  52 +++
 app/test-pmd/config.c                       |   3 +
 doc/guides/prog_guide/rte_flow.rst          |  18 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |   4 +
 drivers/net/i40e/i40e_ethdev.c              | 530 +++++++++++++++++++++++++-
 drivers/net/i40e/i40e_ethdev.h              | 170 ++++++++-
 drivers/net/i40e/i40e_fdir.c                | 570 +++++++++++++++++++++++++++-
 drivers/net/i40e/i40e_flow.c                | 486 ++++++++++++++++++++----
 drivers/net/i40e/rte_pmd_i40e.c             |   6 +-
 lib/librte_ether/rte_flow.h                 |  52 +++
 lib/librte_mbuf/rte_mbuf_ptype.c            |   2 +
 lib/librte_mbuf/rte_mbuf_ptype.h            |  24 ++
 12 files changed, 1791 insertions(+), 126 deletions(-)

-- 
2.5.5

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v3 1/8] mbuf: support GTP in software packet type parser
  2017-09-22 22:35     ` [PATCH v3 0/8] GTP-C and GTP-U enabling Beilei Xing
@ 2017-09-22 22:35       ` Beilei Xing
  2017-09-25  9:21         ` Olivier MATZ
  2017-09-28  2:17         ` [PATCH v4 0/8] GTP-C and GTP-U enabling Beilei Xing
  2017-09-22 22:35       ` [PATCH v3 2/8] net/i40e: update ptype and pctype info Beilei Xing
                         ` (6 subsequent siblings)
  7 siblings, 2 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-22 22:35 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Add support of GTP-C and GTP-U tunnels in rte_net_get_ptype().

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 lib/librte_mbuf/rte_mbuf_ptype.c |  2 ++
 lib/librte_mbuf/rte_mbuf_ptype.h | 24 ++++++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/lib/librte_mbuf/rte_mbuf_ptype.c b/lib/librte_mbuf/rte_mbuf_ptype.c
index e5c4fae..a450814 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.c
+++ b/lib/librte_mbuf/rte_mbuf_ptype.c
@@ -89,6 +89,8 @@ const char *rte_get_ptype_tunnel_name(uint32_t ptype)
 	case RTE_PTYPE_TUNNEL_NVGRE: return "TUNNEL_NVGRE";
 	case RTE_PTYPE_TUNNEL_GENEVE: return "TUNNEL_GENEVE";
 	case RTE_PTYPE_TUNNEL_GRENAT: return "TUNNEL_GRENAT";
+	case RTE_PTYPE_TUNNEL_GTPC: return "TUNNEL_GTPC";
+	case RTE_PTYPE_TUNNEL_GTPU: return "TUNNEL_GTPU";
 	default: return "TUNNEL_UNKNOWN";
 	}
 }
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index acd70bb..eb7cd2c 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -383,6 +383,30 @@ extern "C" {
  */
 #define RTE_PTYPE_TUNNEL_GRENAT             0x00006000
 /**
+ * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2123>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2123>
+ */
+#define RTE_PTYPE_TUNNEL_GTPC               0x00007000
+/**
+ * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2152>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2152>
+ */
+#define RTE_PTYPE_TUNNEL_GTPU               0x00008000
+/**
  * Mask of tunneling packet types.
  */
 #define RTE_PTYPE_TUNNEL_MASK               0x0000f000
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread
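For context on how an application consumes these values, a minimal sketch of checking the tunnel part of mbuf->packet_type (the handling below is only an example; the ptype macros and rte_get_ptype_tunnel_name() are the existing mbuf API plus the two values added here):

#include <stdio.h>
#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

/* Sketch only: branch on the tunnel packet type reported for an mbuf. */
static void
print_gtp_tunnel_type(const struct rte_mbuf *m)
{
	uint32_t tnl = m->packet_type & RTE_PTYPE_TUNNEL_MASK;

	if (tnl == RTE_PTYPE_TUNNEL_GTPC)
		printf("GTP-C packet (outer UDP dst port 2123)\n");
	else if (tnl == RTE_PTYPE_TUNNEL_GTPU)
		printf("GTP-U packet (outer UDP dst port 2152)\n");
	else
		printf("other tunnel: %s\n", rte_get_ptype_tunnel_name(tnl));
}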

* [PATCH v3 2/8] net/i40e: update ptype and pctype info
  2017-09-22 22:35     ` [PATCH v3 0/8] GTP-C and GTP-U enabling Beilei Xing
  2017-09-22 22:35       ` [PATCH v3 1/8] mbuf: support GTP in software packet type parser Beilei Xing
@ 2017-09-22 22:35       ` Beilei Xing
  2017-09-23  2:58         ` Wu, Jingjing
  2017-09-22 22:35       ` [PATCH v3 3/8] net/i40e: support RSS for new pctype Beilei Xing
                         ` (5 subsequent siblings)
  7 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-22 22:35 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Update new packet type and new pctype info when downloading
profile.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c  | 312 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_ethdev.h  |  24 ++++
 drivers/net/i40e/rte_pmd_i40e.c |   6 +-
 3 files changed, 341 insertions(+), 1 deletion(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 720f067..dcff8cc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -65,6 +65,7 @@
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
 #include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
 
 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
@@ -1036,6 +1037,18 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static void
+i40e_init_customer_pctype(struct i40e_pf *pf)
+{
+	int i;
+
+	for (i = I40E_PERSONALIZED_GTPC; i < I40E_PERSONALIZED_MAX; i++) {
+		pf->new_pctype[i].index = i;
+		pf->new_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
+		pf->new_pctype[i].valid = false;
+	}
+}
+
 static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
@@ -1301,6 +1314,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	/* initialize Traffic Manager configuration */
 	i40e_tm_conf_init(dev);
 
+	i40e_init_customer_pctype(pf);
+
 	ret = i40e_init_ethtype_filter_list(dev);
 	if (ret < 0)
 		goto err_init_ethtype_filter_list;
@@ -10893,6 +10908,303 @@ is_i40e_supported(struct rte_eth_dev *dev)
 	return is_device_supported(dev, &rte_i40e_pmd);
 }
 
+struct i40e_personalized_pctype*
+i40e_find_personalized_pctype(struct i40e_pf *pf, uint8_t index)
+{
+	int i;
+
+	for (i = 0; i < I40E_PERSONALIZED_MAX; i++) {
+		if (pf->new_pctype[i].index == index)
+			return &pf->new_pctype[i];
+	}
+	return NULL;
+}
+
+static int
+i40e_update_personalized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
+				uint32_t pkg_size, uint32_t proto_num,
+				struct rte_pmd_i40e_proto_info *proto)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t pctype_num;
+	struct rte_pmd_i40e_ptype_info *pctype;
+	struct i40e_personalized_pctype *new_pctype = NULL;
+	uint8_t proto_id;
+	uint8_t pctype_value;
+	char name[64];
+	uint32_t i, j, n;
+	int ret;
+
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&pctype_num, sizeof(pctype_num),
+				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype number");
+		return -1;
+	}
+	if (!pctype_num) {
+		PMD_DRV_LOG(INFO, "No new pctype added");
+		return -1;
+	}
+
+	pctype = rte_zmalloc("new_pctype",
+			    pctype_num * sizeof(struct rte_pmd_i40e_ptype_info),
+			    0);
+	if (!pctype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+	/* get information about new pctype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)pctype, pctype_num,
+					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype list");
+		rte_free(pctype);
+		return -1;
+	}
+
+	/* Update personalized pctype. */
+	for (i = 0; i < pctype_num; i++) {
+		pctype_value = pctype[i].ptype_id;
+		memset(name, 0, sizeof(name));
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = pctype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				strcat(name, proto[n].name);
+				strcat(name, "_");
+				break;
+			}
+		}
+		if (!memcmp(name, "GTPC", sizeof("GTPC") - 1))
+			new_pctype =
+				i40e_find_personalized_pctype(pf,
+						      I40E_PERSONALIZED_GTPC);
+		else if (!memcmp(name, "GTPU_IPV4",
+				 sizeof("GTPU_IPV4") - 1))
+			new_pctype =
+				i40e_find_personalized_pctype(pf,
+						   I40E_PERSONALIZED_GTPU_IPV4);
+		else if (!memcmp(name, "GTPU_IPV6",
+				 sizeof("GTPU_IPV6") - 1))
+			new_pctype =
+				i40e_find_personalized_pctype(pf,
+						   I40E_PERSONALIZED_GTPU_IPV6);
+		else if (!memcmp(name, "GTPU", sizeof("GTPU") - 1))
+			new_pctype =
+				i40e_find_personalized_pctype(pf,
+						      I40E_PERSONALIZED_GTPU);
+		if (new_pctype) {
+			new_pctype->pctype = pctype_value;
+			new_pctype->valid = true;
+		}
+	}
+
+	rte_free(pctype);
+	return 0;
+}
+
+static int
+i40e_update_personalized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
+			       uint32_t pkg_size, uint32_t proto_num,
+			       struct rte_pmd_i40e_proto_info *proto)
+{
+	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
+	uint8_t port_id = dev->data->port_id;
+	uint32_t ptype_num;
+	struct rte_pmd_i40e_ptype_info *ptype;
+	uint8_t proto_id;
+	char name[16];
+	uint32_t i, j, n;
+	int ip_id = 0;
+	int ret;
+
+	/* get information about new ptype num */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&ptype_num, sizeof(ptype_num),
+				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype number");
+		return -1;
+	}
+	if (!ptype_num) {
+		PMD_DRV_LOG(INFO, "No new ptype added");
+		return -1;
+	}
+	/* get information about new ptype list */
+	ptype = rte_zmalloc("new_ptype",
+			    ptype_num * sizeof(struct rte_pmd_i40e_ptype_info),
+			    0);
+	if (!ptype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)ptype, ptype_num,
+					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype list");
+		rte_free(ptype);
+		return -1;
+	}
+
+	ptype_mapping = rte_zmalloc("ptype_mapping",
+				    ptype_num *
+				    sizeof(struct rte_pmd_i40e_ptype_mapping),
+				    0);
+	if (!ptype_mapping) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		rte_free(ptype);
+		return -1;
+	}
+
+	/* Update ptype mapping table. */
+	for (i = 0; i < ptype_num; i++) {
+		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
+		ptype_mapping[i].sw_ptype = 0;
+		ip_id = 0;
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = ptype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				memset(name, 0, sizeof(name));
+				strcpy(name, proto[n].name);
+				if (!memcmp(name, "IPV4", sizeof("IPV4") - 1)) {
+					if (ip_id == 0) {
+						ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+						ip_id++;
+					} else if (ip_id == 1) {
+						ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L3_IPV4;
+						ip_id++;
+					} else if (ip_id == 2)
+						ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_NONFRAG;
+				} else if (!memcmp(name, "IPV6",
+						   sizeof("IPV6") - 1)) {
+					if (ip_id == 0) {
+						ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+						ip_id++;
+					} else if (ip_id == 1) {
+						ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L3_IPV6;
+						ip_id++;
+					} else if (ip_id == 2)
+						ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!memcmp(name, "GTPC",
+						   sizeof("GTPC")))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPC;
+				else if (!memcmp(name, "GTPU", sizeof("GTPU")))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPU;
+				else if (!memcmp(name, "UDP", sizeof("UDP")))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_UDP;
+				else if (!memcmp(name, "TCP", sizeof("TCP")))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_TCP;
+				else if (!memcmp(name, "SCTP", sizeof("SCTP")))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_SCTP;
+				else if (!memcmp(name, "ICMP",
+						 sizeof("ICMP") - 1))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_ICMP;
+
+				break;
+			}
+		}
+	}
+
+	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
+						ptype_num, 0);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
+		rte_free(ptype_mapping);
+		rte_free(ptype);
+		return -1;
+	}
+
+	rte_free(ptype_mapping);
+	rte_free(ptype);
+	return 0;
+}
+
+void
+i40e_update_personalized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t proto_num;
+	struct rte_pmd_i40e_proto_info *proto;
+	uint32_t i;
+	int ret;
+
+	/* get information about protocol number */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				       (uint8_t *)&proto_num, sizeof(proto_num),
+				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol number");
+		return;
+	}
+	if (!proto_num) {
+		PMD_DRV_LOG(INFO, "No new protocol added");
+		return;
+	}
+
+	proto = rte_zmalloc("new_proto",
+			    proto_num * sizeof(struct rte_pmd_i40e_proto_info),
+			    0);
+	if (!proto) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return;
+	}
+
+	/* get information about protocol list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)proto, proto_num,
+					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol list");
+		rte_free(proto);
+		return;
+	}
+
+	/* Check if GTP is supported. */
+	for (i = 0; i < proto_num; i++) {
+		if (!memcmp(proto[i].name, "GTP", sizeof("GTP") - 1)) {
+			pf->gtp_support = true;
+			break;
+		}
+	}
+
+	/* Update pctype info */
+	ret = i40e_update_personalized_pctype(dev, pkg, pkg_size,
+					      proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No pctype is updated.");
+
+	/* Update ptype info */
+	ret = i40e_update_personalized_ptype(dev, pkg, pkg_size,
+					      proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No ptype is updated.");
+
+	rte_free(proto);
+}
+
 /* Create a QinQ cloud filter
  *
  * The Fortville NIC has limited resources for tunnel filters,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ad80f0f..85ae07b 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -722,6 +722,21 @@ struct i40e_tm_conf {
 	bool committed;
 };
 
+enum i40e_new_proto {
+	I40E_PERSONALIZED_GTPC = 0,
+	I40E_PERSONALIZED_GTPU_IPV4,
+	I40E_PERSONALIZED_GTPU_IPV6,
+	I40E_PERSONALIZED_GTPU,
+	I40E_PERSONALIZED_MAX,
+};
+
+#define I40E_FILTER_PCTYPE_INVALID     0
+struct i40e_personalized_pctype {
+	uint8_t index;    /* Indicate which personalized pctype */
+	uint8_t pctype;   /* New pctype value */
+	bool valid;   /* Check if it's valid */
+};
+
 /*
  * Structure to store private data specific for PF instance.
  */
@@ -786,6 +801,11 @@ struct i40e_pf {
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
+
+	/* Dynamic Device Personalization */
+	bool gtp_support; /* 1 - support GTP-C and GTP-U */
+	/* customer personalized pctype */
+	struct i40e_personalized_pctype new_pctype[I40E_PERSONALIZED_MAX];
 };
 
 enum pending_msg {
@@ -1003,6 +1023,10 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
 int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void i40e_tm_conf_init(struct rte_eth_dev *dev);
 void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+struct i40e_personalized_pctype*
+i40e_find_personalized_pctype(struct i40e_pf *pf, uint8_t index);
+void i40e_update_personalized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+				   uint32_t pkg_size);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index 9f9c808..d1313f6 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1608,6 +1608,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
 		return -EINVAL;
 	}
 
+	i40e_update_personalized_info(dev, buff, size);
+
 	/* Find metadata segment */
 	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
 							pkg_hdr);
@@ -2090,7 +2092,9 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
 	    tnl != RTE_PTYPE_TUNNEL_VXLAN &&
 	    tnl != RTE_PTYPE_TUNNEL_NVGRE &&
 	    tnl != RTE_PTYPE_TUNNEL_GENEVE &&
-	    tnl != RTE_PTYPE_TUNNEL_GRENAT)
+	    tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPC &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPU)
 		return -1;
 
 	if (il2 &&
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread
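The pctype/ptype update above is triggered from rte_pmd_i40e_process_ddp_package(), i.e. when a DDP profile is written to the NIC (testpmd reaches it through its "ddp add" command). A rough sketch of the same call from an application follows; the profile file name and error handling are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <rte_pmd_i40e.h>

/* Sketch only: read a DDP profile from disk and apply it to a port. */
static int
load_ddp_profile(uint8_t port_id, const char *path)
{
	FILE *f = fopen(path, "rb");
	uint8_t *buf;
	long size;
	int ret = -1;

	if (f == NULL)
		return -1;
	if (fseek(f, 0, SEEK_END) == 0 && (size = ftell(f)) > 0 &&
	    fseek(f, 0, SEEK_SET) == 0 && (buf = malloc(size)) != NULL) {
		if (fread(buf, 1, size, f) == (size_t)size)
			ret = rte_pmd_i40e_process_ddp_package(port_id,
					buf, size, RTE_PMD_I40E_PKG_OP_WR_ADD);
		free(buf);
	}
	fclose(f);
	return ret;
}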

* [PATCH v3 3/8] net/i40e: support RSS for new pctype
  2017-09-22 22:35     ` [PATCH v3 0/8] GTP-C and GTP-U enabling Beilei Xing
  2017-09-22 22:35       ` [PATCH v3 1/8] mbuf: support GTP in software packet type parser Beilei Xing
  2017-09-22 22:35       ` [PATCH v3 2/8] net/i40e: update ptype and pctype info Beilei Xing
@ 2017-09-22 22:35       ` Beilei Xing
  2017-09-22 22:35       ` [PATCH v3 4/8] ethdev: add GTP items to support flow API Beilei Xing
                         ` (4 subsequent siblings)
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-22 22:35 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Enable RSS for new pctypes after downloading
new profile.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dcff8cc..a15d994 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1924,6 +1924,30 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	return i40e_phy_conf_link(hw, abilities, speed, true);
 }
 
+static void
+i40e_new_pctype_hash_set(struct i40e_pf *pf, bool enable)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint64_t hena;
+	int i;
+
+	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+	for (i = 0; i < I40E_PERSONALIZED_MAX; i++) {
+		if (pf->new_pctype[i].valid) {
+			if (enable)
+				hena |= 1ULL << pf->new_pctype[i].pctype;
+			else
+				hena &= ~(1ULL << pf->new_pctype[i].pctype);
+		}
+	}
+
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+	I40E_WRITE_FLUSH(hw);
+}
+
 static int
 i40e_dev_start(struct rte_eth_dev *dev)
 {
@@ -2071,6 +2095,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
 			    "please call hierarchy_commit() "
 			    "before starting the port");
 
+	i40e_new_pctype_hash_set(pf, true);
+
 	return I40E_SUCCESS;
 
 err_up:
@@ -2151,6 +2177,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
 	uint32_t reg;
 	int i;
 
+	i40e_new_pctype_hash_set(pf, false);
+
 	PMD_INIT_FUNC_TRACE();
 
 	i40e_dev_stop(dev);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v3 4/8] ethdev: add GTP items to support flow API
  2017-09-22 22:35     ` [PATCH v3 0/8] GTP-C and GTP-U enabling Beilei Xing
                         ` (2 preceding siblings ...)
  2017-09-22 22:35       ` [PATCH v3 3/8] net/i40e: support RSS for new pctype Beilei Xing
@ 2017-09-22 22:35       ` Beilei Xing
  2017-09-22 13:39         ` Adrien Mazarguil
  2017-09-22 22:35       ` [PATCH v3 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
                         ` (3 subsequent siblings)
  7 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-22 22:35 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds GTP, GTPC and GTPU items for
generic flow API, and also exposes item fields
through the flow command.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 app/test-pmd/cmdline_flow.c                 | 52 +++++++++++++++++++++++++++++
 app/test-pmd/config.c                       |  3 ++
 doc/guides/prog_guide/rte_flow.rst          | 18 ++++++++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
 lib/librte_ether/rte_flow.h                 | 52 +++++++++++++++++++++++++++++
 5 files changed, 129 insertions(+)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a17a004..5e291c8 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -171,6 +171,10 @@ enum index {
 	ITEM_GRE_PROTO,
 	ITEM_FUZZY,
 	ITEM_FUZZY_THRESH,
+	ITEM_GTP,
+	ITEM_GTP_TEID,
+	ITEM_GTPC,
+	ITEM_GTPU,
 
 	/* Validate/create actions. */
 	ACTIONS,
@@ -451,6 +455,9 @@ static const enum index next_item[] = {
 	ITEM_MPLS,
 	ITEM_GRE,
 	ITEM_FUZZY,
+	ITEM_GTP,
+	ITEM_GTPC,
+	ITEM_GTPU,
 	ZERO,
 };
 
@@ -588,6 +595,24 @@ static const enum index item_gre[] = {
 	ZERO,
 };
 
+static const enum index item_gtp[] = {
+	ITEM_GTP_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
+static const enum index item_gtpc[] = {
+	ITEM_GTP_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
+static const enum index item_gtpu[] = {
+	ITEM_GTP_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
 static const enum index next_action[] = {
 	ACTION_END,
 	ACTION_VOID,
@@ -1421,6 +1446,33 @@ static const struct token token_list[] = {
 		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
 					thresh)),
 	},
+	[ITEM_GTP] = {
+		.name = "gtp",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTP_TEID] = {
+		.name = "teid",
+		.help = "tunnel endpoint identifier",
+		.next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
+		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
+	},
+	[ITEM_GTPC] = {
+		.name = "gtpc",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtpc),
+		.call = parse_vc,
+	},
+	[ITEM_GTPU] = {
+		.name = "gtpu",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtpu),
+		.call = parse_vc,
+	},
 
 	/* Validate/create actions. */
 	[ACTIONS] = {
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index ca83eef..4063e01 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -947,6 +947,9 @@ static const struct {
 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
+	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
 };
 
 /** Compute storage space needed by item specification. */
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 662a912..1bc8f19 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -955,6 +955,24 @@ Usage example, fuzzy match a TCPv4 packets:
    | 4     | END      |
    +-------+----------+
 
+Item: ``GTP``, ``GTPC``, ``GTPU``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Matches a GTP header.
+
+Note: GTP, GTPC and GTPU use the same structure. Since only the UDP
+destination port distinguishes GTP-C (port 2123) from GTP-U (port 2152)
+packets, the GTPC and GTPU items are defined to provide a user-friendly API
+when creating GTP-C and GTP-U flows.
+
+- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
+  extension header flag (1b), sequence number flag (1b), N-PDU number
+  flag (1b).
+- ``msg_type``: message type.
+- ``msg_len``: message length.
+- ``teid``: tunnel endpoint identifier.
+- Default ``mask`` matches teid only.
+
 Actions
 ~~~~~~~
 
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 2ed62f5..8cc2399 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2696,6 +2696,10 @@ This section lists supported pattern items and their attributes, if any.
 
   - ``thresh {unsigned}``: accuracy threshold.
 
+- ``gtp``, ``gtpc``, ``gtpu``: match GTP header.
+
+  - ``teid {unsigned}``: tunnel endpoint identifier.
+
 Actions list
 ^^^^^^^^^^^^
 
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index bba6169..5da3aff 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -309,6 +309,33 @@ enum rte_flow_item_type {
 	 * See struct rte_flow_item_fuzzy.
 	 */
 	RTE_FLOW_ITEM_TYPE_FUZZY,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTP,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-C packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPC,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-U packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPU,
 };
 
 /**
@@ -735,6 +762,31 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
 #endif
 
 /**
+ * RTE_FLOW_ITEM_TYPE_GTP.
+ *
+ * Matches a GTP header.
+ */
+struct rte_flow_item_gtp {
+	/**
+	 * Version (3b), protocol type (1b), reserved (1b),
+	 * Extension header flag (1b),
+	 * Sequence number flag (1b),
+	 * N-PDU number flag (1b).
+	 */
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type; /**< Message type. */
+	rte_be16_t msg_len; /**< Message length. */
+	rte_be32_t teid; /**< Tunnel endpoint identifier. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
+	.teid = RTE_BE32(0xffffffff),
+};
+#endif
+
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread
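As a usage note for the new items, a short sketch of a pattern entry built around the nonspecific GTP item with a caller-supplied mask (values are examples only; by default, as documented above, only the TEID is matched):

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch only: match any GTP packet (GTP-C or GTP-U) whose TEID has 0x1234
 * in its upper 16 bits, overriding the default all-ones TEID mask. */
static const struct rte_flow_item_gtp gtp_spec = {
	.teid = RTE_BE32(0x12340000),
};
static const struct rte_flow_item_gtp gtp_mask = {
	.teid = RTE_BE32(0xffff0000),
};
static const struct rte_flow_item gtp_item = {
	.type = RTE_FLOW_ITEM_TYPE_GTP,
	.spec = &gtp_spec,
	.mask = &gtp_mask,
};

Such an item would then be placed after the eth / ipv4 (or ipv6) / udp items in a pattern, exactly as in the testpmd examples earlier in this thread.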

* [PATCH v3 5/8] net/i40e: finish integration FDIR with generic flow API
  2017-09-22 22:35     ` [PATCH v3 0/8] GTP-C and GTP-U enabling Beilei Xing
                         ` (3 preceding siblings ...)
  2017-09-22 22:35       ` [PATCH v3 4/8] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-09-22 22:35       ` Beilei Xing
  2017-09-22 22:35       ` [PATCH v3 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
                         ` (2 subsequent siblings)
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-22 22:35 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

rte_eth_fdir_* structures are still used in FDIR functions.
This patch adds i40e private FDIR-related structures and
functions to finish integrating FDIR with the generic flow API.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  94 +++++++-
 drivers/net/i40e/i40e_fdir.c   | 490 +++++++++++++++++++++++++++++++++++++++--
 drivers/net/i40e/i40e_flow.c   |  76 +++----
 3 files changed, 597 insertions(+), 63 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 85ae07b..bec68c4 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -460,6 +460,91 @@ struct i40e_vmdq_info {
 #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
+/**
+ * A union contains the inputs for all types of flow
+ * Items in flows need to be in big endian
+ */
+union i40e_fdir_flow {
+	struct rte_eth_l2_flow     l2_flow;
+	struct rte_eth_udpv4_flow  udp4_flow;
+	struct rte_eth_tcpv4_flow  tcp4_flow;
+	struct rte_eth_sctpv4_flow sctp4_flow;
+	struct rte_eth_ipv4_flow   ip4_flow;
+	struct rte_eth_udpv6_flow  udp6_flow;
+	struct rte_eth_tcpv6_flow  tcp6_flow;
+	struct rte_eth_sctpv6_flow sctp6_flow;
+	struct rte_eth_ipv6_flow   ipv6_flow;
+};
+
+/**
+ * A structure used to contain extend input of flow
+ */
+struct i40e_fdir_flow_ext {
+	uint16_t vlan_tci;
+	uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/**< It is filled by the flexible payload to match. */
+	uint8_t is_vf;   /**< 1 for VF, 0 for port dev */
+	uint16_t dst_id; /**< VF ID, available when is_vf is 1*/
+};
+
+/**
+ * A structure used to define the input for a flow director filter entry
+ */
+struct i40e_fdir_input {
+	enum i40e_filter_pctype pctype;
+	union i40e_fdir_flow flow;
+	/**< Flow fields to match, dependent on pctype */
+	struct i40e_fdir_flow_ext flow_ext;
+	/**< Additional fields to match */
+};
+
+/**
+ * Behavior to be taken when an FDIR filter matches
+ */
+enum i40e_fdir_behavior {
+	I40E_FDIR_ACCEPT = 0,
+	I40E_FDIR_REJECT,
+	I40E_FDIR_PASSTHRU,
+};
+
+/**
+ * Flow director report status
+ * It defines what will be reported if FDIR entry is matched.
+ */
+enum i40e_fdir_status {
+	I40E_FDIR_NO_REPORT_STATUS = 0, /**< Report nothing. */
+	I40E_FDIR_REPORT_ID,            /**< Only report FD ID. */
+	I40E_FDIR_REPORT_ID_FLEX_4,     /**< Report FD ID and 4 flex bytes. */
+	I40E_FDIR_REPORT_FLEX_8,        /**< Report 8 flex bytes. */
+};
+
+/**
+ * A structure used to define the action taken when an FDIR packet filter matches.
+ */
+struct i40e_fdir_action {
+	uint16_t rx_queue;        /**< Queue assigned to if FDIR match. */
+	enum i40e_fdir_behavior behavior;     /**< Behavior will be taken */
+	enum i40e_fdir_status report_status;  /**< Status report option */
+	/**
+	 * If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or
+	 * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+	 * flex bytes start from in flexible payload.
+	 */
+	uint8_t flex_off;
+};
+
+/**
+ * A structure used to define the flow director filter entry by filter_ctrl API
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct i40e_fdir_filter_conf {
+	uint32_t soft_id;
+	/**< ID, a unique value is required when dealing with an FDIR entry */
+	struct i40e_fdir_input input;    /**< Input set */
+	struct i40e_fdir_action action;  /**< Action taken when match */
+};
+
 /*
  * Structure to store flex pit for flow diretor.
  */
@@ -483,7 +568,7 @@ struct i40e_fdir_flex_mask {
 
 struct i40e_fdir_filter {
 	TAILQ_ENTRY(i40e_fdir_filter) rules;
-	struct rte_eth_fdir_filter fdir;
+	struct i40e_fdir_filter_conf fdir;
 };
 
 TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
@@ -907,7 +992,7 @@ extern const struct rte_flow_ops i40e_flow_ops;
 
 union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_fdir_filter fdir_filter;
+	struct i40e_fdir_filter_conf fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
@@ -981,7 +1066,7 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
 int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
 				 struct i40e_ethertype_filter_input *input);
 int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
-			    struct rte_eth_fdir_input *input);
+			    struct i40e_fdir_input *input);
 struct i40e_tunnel_filter *
 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
 			     const struct i40e_tunnel_filter_input *input);
@@ -994,6 +1079,9 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf,
 int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 			     const struct rte_eth_fdir_filter *filter,
 			     bool add);
+int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 84c0a1f..eb2593b 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -100,13 +100,18 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
 			const struct rte_eth_fdir_filter *filter,
 			bool add);
-static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter);
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input);
+			const struct i40e_fdir_input *input);
 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
 				   struct i40e_fdir_filter *filter);
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 
 static int
 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -934,6 +939,263 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static inline int
+i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+				unsigned char *raw_pkt,
+				bool vlan)
+{
+	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+	uint16_t *ether_type;
+	uint8_t len = 2 * sizeof(struct ether_addr);
+	struct ipv4_hdr *ip;
+	struct ipv6_hdr *ip6;
+	static const uint8_t next_proto[] = {
+		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+	};
+
+	raw_pkt += 2 * sizeof(struct ether_addr);
+	if (vlan && fdir_input->flow_ext.vlan_tci) {
+		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+		rte_memcpy(raw_pkt + sizeof(uint16_t),
+			   &fdir_input->flow_ext.vlan_tci,
+			   sizeof(uint16_t));
+		raw_pkt += sizeof(vlan_frame);
+		len += sizeof(vlan_frame);
+	}
+	ether_type = (uint16_t *)raw_pkt;
+	raw_pkt += sizeof(uint16_t);
+	len += sizeof(uint16_t);
+
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		*ether_type = fdir_input->flow.l2_flow.ether_type;
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		ip = (struct ipv4_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+		/* set len to by default */
+		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+					fdir_input->flow.ip4_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+					fdir_input->flow.ip4_flow.ttl :
+					I40E_FDIR_IP_DEFAULT_TTL;
+		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+		len += sizeof(struct ipv4_hdr);
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		ip6 = (struct ipv6_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		ip6->vtc_flow =
+			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					 (fdir_input->flow.ipv6_flow.tc <<
+					  I40E_FDIR_IPv6_TC_OFFSET));
+		ip6->payload_len =
+			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+					fdir_input->flow.ipv6_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+					fdir_input->flow.ipv6_flow.hop_limits :
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		rte_memcpy(&ip6->src_addr,
+			   &fdir_input->flow.ipv6_flow.dst_ip,
+			   IPV6_ADDR_LEN);
+		rte_memcpy(&ip6->dst_addr,
+			   &fdir_input->flow.ipv6_flow.src_ip,
+			   IPV6_ADDR_LEN);
+		len += sizeof(struct ipv6_hdr);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
+	}
+	return len;
+}
+
+/**
+ * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+			     const struct i40e_fdir_input *fdir_input,
+			     unsigned char *raw_pkt)
+{
+	unsigned char *payload, *ptr;
+	struct udp_hdr *udp;
+	struct tcp_hdr *tcp;
+	struct sctp_hdr *sctp;
+	uint8_t size, dst = 0;
+	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+	int len;
+
+	/* fill the ethernet and IP head */
+	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+					      !!fdir_input->flow_ext.vlan_tci);
+	if (len < 0)
+		return -EINVAL;
+
+	/* fill the L4 head */
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		payload = raw_pkt + len;
+		/**
+		 * ARP packet is a special case on which the payload
+		 * starts after the whole ARP header
+		 */
+		if (fdir_input->flow.l2_flow.ether_type ==
+				rte_cpu_to_be_16(ETHER_TYPE_ARP))
+			payload += sizeof(struct arp_hdr);
+		set_idx = I40E_FLXPLD_L2_IDX;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
+		return -EINVAL;
+	}
+
+	/* fill the flexbytes to payload */
+	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+		size = pf->fdir.flex_set[pit_idx].size;
+		if (size == 0)
+			continue;
+		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+		ptr = payload +
+		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+		(void)rte_memcpy(ptr,
+				 &fdir_input->flow_ext.flexbytes[dst],
+				 size * sizeof(uint16_t));
+	}
+
+	return 0;
+}
+
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -1007,17 +1269,17 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
 }
 
 static int
-i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter)
 {
-	rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
 	return 0;
 }
 
 /* Check if there exists the flow director filter */
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input)
+			const struct i40e_fdir_input *input)
 {
 	int ret;
 
@@ -1052,7 +1314,7 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
 
 /* Delete a flow director filter from the SW list */
 int
-i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
 {
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	struct i40e_fdir_filter *filter;
@@ -1082,16 +1344,13 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
  */
 int
 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
-			    const struct rte_eth_fdir_filter *filter,
-			    bool add)
+			 const struct rte_eth_fdir_filter *filter,
+			 bool add)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
 	enum i40e_filter_pctype pctype;
-	struct i40e_fdir_info *fdir_info = &pf->fdir;
-	struct i40e_fdir_filter *fdir_filter, *node;
-	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
 	int ret = 0;
 
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1114,6 +1373,69 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+		return ret;
+	}
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		/* get translated pctype value in fd pctype register */
+		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+			hw, I40E_GLQF_FD_PCTYPES(
+			(int)i40e_flowtype_to_pctype(
+			filter->input.flow_type)));
+	} else
+		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
+	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+			    pctype);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the Ethernet device
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct i40e_fdir_filter_conf *filter,
+			      bool add)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	enum i40e_filter_pctype pctype;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_fdir_filter *fdir_filter, *node;
+	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+	int ret = 0;
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
+			    " check the mode in fdir_conf.");
+		return -ENOTSUP;
+	}
+
+	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "Invalid queue ID");
+		return -EINVAL;
+	}
+	if (filter->input.flow_ext.is_vf &&
+	    filter->input.flow_ext.dst_id >= pf->vf_num) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID");
+		return -EINVAL;
+	}
+
 	/* Check if there is the filter in SW list */
 	memset(&check_filter, 0, sizeof(check_filter));
 	i40e_fdir_filter_convert(filter, &check_filter);
@@ -1132,7 +1454,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 
 	memset(pkt, 0, I40E_FDIR_PKT_LEN);
 
-	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
 		return ret;
@@ -1142,12 +1464,11 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		/* get translated pctype value in fd pctype register */
 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
 			hw, I40E_GLQF_FD_PCTYPES(
-			(int)i40e_flowtype_to_pctype(
-			filter->input.flow_type)));
+			(int)filter->input.pctype));
 	} else
-		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+		pctype = filter->input.pctype;
 
-	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
 			    pctype);
@@ -1302,6 +1623,141 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
 }
 
 /*
+ * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
+ * This is done via a Flow Director Programming Descriptor followed by a
+ * packet structure that contains the filter fields that need to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add)
+{
+	struct i40e_tx_queue *txq = pf->fdir.txq;
+	struct i40e_rx_queue *rxq = pf->fdir.rxq;
+	const struct i40e_fdir_action *fdir_action = &filter->action;
+	volatile struct i40e_tx_desc *txdp;
+	volatile struct i40e_filter_program_desc *fdirdp;
+	uint32_t td_cmd;
+	uint16_t vsi_id, i;
+	uint8_t dest;
+
+	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+	fdirdp = (volatile struct i40e_filter_program_desc *)
+				(&txq->tx_ring[txq->tx_tail]);
+
+	fdirdp->qindex_flex_ptype_vsi =
+			rte_cpu_to_le_32((fdir_action->rx_queue <<
+					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((fdir_action->flex_off <<
+					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((pctype <<
+					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+	if (filter->input.flow_ext.is_vf)
+		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+	else
+		/* Use LAN VSI Id by default */
+		vsi_id = pf->main_vsi->vsi_id;
+	fdirdp->qindex_flex_ptype_vsi |=
+		rte_cpu_to_le_32(((uint32_t)vsi_id <<
+				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+	fdirdp->dtype_cmd_cntindex =
+			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+	if (add)
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+	else
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	if (fdir_action->behavior == I40E_FDIR_REJECT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+	else {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: "
+			    "unsupported fdir behavior.");
+		return -EINVAL;
+	}
+
+	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+				I40E_TXD_FLTR_QW1_DEST_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+		rte_cpu_to_le_32((fdir_action->report_status <<
+				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(
+			((uint32_t)pf->fdir.match_counter_index <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+	txdp = &txq->tx_ring[txq->tx_tail + 1];
+	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+	td_cmd = I40E_TX_DESC_CMD_EOP |
+		 I40E_TX_DESC_CMD_RS  |
+		 I40E_TX_DESC_CMD_DUMMY;
+
+	txdp->cmd_type_offset_bsz =
+		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+	/* Update the tx tail register */
+	rte_wmb();
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if ((txdp->cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+			break;
+		rte_delay_us(1);
+	}
+	if (i >= I40E_FDIR_MAX_WAIT_US) {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: "
+			    "time out to get DD on tx queue.");
+		return -ETIMEDOUT;
+	}
+	/* total delay of 10 ms to check the programming status */
+	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if (i40e_check_fdir_programming_status(rxq) >= 0)
+			return 0;
+		rte_delay_us(1);
+	}
+	PMD_DRV_LOG(ERR,
+		 "Failed to program FDIR filter: programming status reported.");
+	return -ETIMEDOUT;
+}
+
+/*
  * i40e_fdir_flush - clear all filters of Flow Director table
  * @pf: board private structure
  */
@@ -1580,7 +2036,7 @@ i40e_fdir_filter_restore(struct i40e_pf *pf)
 	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
 
 	TAILQ_FOREACH(f, fdir_list, rules)
-		i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
 
 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
 	guarant_cnt =
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b92719a..73af7fd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -84,11 +84,11 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					const struct rte_flow_item *pattern,
 					struct rte_flow_error *error,
-					struct rte_eth_fdir_filter *filter);
+					struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 				       const struct rte_flow_action *actions,
 				       struct rte_flow_error *error,
-				       struct rte_eth_fdir_filter *filter);
+				       struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				 const struct rte_flow_action *actions,
 				 struct rte_flow_error *error,
@@ -2315,7 +2315,7 @@ static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			     const struct rte_flow_item *pattern,
 			     struct rte_flow_error *error,
-			     struct rte_eth_fdir_filter *filter)
+			     struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_item *item = pattern;
@@ -2329,8 +2329,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
-	enum i40e_filter_pctype pctype;
+	enum i40e_filter_pctype pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
@@ -2402,7 +2401,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2420,7 +2419,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2457,13 +2456,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					input_set |= I40E_INSET_IPV4_PROTO;
 
 				/* Get filter info */
-				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+				pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 				/* Check if it is fragment. */
 				frag_off = ipv4_spec->hdr.fragment_offset;
 				frag_off = rte_be_to_cpu_16(frag_off);
 				if (frag_off & IPV4_HDR_OFFSET_MASK ||
 				    frag_off & IPV4_HDR_MF_FLAG)
-					flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
 
 				/* Get the filter info */
 				filter->input.flow.ip4_flow.proto =
@@ -2535,11 +2534,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				/* Check if it is fragment. */
 				if (ipv6_spec->hdr.proto ==
 				    I40E_IPV6_FRAG_HEADER)
-					flow_type =
-						RTE_ETH_FLOW_FRAG_IPV6;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
 				else
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+					pctype =
+					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
 			}
 
 			layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2572,11 +2570,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.tcp4_flow.src_port =
@@ -2616,11 +2614,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.udp4_flow.src_port =
@@ -2663,11 +2661,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.sctp4_flow.src_port =
@@ -2776,14 +2774,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	pctype = i40e_flowtype_to_pctype(flow_type);
-	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Unsupported flow type");
-		return -rte_errno;
-	}
-
 	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
 	if (ret == -1) {
 		rte_flow_error_set(error, EINVAL,
@@ -2797,7 +2787,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->input.flow_type = flow_type;
+	filter->input.pctype = pctype;
 
 	/* Store flex mask to SW */
 	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
@@ -2832,7 +2822,7 @@ static int
 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 			    const struct rte_flow_action *actions,
 			    struct rte_flow_error *error,
-			    struct rte_eth_fdir_filter *filter)
+			    struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
@@ -2855,13 +2845,13 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 					   "Invalid queue ID for FDIR.");
 			return -rte_errno;
 		}
-		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		filter->action.behavior = I40E_FDIR_ACCEPT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_DROP:
-		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+		filter->action.behavior = I40E_FDIR_REJECT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
-		filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+		filter->action.behavior = I40E_FDIR_PASSTHRU;
 		break;
 	default:
 		rte_flow_error_set(error, EINVAL,
@@ -2876,11 +2866,11 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_MARK:
 		mark_spec = (const struct rte_flow_action_mark *)act->conf;
-		filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+		filter->action.report_status = I40E_FDIR_REPORT_ID;
 		filter->soft_id = mark_spec->id;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
-		filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
 		break;
 	case RTE_FLOW_ACTION_TYPE_END:
 		return 0;
@@ -2911,7 +2901,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error,
 			    union i40e_filter_t *filter)
 {
-	struct rte_eth_fdir_filter *fdir_filter =
+	struct i40e_fdir_filter_conf *fdir_filter =
 		&filter->fdir_filter;
 	int ret;
 
@@ -3877,7 +3867,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_ethertype_filter_list);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 				       &cons_filter.fdir_filter, 1);
 		if (ret)
 			goto free_flow;
@@ -3927,7 +3917,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 			      (struct i40e_tunnel_filter *)flow->rule);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
 		break;
 	default:
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v3 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-22 22:35     ` [PATCH v3 0/8] GTP-C and GTP-U enabling Beilei Xing
                         ` (4 preceding siblings ...)
  2017-09-22 22:35       ` [PATCH v3 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
@ 2017-09-22 22:35       ` Beilei Xing
  2017-09-22 22:35       ` [PATCH v3 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
  2017-09-22 22:35       ` [PATCH v3 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-22 22:35 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds FDIR support for GTP-C and GTP-U.
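
As a rough usage sketch (not part of the patch; the TEID value is arbitrary), a GTP-U rule for traffic carrying an inner IPv4 packet is expressed by appending an empty IPV4 item after the GTPU item. The parser below only uses that empty item to select the GTP-U inner-IPv4 personalized PCTYPE; no inner fields are matched:

  struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x1234) };
  struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
  struct rte_flow_item pattern[] = {
          { .type = RTE_FLOW_ITEM_TYPE_ETH },
          { .type = RTE_FLOW_ITEM_TYPE_IPV4 },   /* outer IP, wildcarded */
          { .type = RTE_FLOW_ITEM_TYPE_UDP },
          { .type = RTE_FLOW_ITEM_TYPE_GTPU,
            .spec = &gtp_spec, .mask = &gtp_mask },
          { .type = RTE_FLOW_ITEM_TYPE_IPV4 },   /* empty inner IP item */
          { .type = RTE_FLOW_ITEM_TYPE_END },
  };

The attributes, actions and rte_flow_create() call are the same as in the sketch shown after the ethdev patch earlier in this thread.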

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c |   1 +
 drivers/net/i40e/i40e_ethdev.h |  37 ++++++
 drivers/net/i40e/i40e_fdir.c   | 198 ++++++++++++++++++++++----------
 drivers/net/i40e/i40e_flow.c   | 253 ++++++++++++++++++++++++++++++++++-------
 4 files changed, 392 insertions(+), 97 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index a15d994..8514ba0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1047,6 +1047,7 @@ i40e_init_customer_pctype(struct i40e_pf *pf)
 		pf->new_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
 		pf->new_pctype[i].valid = false;
 	}
+	pf->new_pctype_used = false;
 }
 
 static int
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index bec68c4..928a068 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -233,6 +233,7 @@ enum i40e_flxpld_layer_idx {
 #define I40E_INSET_TUNNEL_SRC_PORT       0x0000000800000000ULL
 #define I40E_INSET_TUNNEL_DST_PORT       0x0000001000000000ULL
 #define I40E_INSET_TUNNEL_ID             0x0000002000000000ULL
+#define I40E_INSET_GTP_TEID              0x0000004000000000ULL
 
 /* bit 48 ~ bit 55 */
 #define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
@@ -461,6 +462,31 @@ struct i40e_vmdq_info {
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
 /**
+ * A structure used to define the input for GTP flow
+ */
+struct i40e_gtp_flow {
+	struct rte_eth_udpv4_flow udp; /**< IPv4 UDP fields to match. */
+	uint8_t msg_type;              /**< Message type. */
+	uint32_t teid;                 /**< TEID in big endian. */
+};
+
+/**
+ * A structure used to define the input for GTP IPV4 flow
+ */
+struct i40e_gtp_ipv4_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv4_flow ip4;
+};
+
+/**
+ * A structure used to define the input for GTP IPV6 flow
+ */
+struct i40e_gtp_ipv6_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv6_flow ip6;
+};
+
+/**
  * A union contains the inputs for all types of flow
  * Items in flows need to be in big endian
  */
@@ -474,6 +500,14 @@ union i40e_fdir_flow {
 	struct rte_eth_tcpv6_flow  tcp6_flow;
 	struct rte_eth_sctpv6_flow sctp6_flow;
 	struct rte_eth_ipv6_flow   ipv6_flow;
+	struct i40e_gtp_flow       gtp_flow;
+	struct i40e_gtp_ipv4_flow  gtp_ipv4_flow;
+	struct i40e_gtp_ipv6_flow  gtp_ipv6_flow;
+};
+
+enum i40e_fdir_ip_type {
+	I40E_FDIR_IPTYPE_IPV4,
+	I40E_FDIR_IPTYPE_IPV6,
 };
 
 /**
@@ -485,6 +519,8 @@ struct i40e_fdir_flow_ext {
 	/**< It is filled by the flexible payload to match. */
 	uint8_t is_vf;   /**< 1 for VF, 0 for port dev */
 	uint16_t dst_id; /**< VF ID, available when is_vf is 1*/
+	bool inner_ip;   /**< If there is inner ip */
+	enum i40e_fdir_ip_type iip_type; /**< ip type for inner ip */
 };
 
 /**
@@ -891,6 +927,7 @@ struct i40e_pf {
 	bool gtp_support; /* 1 - support GTP-C and GTP-U */
 	/* customer personalized pctype */
 	struct i40e_personalized_pctype new_pctype[I40E_PERSONALIZED_MAX];
+	bool new_pctype_used; /* Check if new PCTYPE is used for FDIR */
 };
 
 enum pending_msg {
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index eb2593b..56bd2cc 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -71,6 +71,9 @@
 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
 #define I40E_FDIR_UDP_DEFAULT_LEN           400
+#define I40E_FDIR_GTP_DEFAULT_LEN           384
+#define I40E_FDIR_INNER_IP_DEFAULT_LEN      384
+#define I40E_FDIR_INNER_IPv6_DEFAULT_LEN    344
 
 /* Wait time for fdir filter programming */
 #define I40E_FDIR_MAX_WAIT_US 10000
@@ -939,16 +942,33 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static struct i40e_personalized_pctype *
+i40e_flow_fdir_check_new_pctype(struct i40e_pf *pf, uint8_t pctype)
+{
+	struct i40e_personalized_pctype *cus_pctype;
+	enum i40e_new_proto i = I40E_PERSONALIZED_GTPC;
+
+	for (; i < I40E_PERSONALIZED_MAX; i++) {
+		cus_pctype = &pf->new_pctype[i];
+		if (pctype == cus_pctype->pctype)
+			return cus_pctype;
+	}
+	return NULL;
+}
+
 static inline int
-i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
+				const struct i40e_fdir_input *fdir_input,
 				unsigned char *raw_pkt,
 				bool vlan)
 {
+	struct i40e_personalized_pctype *cus_pctype = NULL;
 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
 	uint16_t *ether_type;
 	uint8_t len = 2 * sizeof(struct ether_addr);
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
+	uint8_t pctype = fdir_input->pctype;
 	static const uint8_t next_proto[] = {
 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
@@ -975,27 +995,30 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 	raw_pkt += sizeof(uint16_t);
 	len += sizeof(uint16_t);
 
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	if (pf->new_pctype_used) {
+		cus_pctype = i40e_flow_fdir_check_new_pctype(pf, pctype);
+		if (!cus_pctype)
+			PMD_DRV_LOG(ERR, "unknown pctype %u.",
+				    fdir_input->pctype);
+	}
+
+	if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
 		*ether_type = fdir_input->flow.l2_flow.ether_type;
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
+		 pf->new_pctype_used) {
 		ip = (struct ipv4_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
 		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
 		/* set len to by default */
 		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
-		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
-					fdir_input->flow.ip4_flow.proto :
-					next_proto[fdir_input->pctype];
 		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
-					fdir_input->flow.ip4_flow.ttl :
-					I40E_FDIR_IP_DEFAULT_TTL;
+			fdir_input->flow.ip4_flow.ttl :
+			I40E_FDIR_IP_DEFAULT_TTL;
 		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
 		/**
 		 * The source and destination fields in the transmitted packet
@@ -1004,13 +1027,22 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		 */
 		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
 		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+
+		if (!pf->new_pctype_used)
+			ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+				fdir_input->flow.ip4_flow.proto :
+				next_proto[fdir_input->pctype];
+		else if (cus_pctype->index == I40E_PERSONALIZED_GTPC ||
+			 cus_pctype->index == I40E_PERSONALIZED_GTPU_IPV4 ||
+			 cus_pctype->index == I40E_PERSONALIZED_GTPU_IPV6 ||
+			 cus_pctype->index == I40E_PERSONALIZED_GTPU)
+			ip->next_proto_id = IPPROTO_UDP;
 		len += sizeof(struct ipv4_hdr);
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		ip6 = (struct ipv6_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
@@ -1021,11 +1053,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		ip6->payload_len =
 			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
 		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
-					fdir_input->flow.ipv6_flow.proto :
-					next_proto[fdir_input->pctype];
+			fdir_input->flow.ipv6_flow.proto :
+			next_proto[fdir_input->pctype];
 		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
-					fdir_input->flow.ipv6_flow.hop_limits :
-					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+			fdir_input->flow.ipv6_flow.hop_limits :
+			I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
 		/**
 		 * The source and destination fields in the transmitted packet
 		 * need to be presented in a reversed order with respect
@@ -1038,12 +1070,12 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 			   &fdir_input->flow.ipv6_flow.src_ip,
 			   IPV6_ADDR_LEN);
 		len += sizeof(struct ipv6_hdr);
-		break;
-	default:
+	} else {
 		PMD_DRV_LOG(ERR, "unknown pctype %u.",
 			    fdir_input->pctype);
 		return -1;
 	}
+
 	return len;
 }
 
@@ -1058,23 +1090,28 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 			     const struct i40e_fdir_input *fdir_input,
 			     unsigned char *raw_pkt)
 {
-	unsigned char *payload, *ptr;
+	unsigned char *payload = NULL;
+	unsigned char *ptr;
 	struct udp_hdr *udp;
 	struct tcp_hdr *tcp;
 	struct sctp_hdr *sctp;
+	struct rte_flow_item_gtp *gtp;
+	struct ipv4_hdr *gtp_ipv4;
+	struct ipv6_hdr *gtp_ipv6;
 	uint8_t size, dst = 0;
 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
 	int len;
+	uint8_t pctype = fdir_input->pctype;
+	struct i40e_personalized_pctype *cus_pctype;
 
 	/* fill the ethernet and IP head */
-	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
 					      !!fdir_input->flow_ext.vlan_tci);
 	if (len < 0)
 		return -EINVAL;
 
 	/* fill the L4 head */
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1085,9 +1122,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1098,9 +1133,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1111,15 +1144,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1130,9 +1159,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1143,9 +1170,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
 		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1156,14 +1181,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	} else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
 		payload = raw_pkt + len;
 		/**
 		 * ARP packet is a special case on which the payload
@@ -1173,10 +1195,68 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 				rte_cpu_to_be_16(ETHER_TYPE_ARP))
 			payload += sizeof(struct arp_hdr);
 		set_idx = I40E_FLXPLD_L2_IDX;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
-		return -EINVAL;
+	} else if (pf->new_pctype_used) {
+		cus_pctype = i40e_flow_fdir_check_new_pctype(pf, pctype);
+		if (cus_pctype->index == I40E_PERSONALIZED_GTPC ||
+		    cus_pctype->index == I40E_PERSONALIZED_GTPU_IPV4 ||
+		    cus_pctype->index == I40E_PERSONALIZED_GTPU_IPV6 ||
+		    cus_pctype->index == I40E_PERSONALIZED_GTPU) {
+			udp = (struct udp_hdr *)(raw_pkt + len);
+			udp->dgram_len =
+				rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+			gtp = (struct rte_flow_item_gtp *)
+				((unsigned char *)udp + sizeof(struct udp_hdr));
+			gtp->v_pt_rsv_flags = 0x30;
+			gtp->msg_len =
+				rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+			gtp->teid = fdir_input->flow.gtp_flow.teid;
+			gtp->msg_type = 0x1;
+
+			if (cus_pctype->index == I40E_PERSONALIZED_GTPC)
+				udp->dst_port = rte_cpu_to_be_16(2123);
+			else
+				udp->dst_port = rte_cpu_to_be_16(2152);
+
+			if (cus_pctype->index == I40E_PERSONALIZED_GTPU_IPV4) {
+				gtp->msg_type = 0xFF;
+				gtp_ipv4 = (struct ipv4_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv4->version_ihl =
+					I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+				gtp_ipv4->next_proto_id = IPPROTO_IP;
+				gtp_ipv4->total_length =
+					rte_cpu_to_be_16(
+						I40E_FDIR_INNER_IP_DEFAULT_LEN);
+				payload = (unsigned char *)gtp_ipv4 +
+					sizeof(struct ipv4_hdr);
+			} else if (cus_pctype->index ==
+				   I40E_PERSONALIZED_GTPU_IPV6) {
+				gtp->msg_type = 0xFF;
+				gtp_ipv6 = (struct ipv6_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv6->vtc_flow =
+					rte_cpu_to_be_32(
+					       I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					       (0 << I40E_FDIR_IPv6_TC_OFFSET));
+				gtp_ipv6->proto = IPPROTO_NONE;
+				gtp_ipv6->payload_len =
+					rte_cpu_to_be_16(
+					      I40E_FDIR_INNER_IPv6_DEFAULT_LEN);
+				gtp_ipv6->hop_limits =
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+				payload = (unsigned char *)gtp_ipv6 +
+					sizeof(struct ipv6_hdr);
+			} else
+				payload = (unsigned char *)gtp +
+					sizeof(struct rte_flow_item_gtp);
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
 	}
 
 	/* fill the flexbytes to payload */
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 73af7fd..b9eaf17 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -189,6 +189,40 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
@@ -216,6 +250,40 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_RAW,
@@ -1576,10 +1644,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	/* FDIR - support default flow type with flexible payload */
 	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -2302,6 +2378,42 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf,
 	return 0;
 }
 
+static uint8_t
+i40e_flow_fdir_find_new_pctype(struct i40e_pf *pf,
+			       enum rte_flow_item_type item_type,
+			       struct i40e_fdir_filter_conf *filter)
+{
+	struct i40e_personalized_pctype *cus_pctype = NULL;
+
+	switch (item_type) {
+	case RTE_FLOW_ITEM_TYPE_GTPC:
+		cus_pctype = i40e_find_personalized_pctype(pf,
+						   I40E_PERSONALIZED_GTPC);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GTPU:
+		if (!filter->input.flow_ext.inner_ip)
+			cus_pctype = i40e_find_personalized_pctype(pf,
+						   I40E_PERSONALIZED_GTPU);
+		else if (filter->input.flow_ext.iip_type ==
+			I40E_FDIR_IPTYPE_IPV4)
+			cus_pctype = i40e_find_personalized_pctype(pf,
+						   I40E_PERSONALIZED_GTPU_IPV4);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV6)
+			cus_pctype = i40e_find_personalized_pctype(pf,
+						   I40E_PERSONALIZED_GTPU_IPV6);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported item type");
+		break;
+	}
+
+	if (cus_pctype)
+		return cus_pctype->pctype;
+
+	return I40E_FILTER_PCTYPE_INVALID;
+}
+
 /* 1. Last in item should be NULL as range is not supported.
  * 2. Supported patterns: refer to array i40e_supported_patterns.
  * 3. Supported flow type and input set: refer to array
@@ -2326,14 +2438,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	enum i40e_filter_pctype pctype = 0;
+	int pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	enum rte_flow_item_type cus_item = RTE_FLOW_ITEM_TYPE_END;
 	uint32_t i, j;
 	uint8_t  ipv6_addr_mask[16] = {
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -2351,6 +2465,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	uint16_t outer_tpid;
 	uint16_t ether_type;
 	uint32_t vtc_flow_cpu;
+	bool outer_ip = true;
 	int ret;
 
 	memset(off_arr, 0, sizeof(off_arr));
@@ -2430,7 +2545,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv4_mask =
 				(const struct rte_flow_item_ipv4 *)item->mask;
 
-			if (ipv4_spec && ipv4_mask) {
+			if (ipv4_spec && ipv4_mask && outer_ip) {
 				/* Check IPv4 mask and update input set */
 				if (ipv4_mask->hdr.version_ihl ||
 				    ipv4_mask->hdr.total_length ||
@@ -2475,9 +2590,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					ipv4_spec->hdr.src_addr;
 				filter->input.flow.ip4_flow.dst_ip =
 					ipv4_spec->hdr.dst_addr;
+
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV4;
+			} else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv4 mask.");
+				return -rte_errno;
 			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+			if (outer_ip)
+				outer_ip = false;
 
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -2487,7 +2615,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv6_mask =
 				(const struct rte_flow_item_ipv6 *)item->mask;
 
-			if (ipv6_spec && ipv6_mask) {
+			if (ipv6_spec && ipv6_mask && outer_ip) {
 				/* Check IPv6 mask and update input set */
 				if (ipv6_mask->hdr.payload_len) {
 					rte_flow_error_set(error, EINVAL,
@@ -2538,10 +2666,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				else
 					pctype =
 					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
-			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV6;
+			} else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv6 mask");
+				return -rte_errno;
+			}
 
+			if (outer_ip)
+				outer_ip = false;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
@@ -2636,6 +2776,30 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			layer_idx = I40E_FLXPLD_L4_IDX;
 
 			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+				    gtp_mask->msg_type ||
+				    gtp_mask->msg_len ||
+				    gtp_mask->teid != UINT32_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				filter->input.flow.gtp_flow.teid =
+					gtp_spec->teid;
+
+				pf->new_pctype_used = true;
+				cus_item = item_type;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
 			sctp_spec =
 				(const struct rte_flow_item_sctp *)item->spec;
@@ -2774,43 +2938,56 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Conflict with the first rule's input set.");
-		return -rte_errno;
-	} else if (ret == -EINVAL) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Invalid pattern mask.");
-		return -rte_errno;
+	if (pf->new_pctype_used) {
+		pctype = i40e_flow_fdir_find_new_pctype(pf, cus_item, filter);
+		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported protocol");
+			return -rte_errno;
+		}
 	}
 
-	filter->input.pctype = pctype;
+	if (!pf->new_pctype_used) {
+		ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Conflict with the first rule's input set.");
+			return -rte_errno;
+		} else if (ret == -EINVAL) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Invalid pattern mask.");
+			return -rte_errno;
+		}
 
-	/* Store flex mask to SW */
-	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Exceed maximal number of bitmasks");
-		return -rte_errno;
-	} else if (ret == -2) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Conflict with the first flexible rule");
-		return -rte_errno;
-	} else if (ret > 0)
-		cfg_flex_msk = false;
+		/* Store flex mask to SW */
+		ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Exceed maximal number of bitmasks");
+			return -rte_errno;
+		} else if (ret == -2) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Conflict with the first flexible rule");
+			return -rte_errno;
+		} else if (ret > 0)
+			cfg_flex_msk = false;
 
-	if (cfg_flex_pit)
-		i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+		if (cfg_flex_pit)
+			i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
 
-	if (cfg_flex_msk)
-		i40e_flow_set_fdir_flex_msk(pf, pctype);
+		if (cfg_flex_msk)
+			i40e_flow_set_fdir_flex_msk(pf, pctype);
+	}
+
+	filter->input.pctype = pctype;
 
 	return 0;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v3 7/8] net/i40e: add cloud filter parsing function for GTP
  2017-09-22 22:35     ` [PATCH v3 0/8] GPT-C and GTP-U enabling Beilei Xing
                         ` (5 preceding siblings ...)
  2017-09-22 22:35       ` [PATCH v3 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-09-22 22:35       ` Beilei Xing
  2017-09-22 22:35       ` [PATCH v3 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-22 22:35 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds the i40e_flow_parse_gtp_filter parsing
function for GTP-C and GTP-U.
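
For illustration, a rule that this parser is intended to accept could be
built through the generic flow API roughly as follows (a sketch only;
the port, queue and TEID values are arbitrary placeholders, and the
ETH/IPV4/UDP items carry no spec/mask, as the parser below requires):

#include <rte_flow.h>

/* Sketch: create a GTP-C tunnel rule on port 0 matching TEID 0x1234 and
 * steering matching packets to Rx queue 3.
 */
static struct rte_flow *
create_gtpc_rule(void)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x1234) };
	struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPC,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(0, &attr, pattern, actions, &err);
}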

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |   2 +
 drivers/net/i40e/i40e_flow.c   | 151 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 153 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 928a068..913d9b1 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -720,6 +720,8 @@ enum i40e_tunnel_type {
 	I40E_TUNNEL_TYPE_MPLSoUDP,
 	I40E_TUNNEL_TYPE_MPLSoGRE,
 	I40E_TUNNEL_TYPE_QINQ,
+	I40E_TUNNEL_TYPE_GTPC,
+	I40E_TUNNEL_TYPE_GTPU,
 	I40E_TUNNEL_TYPE_MAX,
 };
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b9eaf17..f37ab10 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -125,6 +125,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 				       const struct rte_flow_action actions[],
 				       struct rte_flow_error *error,
 				       union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+				      const struct rte_flow_attr *attr,
+				      const struct rte_flow_item pattern[],
+				      const struct rte_flow_action actions[],
+				      struct rte_flow_error *error,
+				      union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
 				      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -1808,6 +1814,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+	/* GTP-C & GTP-U */
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
 	/* QINQ */
 	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
 };
@@ -3813,6 +3824,146 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 }
 
 /* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ *    filled with 0.
+ */
+static int
+i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
+			    const struct rte_flow_item *pattern,
+			    struct rte_flow_error *error,
+			    struct i40e_tunnel_filter_conf *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_gtp *gtp_spec;
+	const struct rte_flow_item_gtp *gtp_mask;
+	enum rte_flow_item_type item_type;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+			/* IPv4 is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec =
+				(const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask =
+				(const struct rte_flow_item_gtp *)item->mask;
+
+			if (!pf->gtp_support) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "GTP is unsupported.");
+				return -rte_errno;
+			}
+
+			if (!gtp_spec || !gtp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP item");
+				return -rte_errno;
+			}
+
+			if (gtp_mask->v_pt_rsv_flags ||
+			    gtp_mask->msg_type ||
+			    gtp_mask->msg_len ||
+			    gtp_mask->teid != UINT32_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+				return -rte_errno;
+			}
+
+			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+
+			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+			   const struct rte_flow_attr *attr,
+			   const struct rte_flow_item pattern[],
+			   const struct rte_flow_action actions[],
+			   struct rte_flow_error *error,
+			   union i40e_filter_t *filter)
+{
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
+	int ret;
+
+	ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+					  error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+	return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
  * 2. Supported filter types: QINQ.
  * 3. Mask of fields which need to be matched should be
  *    filled with 1.
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v3 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U
  2017-09-22 22:35     ` [PATCH v3 0/8] GPT-C and GTP-U enabling Beilei Xing
                         ` (6 preceding siblings ...)
  2017-09-22 22:35       ` [PATCH v3 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
@ 2017-09-22 22:35       ` Beilei Xing
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-22 22:35 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

GTP-C and GTP-U are not natively supported by the
cloud filter due to limited HW resources, so this
patch enables GTP-C and GTP-U cloud filters by
replacing the inner_mac and TUNNEL_KEY filter types.
The replacement is configured when a GTP-C or GTP-U
filter rule is added, and it is only invalidated by
a NIC core reset.
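
In other words, the TEID taken from the GTP item of the rule is carried
in two 16-bit "general fields" words of the admin queue cloud filter
element instead of the inner MAC / tunnel key; a simplified excerpt of
the GTP-C case, using the names defined in this patch:

	teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
	pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
		(teid_le >> 16) & 0xFFFF;	/* upper 16 bits of TEID */
	pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
		teid_le & 0xFFFF;		/* lower 16 bits of TEID */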

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 189 +++++++++++++++++++++++++++++++++++++----
 drivers/net/i40e/i40e_ethdev.h |  13 ++-
 drivers/net/i40e/i40e_flow.c   |  12 +--
 3 files changed, 187 insertions(+), 27 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 8514ba0..c93b106 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -7157,7 +7157,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
-	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 3 entries */
@@ -7205,12 +7205,12 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
@@ -7228,12 +7228,131 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* For GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* for GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 
@@ -7342,6 +7461,36 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		big_buffer = 1;
 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
 		break;
+	case I40E_TUNNEL_TYPE_GTPC:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
+	case I40E_TUNNEL_TYPE_GTPU:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
 	case I40E_TUNNEL_TYPE_QINQ:
 		if (!pf->qinq_replace_flag) {
 			ret = i40e_cloud_filter_qinq_create(pf);
@@ -7368,13 +7517,19 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 
 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
 		pfilter->element.flags |=
-			I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+			I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	else {
 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
 						&pfilter->element.flags);
@@ -10895,14 +11050,14 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
 			   sizeof(f->input.general_fields));
 
 		if (((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
 			big_buffer = 1;
 
 		if (big_buffer)
@@ -11294,7 +11449,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 2 entries */
@@ -11325,13 +11480,13 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L2 filter, input for L2 filter will be L1 filter  */
 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 913d9b1..14a7c8e 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -669,10 +669,14 @@ struct i40e_ethertype_rule {
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
-#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
-#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X10 0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_0X11 0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X12 0x12
+#define I40E_AQC_ADD_L1_FILTER_0X11 0x11
+#define I40E_AQC_ADD_L1_FILTER_0X12 0x12
+#define I40E_AQC_ADD_L1_FILTER_0X13 0x13
+#define I40E_AQC_NEW_TR_21          21
+#define I40E_AQC_NEW_TR_22          22
 
 enum i40e_tunnel_iptype {
 	I40E_TUNNEL_IPTYPE_IPV4,
@@ -922,6 +926,7 @@ struct i40e_pf {
 	bool floating_veb_list[I40E_MAX_VF];
 	struct i40e_flow_list flow_list;
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
+	bool gtp_replace_flag; /* 1 - GTP-C/U filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index f37ab10..f2e3fc7 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -4334,12 +4334,12 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 		vsi = vf->vsi;
 	}
 
-	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X10))
 		big_buffer = 1;
 
 	if (big_buffer)
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* Re: [PATCH v3 2/8] net/i40e: update ptype and pctype info
  2017-09-22 22:35       ` [PATCH v3 2/8] net/i40e: update ptype and pctype info Beilei Xing
@ 2017-09-23  2:58         ` Wu, Jingjing
  0 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-09-23  2:58 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev

> +		if (!memcmp(name, "GTPC", sizeof("GTPC") - 1))
> +			new_pctype =
> +				i40e_find_personalized_pctype(pf,
> +						      I40E_PERSONALIZED_GTPC);
> +		else if (!memcmp(name, "GTPU_IPV4",
> +				 sizeof("GTPU_IPV4") - 1))
Memcmp -> strcmp?

[......]


> 
> +enum i40e_new_proto {
> +	I40E_PERSONALIZED_GTPC = 0,
> +	I40E_PERSONALIZED_GTPU_IPV4,
> +	I40E_PERSONALIZED_GTPU_IPV6,
> +	I40E_PERSONALIZED_GTPU,
> +	I40E_PERSONALIZED_MAX,
> +};
> +
> +#define I40E_FILTER_PCTYPE_INVALID     0
> +struct i40e_personalized_pctype {
> +	uint8_t index;    /* Indicate which personalized pctype */


The index is I40E_PERSONALIZED_XXX, right? Why not define it as
enum i40e_new_proto index? Or you could just use #define to define
the personalized pctypes instead of an enum.
> +	uint8_t pctype;   /* New pctype value */
> +	bool valid;   /* Check if it's valid */
> +};
> +
>  /*
>   * Structure to store private data specific for PF instance.
>   */
> @@ -786,6 +801,11 @@ struct i40e_pf {
>  	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
>  	bool qinq_replace_flag;  /* QINQ filter replace is done */
>  	struct i40e_tm_conf tm_conf;
> +
> +	/* Dynamic Device Personalization */
> +	bool gtp_support; /* 1 - support GTP-C and GTP-U */
> +	/* customer personalized pctype */
> +	struct i40e_personalized_pctype new_pctype[I40E_PERSONALIZED_MAX];
>  };
> 
>  enum pending_msg {
> @@ -1003,6 +1023,10 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr,
> uint32_t val);
>  int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
>  void i40e_tm_conf_init(struct rte_eth_dev *dev);
>  void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
> +struct i40e_personalized_pctype*
> +i40e_find_personalized_pctype(struct i40e_pf *pf, uint8_t index);
> +void i40e_update_personalized_info(struct rte_eth_dev *dev, uint8_t *pkg,
> +				   uint32_t pkg_size);
> 
>  #define I40E_DEV_TO_PCI(eth_dev) \
>  	RTE_DEV_TO_PCI((eth_dev)->device)
> diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
> index 9f9c808..d1313f6 100644
> --- a/drivers/net/i40e/rte_pmd_i40e.c
> +++ b/drivers/net/i40e/rte_pmd_i40e.c
> @@ -1608,6 +1608,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t
> *buff,
>  		return -EINVAL;
>  	}
> 
> +	i40e_update_personalized_info(dev, buff, size);
> +
>  	/* Find metadata segment */
>  	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
>  							pkg_hdr);
> @@ -2090,7 +2092,9 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
>  	    tnl != RTE_PTYPE_TUNNEL_VXLAN &&
>  	    tnl != RTE_PTYPE_TUNNEL_NVGRE &&
>  	    tnl != RTE_PTYPE_TUNNEL_GENEVE &&
> -	    tnl != RTE_PTYPE_TUNNEL_GRENAT)
> +	    tnl != RTE_PTYPE_TUNNEL_GRENAT &&
> +	    tnl != RTE_PTYPE_TUNNEL_GTPC &&
> +	    tnl != RTE_PTYPE_TUNNEL_GTPU)
>  		return -1;
> 
>  	if (il2 &&
> --
> 2.5.5

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v3 1/8] mbuf: support GTP in software packet type parser
  2017-09-22 22:35       ` [PATCH v3 1/8] mbuf: support GTP in software packet type parser Beilei Xing
@ 2017-09-25  9:21         ` Olivier MATZ
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
  1 sibling, 0 replies; 116+ messages in thread
From: Olivier MATZ @ 2017-09-25  9:21 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, andrey.chilikin, dev

On Sat, Sep 23, 2017 at 06:35:07AM +0800, Beilei Xing wrote:
> Add support of GTP-C and GTP-U tunnels in rte_net_get_ptype().
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>

Acked-by: Olivier Matz <olivier.matz@6wind.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v4 0/8] GTP-C and GTP-U enabling
  2017-09-22 22:35       ` [PATCH v3 1/8] mbuf: support GTP in software packet type parser Beilei Xing
  2017-09-25  9:21         ` Olivier MATZ
@ 2017-09-28  2:17         ` Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 1/8] mbuf: support GTP in software packet type parser Beilei Xing
                             ` (9 more replies)
  1 sibling, 10 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  2:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch set enables RSS/FDIR/cloud filter for GTP-C and GTP-U.
It depends on Kirill's patch:
http://dpdk.org/ml/archives/dev/2017-September/076035.html.
However, Kirill's patchset needs to be updated.
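
As a rough usage sketch (the buffer handling, port number and error
handling are placeholders, not part of this series): the GTP profile is
first loaded through the existing DDP API, after which GTP-C/GTP-U flow
rules can be created with rte_flow as shown in the later patches.

#include <rte_pmd_i40e.h>

/* Load a DDP profile image (e.g. a GTP profile read from a .pkgo file
 * into pkg/pkg_size by the caller) on an i40e port.
 */
static int
load_gtp_profile(uint8_t port_id, uint8_t *pkg, uint32_t pkg_size)
{
	return rte_pmd_i40e_process_ddp_package(port_id, pkg, pkg_size,
						RTE_PMD_I40E_PKG_OP_WR_ADD);
}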

v4 changes:
 - Refine fdir related code.
 - Rework profile metadata parsing function.
 - Fix code style.

v3 changes:
 - Rework implementation to support the new profile.
 - Add GTPC and GTPU tunnel type in software packet type parser.
 - Update ptype info when loading profile.
 - Fix bug of updating pctype info.


v2 changes:
 - Enable RSS/FDIR/cloud filter dynamically by checking the profile
 - Add GTPC and GTPU items to distinguish rules for GTP-C and GTP-U
 - Rework the FDIR/cloud filter enabling functions

Beilei Xing (8):
  mbuf: support GTP in software packet type parser
  net/i40e: update ptype and pctype info
  net/i40e: support RSS for new pctype
  ethdev: add GTP items to support flow API
  net/i40e: finish integration FDIR with generic flow API
  net/i40e: add FDIR support for GTP-C and GTP-U
  net/i40e: add cloud filter parsing function for GTP
  net/i40e: enable cloud filter for GTP-C and GTP-U

 app/test-pmd/cmdline_flow.c                 |  40 ++
 app/test-pmd/config.c                       |   3 +
 doc/guides/prog_guide/rte_flow.rst          |  18 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |   4 +
 drivers/net/i40e/i40e_ethdev.c              | 530 +++++++++++++++++++++++++-
 drivers/net/i40e/i40e_ethdev.h              | 156 +++++++-
 drivers/net/i40e/i40e_fdir.c                | 572 +++++++++++++++++++++++++++-
 drivers/net/i40e/i40e_flow.c                | 496 ++++++++++++++++++++----
 drivers/net/i40e/rte_pmd_i40e.c             |   6 +-
 lib/librte_ether/rte_flow.h                 |  52 +++
 lib/librte_mbuf/rte_mbuf_ptype.c            |   2 +
 lib/librte_mbuf/rte_mbuf_ptype.h            |  24 ++
 12 files changed, 1775 insertions(+), 128 deletions(-)

-- 
2.5.5

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v4 1/8] mbuf: support GTP in software packet type parser
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
@ 2017-09-28  2:17           ` Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 2/8] net/i40e: update ptype and pctype info Beilei Xing
                             ` (8 subsequent siblings)
  9 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  2:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Add support of GTP-C and GTP-U tunnels in rte_net_get_ptype().
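
A small sketch of how an application might consume the new tunnel
ptypes (the helper name is made up for illustration; with the i40e
patches later in this series the PMD reports these values in
mbuf->packet_type):

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

/* Return non-zero if the received mbuf was classified as a GTP tunnel
 * packet. rte_get_ptype_tunnel_name() can be used to log the name,
 * e.g. "TUNNEL_GTPU".
 */
static int
pkt_is_gtp(const struct rte_mbuf *m)
{
	uint32_t tnl = m->packet_type & RTE_PTYPE_TUNNEL_MASK;

	return tnl == RTE_PTYPE_TUNNEL_GTPC || tnl == RTE_PTYPE_TUNNEL_GTPU;
}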

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
---
 lib/librte_mbuf/rte_mbuf_ptype.c |  2 ++
 lib/librte_mbuf/rte_mbuf_ptype.h | 24 ++++++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/lib/librte_mbuf/rte_mbuf_ptype.c b/lib/librte_mbuf/rte_mbuf_ptype.c
index e5c4fae..a450814 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.c
+++ b/lib/librte_mbuf/rte_mbuf_ptype.c
@@ -89,6 +89,8 @@ const char *rte_get_ptype_tunnel_name(uint32_t ptype)
 	case RTE_PTYPE_TUNNEL_NVGRE: return "TUNNEL_NVGRE";
 	case RTE_PTYPE_TUNNEL_GENEVE: return "TUNNEL_GENEVE";
 	case RTE_PTYPE_TUNNEL_GRENAT: return "TUNNEL_GRENAT";
+	case RTE_PTYPE_TUNNEL_GTPC: return "TUNNEL_GTPC";
+	case RTE_PTYPE_TUNNEL_GTPU: return "TUNNEL_GTPU";
 	default: return "TUNNEL_UNKNOWN";
 	}
 }
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index acd70bb..eb7cd2c 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -383,6 +383,30 @@ extern "C" {
  */
 #define RTE_PTYPE_TUNNEL_GRENAT             0x00006000
 /**
+ * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2123>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2123>
+ */
+#define RTE_PTYPE_TUNNEL_GTPC               0x00007000
+/**
+ * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2152>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2152>
+ */
+#define RTE_PTYPE_TUNNEL_GTPU               0x00008000
+/**
  * Mask of tunneling packet types.
  */
 #define RTE_PTYPE_TUNNEL_MASK               0x0000f000
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v4 2/8] net/i40e: update ptype and pctype info
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 1/8] mbuf: support GTP in software packet type parser Beilei Xing
@ 2017-09-28  2:17           ` Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 3/8] net/i40e: support RSS for new pctype Beilei Xing
                             ` (7 subsequent siblings)
  9 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  2:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Update new packet type and new pctype info when downloading
a profile.
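
The metadata is read from the package image with the DDP info API; for
reference, a trimmed sketch of the kind of query this patch performs
(illustrative only, shown outside the driver context):

#include <rte_pmd_i40e.h>

/* Count the protocols described by a profile image; the protocol and
 * pctype lists are fetched the same way with the *_LIST info types.
 */
static uint32_t
profile_proto_count(uint8_t *pkg, uint32_t pkg_size)
{
	uint32_t proto_num = 0;

	if (rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
				      (uint8_t *)&proto_num, sizeof(proto_num),
				      RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM))
		return 0;
	return proto_num;
}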

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c  | 312 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_ethdev.h  |  24 ++++
 drivers/net/i40e/rte_pmd_i40e.c |   6 +-
 3 files changed, 341 insertions(+), 1 deletion(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 720f067..d6b0d50 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -65,6 +65,7 @@
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
 #include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
 
 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
@@ -1036,6 +1037,21 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static void
+i40e_init_customized_info(struct i40e_pf *pf)
+{
+	int i;
+
+	/* Initialize customized pctype */
+	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
+		pf->customized_pctype[i].index = i;
+		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
+		pf->customized_pctype[i].valid = false;
+	}
+
+	pf->gtp_support = false;
+}
+
 static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
@@ -1301,6 +1317,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	/* initialize Traffic Manager configuration */
 	i40e_tm_conf_init(dev);
 
+	/* Initialize customized information */
+	i40e_init_customized_info(pf);
+
 	ret = i40e_init_ethtype_filter_list(dev);
 	if (ret < 0)
 		goto err_init_ethtype_filter_list;
@@ -10893,6 +10912,299 @@ is_i40e_supported(struct rte_eth_dev *dev)
 	return is_device_supported(dev, &rte_i40e_pmd);
 }
 
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
+{
+	int i;
+
+	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+		if (pf->customized_pctype[i].index == index)
+			return &pf->customized_pctype[i];
+	}
+	return NULL;
+}
+
+static int
+i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size, uint32_t proto_num,
+			      struct rte_pmd_i40e_proto_info *proto)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t pctype_num;
+	struct rte_pmd_i40e_ptype_info *pctype;
+	uint32_t buff_size;
+	struct i40e_customized_pctype *new_pctype = NULL;
+	uint8_t proto_id;
+	uint8_t pctype_value;
+	char name[64];
+	uint32_t i, j, n;
+	int ret;
+
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&pctype_num, sizeof(pctype_num),
+				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype number");
+		return -1;
+	}
+	if (!pctype_num) {
+		PMD_DRV_LOG(INFO, "No new pctype added");
+		return -1;
+	}
+
+	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
+	pctype = rte_zmalloc("new_pctype", buff_size, 0);
+	if (!pctype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+	/* get information about new pctype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)pctype, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype list");
+		rte_free(pctype);
+		return -1;
+	}
+
+	/* Update customized pctype. */
+	for (i = 0; i < pctype_num; i++) {
+		pctype_value = pctype[i].ptype_id;
+		memset(name, 0, sizeof(name));
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = pctype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				strcat(name, proto[n].name);
+				strcat(name, "_");
+				break;
+			}
+		}
+		name[strlen(name) - 1] = '\0';
+		if (!strcmp(name, "GTPC"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						      I40E_CUSTOMIZED_GTPC);
+		else if (!strcmp(name, "GTPU_IPV4"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						   I40E_CUSTOMIZED_GTPU_IPV4);
+		else if (!strcmp(name, "GTPU_IPV6"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						   I40E_CUSTOMIZED_GTPU_IPV6);
+		else if (!strcmp(name, "GTPU"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						      I40E_CUSTOMIZED_GTPU);
+		if (new_pctype) {
+			new_pctype->pctype = pctype_value;
+			new_pctype->valid = true;
+		}
+	}
+
+	rte_free(pctype);
+	return 0;
+}
+
+static int
+i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
+			       uint32_t pkg_size, uint32_t proto_num,
+			       struct rte_pmd_i40e_proto_info *proto)
+{
+	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
+	uint8_t port_id = dev->data->port_id;
+	uint32_t ptype_num;
+	struct rte_pmd_i40e_ptype_info *ptype;
+	uint32_t buff_size;
+	uint8_t proto_id;
+	char name[16];
+	uint32_t i, j, n;
+	bool inner_ip;
+	int ret;
+
+	/* get information about new ptype num */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&ptype_num, sizeof(ptype_num),
+				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype number");
+		return -1;
+	}
+	if (!ptype_num) {
+		PMD_DRV_LOG(INFO, "No new ptype added");
+		return -1;
+	}
+
+	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+	ptype = rte_zmalloc("new_ptype", buff_size, 0);
+	if (!ptype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+
+	/* get information about new ptype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)ptype, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype list");
+		rte_free(ptype);
+		return -1;
+	}
+
+	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
+	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
+	if (!ptype_mapping) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		rte_free(ptype);
+		return -1;
+	}
+
+	/* Update ptype mapping table. */
+	for (i = 0; i < ptype_num; i++) {
+		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
+		ptype_mapping[i].sw_ptype = 0;
+		inner_ip = false;
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = ptype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				memset(name, 0, sizeof(name));
+				strcpy(name, proto[n].name);
+				if (!strcmp(name, "IPV4") && !inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+					inner_ip = true;
+				} else if (!strcmp(name, "IPV4") && inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+				} else if (!strcmp(name, "IPV6") && !inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+					inner_ip = true;
+				} else if (!strcmp(name, "IPV6") && inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+				} else if (!strcmp(name, "IPV4FRAG")) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!strcmp(name, "IPV6FRAG")) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!strcmp(name, "GTPC"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPC;
+				else if (!strcmp(name, "GTPU"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPU;
+				else if (!strcmp(name, "UDP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_UDP;
+				else if (!strcmp(name, "TCP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_TCP;
+				else if (!strcmp(name, "SCTP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_SCTP;
+				else if (!strcmp(name, "ICMP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_ICMP;
+
+				break;
+			}
+		}
+	}
+
+	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
+						ptype_num, 0);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
+		rte_free(ptype_mapping);
+		rte_free(ptype);
+		return -1;
+	}
+
+	rte_free(ptype_mapping);
+	rte_free(ptype);
+	return 0;
+}
+
+void
+i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t proto_num;
+	struct rte_pmd_i40e_proto_info *proto;
+	uint32_t buff_size;
+	uint32_t i;
+	int ret;
+
+	/* get information about protocol number */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				       (uint8_t *)&proto_num, sizeof(proto_num),
+				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol number");
+		return;
+	}
+	if (!proto_num) {
+		PMD_DRV_LOG(INFO, "No new protocol added");
+		return;
+	}
+
+	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
+	proto = rte_zmalloc("new_proto", buff_size, 0);
+	if (!proto) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return;
+	}
+
+	/* get information about protocol list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)proto, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol list");
+		rte_free(proto);
+		return;
+	}
+
+	/* Check if GTP is supported. */
+	for (i = 0; i < proto_num; i++) {
+		if (!strncmp(proto[i].name, "GTP", 3)) {
+			pf->gtp_support = true;
+			break;
+		}
+	}
+
+	/* Update customized pctype info */
+	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
+					    proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No pctype is updated.");
+
+	/* Update customized ptype info */
+	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
+					   proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No ptype is updated.");
+
+	rte_free(proto);
+}
+
 /* Create a QinQ cloud filter
  *
  * The Fortville NIC has limited resources for tunnel filters,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ad80f0f..73fb5c3 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -722,6 +722,21 @@ struct i40e_tm_conf {
 	bool committed;
 };
 
+enum i40e_new_pctype {
+	I40E_CUSTOMIZED_GTPC = 0,
+	I40E_CUSTOMIZED_GTPU_IPV4,
+	I40E_CUSTOMIZED_GTPU_IPV6,
+	I40E_CUSTOMIZED_GTPU,
+	I40E_CUSTOMIZED_MAX,
+};
+
+#define I40E_FILTER_PCTYPE_INVALID     0
+struct i40e_customized_pctype {
+	enum i40e_new_pctype index;  /* Indicate which customized pctype */
+	uint8_t pctype;   /* New pctype value */
+	bool valid;   /* Check if it's valid */
+};
+
 /*
  * Structure to store private data specific for PF instance.
  */
@@ -786,6 +801,11 @@ struct i40e_pf {
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
+
+	/* Dynamic Device Personalization */
+	bool gtp_support; /* 1 - support GTP-C and GTP-U */
+	/* customer customized pctype */
+	struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
 };
 
 enum pending_msg {
@@ -1003,6 +1023,10 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
 int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void i40e_tm_conf_init(struct rte_eth_dev *dev);
 void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
+void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+				 uint32_t pkg_size);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index 5d39044..7e0e23a 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1608,6 +1608,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
 		return -EINVAL;
 	}
 
+	i40e_update_customized_info(dev, buff, size);
+
 	/* Find metadata segment */
 	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
 							pkg_hdr);
@@ -2106,7 +2108,9 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
 	    tnl != RTE_PTYPE_TUNNEL_VXLAN &&
 	    tnl != RTE_PTYPE_TUNNEL_NVGRE &&
 	    tnl != RTE_PTYPE_TUNNEL_GENEVE &&
-	    tnl != RTE_PTYPE_TUNNEL_GRENAT)
+	    tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPC &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPU)
 		return -1;
 
 	if (il2 &&
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v4 3/8] net/i40e: support RSS for new pctype
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 1/8] mbuf: support GTP in software packet type parser Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 2/8] net/i40e: update ptype and pctype info Beilei Xing
@ 2017-09-28  2:17           ` Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 4/8] ethdev: add GTP items to support flow API Beilei Xing
                             ` (6 subsequent siblings)
  9 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  2:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Enable RSS for new pctypes after downloading
a new profile.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index d6b0d50..aba35a5 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1928,6 +1928,31 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	return i40e_phy_conf_link(hw, abilities, speed, true);
 }
 
+static void
+i40e_customized_pctype_hash_set(struct i40e_pf *pf, bool enable)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint64_t hena;
+	int i;
+
+	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+		if (pf->customized_pctype[i].valid) {
+			if (enable)
+				hena |= 1ULL << pf->customized_pctype[i].pctype;
+			else
+				hena &= ~(1ULL <<
+					  pf->customized_pctype[i].pctype);
+		}
+	}
+
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+	I40E_WRITE_FLUSH(hw);
+}
+
 static int
 i40e_dev_start(struct rte_eth_dev *dev)
 {
@@ -2075,6 +2100,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
 			    "please call hierarchy_commit() "
 			    "before starting the port");
 
+	i40e_customized_pctype_hash_set(pf, true);
+
 	return I40E_SUCCESS;
 
 err_up:
@@ -2155,6 +2182,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
 	uint32_t reg;
 	int i;
 
+	i40e_customized_pctype_hash_set(pf, false);
+
 	PMD_INIT_FUNC_TRACE();
 
 	i40e_dev_stop(dev);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v4 4/8] ethdev: add GTP items to support flow API
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
                             ` (2 preceding siblings ...)
  2017-09-28  2:17           ` [PATCH v4 3/8] net/i40e: support RSS for new pctype Beilei Xing
@ 2017-09-28  2:17           ` Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
                             ` (5 subsequent siblings)
  9 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  2:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds GTP, GTPC and GTPU items to the
generic flow API, and also exposes the item fields
through the testpmd flow command.
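
For example, matching on the TEID only (which corresponds to the
default mask) might look like the following; the TEID value is an
arbitrary placeholder:

#include <rte_flow.h>

/* Match TEID 0x12345678 in a GTP-U header; all other fields stay zero
 * in the mask and are therefore ignored.
 */
static struct rte_flow_item_gtp gtp_spec = {
	.teid = RTE_BE32(0x12345678),
};
static struct rte_flow_item_gtp gtp_mask = {
	.teid = RTE_BE32(0xffffffff),	/* same as rte_flow_item_gtp_mask */
};
static struct rte_flow_item gtp_item = {
	.type = RTE_FLOW_ITEM_TYPE_GTPU,
	.spec = &gtp_spec,
	.mask = &gtp_mask,
};

In testpmd the equivalent pattern item reads something like
"gtpu teid is 0x12345678".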

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
 app/test-pmd/config.c                       |  3 ++
 doc/guides/prog_guide/rte_flow.rst          | 18 ++++++++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
 lib/librte_ether/rte_flow.h                 | 52 +++++++++++++++++++++++++++++
 5 files changed, 117 insertions(+)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a17a004..26c3e4f 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -171,6 +171,10 @@ enum index {
 	ITEM_GRE_PROTO,
 	ITEM_FUZZY,
 	ITEM_FUZZY_THRESH,
+	ITEM_GTP,
+	ITEM_GTP_TEID,
+	ITEM_GTPC,
+	ITEM_GTPU,
 
 	/* Validate/create actions. */
 	ACTIONS,
@@ -451,6 +455,9 @@ static const enum index next_item[] = {
 	ITEM_MPLS,
 	ITEM_GRE,
 	ITEM_FUZZY,
+	ITEM_GTP,
+	ITEM_GTPC,
+	ITEM_GTPU,
 	ZERO,
 };
 
@@ -588,6 +595,12 @@ static const enum index item_gre[] = {
 	ZERO,
 };
 
+static const enum index item_gtp[] = {
+	ITEM_GTP_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
 static const enum index next_action[] = {
 	ACTION_END,
 	ACTION_VOID,
@@ -1421,6 +1434,33 @@ static const struct token token_list[] = {
 		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
 					thresh)),
 	},
+	[ITEM_GTP] = {
+		.name = "gtp",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTP_TEID] = {
+		.name = "teid",
+		.help = "tunnel endpoint identifier",
+		.next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
+		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
+	},
+	[ITEM_GTPC] = {
+		.name = "gtpc",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTPU] = {
+		.name = "gtpu",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
 
 	/* Validate/create actions. */
 	[ACTIONS] = {
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index e8e311c..9b09bbd 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -949,6 +949,9 @@ static const struct {
 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
+	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
 };
 
 /** Compute storage space needed by item specification. */
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 662a912..1bc8f19 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -955,6 +955,24 @@ Usage example, fuzzy match a TCPv4 packets:
    | 4     | END      |
    +-------+----------+
 
+Item: ``GTP``, ``GTPC``, ``GTPU``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Matches a GTP header.
+
+Note: GTP, GTPC and GTPU use the same structure. Since only the UDP destination
+port distinguishes GTP-C (port 2123) from GTP-U (port 2152) packets, the GTPC
+and GTPU items are defined to provide a user-friendly API when creating GTP-C
+and GTP-U flows.
+
+- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
+  extension header flag (1b), sequence number flag (1b), N-PDU number
+  flag (1b).
+- ``msg_type``: message type.
+- ``msg_len``: message length.
+- ``teid``: tunnel endpoint identifier.
+- Default ``mask`` matches teid only.
+
 Actions
 ~~~~~~~
 
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 2ed62f5..8cc2399 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2696,6 +2696,10 @@ This section lists supported pattern items and their attributes, if any.
 
   - ``thresh {unsigned}``: accuracy threshold.
 
+- ``gtp``, ``gtpc``, ``gtpu``: match GTP header.
+
+  - ``teid {unsigned}``: tunnel endpoint identifier.
+
 Actions list
 ^^^^^^^^^^^^
 
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index bba6169..5da3aff 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -309,6 +309,33 @@ enum rte_flow_item_type {
 	 * See struct rte_flow_item_fuzzy.
 	 */
 	RTE_FLOW_ITEM_TYPE_FUZZY,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTP,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-C packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPC,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-U packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPU,
 };
 
 /**
@@ -735,6 +762,31 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
 #endif
 
 /**
+ * RTE_FLOW_ITEM_TYPE_GTP.
+ *
+ * Matches a GTP header.
+ */
+struct rte_flow_item_gtp {
+	/**
+	 * Version (2b), protocol type (1b), reserved (1b),
+	 * Version (3b), protocol type (1b), reserved (1b),
+	 * Sequence number flag (1b),
+	 * N-PDU number flag (1b).
+	 */
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type; /**< Message type. */
+	rte_be16_t msg_len; /**< Message length. */
+	rte_be32_t teid; /**< Tunnel endpoint identifier. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
+	.teid = RTE_BE32(0xffffffff),
+};
+#endif
+
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v4 5/8] net/i40e: finish integration FDIR with generic flow API
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
                             ` (3 preceding siblings ...)
  2017-09-28  2:17           ` [PATCH v4 4/8] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-09-28  2:17           ` Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
                             ` (4 subsequent siblings)
  9 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  2:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

rte_eth_fdir_* structures are still used in the FDIR functions.
This patch adds i40e-private FDIR-related structures and
functions to finish integrating FDIR with the generic flow API.
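
As an illustration of the new private representation (a sketch only,
mirroring the old rte_eth_fdir_filter layout; the values are
placeholders), a parsed IPv4/UDP rule ends up in a driver entry shaped
like this:

	struct i40e_fdir_filter_conf conf = {
		.soft_id = 1,
		.input = {
			.pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
			/* .flow / .flow_ext carry the matched fields */
		},
		.action = {
			.rx_queue = 4,
			.behavior = I40E_FDIR_ACCEPT,
			.report_status = I40E_FDIR_REPORT_ID,
		},
	};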

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  83 ++++++-
 drivers/net/i40e/i40e_fdir.c   | 490 +++++++++++++++++++++++++++++++++++++++--
 drivers/net/i40e/i40e_flow.c   |  76 +++----
 3 files changed, 586 insertions(+), 63 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 73fb5c3..4d690a1 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -461,6 +461,80 @@ struct i40e_vmdq_info {
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
 /*
+ * A union containing the inputs for all types of flow.
+ * Items in flows need to be in big endian.
+ */
+union i40e_fdir_flow {
+	struct rte_eth_l2_flow     l2_flow;
+	struct rte_eth_udpv4_flow  udp4_flow;
+	struct rte_eth_tcpv4_flow  tcp4_flow;
+	struct rte_eth_sctpv4_flow sctp4_flow;
+	struct rte_eth_ipv4_flow   ip4_flow;
+	struct rte_eth_udpv6_flow  udp6_flow;
+	struct rte_eth_tcpv6_flow  tcp6_flow;
+	struct rte_eth_sctpv6_flow sctp6_flow;
+	struct rte_eth_ipv6_flow   ipv6_flow;
+};
+
+/* A structure used to contain the extended input of a flow */
+struct i40e_fdir_flow_ext {
+	uint16_t vlan_tci;
+	uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/* It is filled by the flexible payload to match. */
+	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
+	uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+};
+
+/* A structure used to define the input for a flow director filter entry */
+struct i40e_fdir_input {
+	enum i40e_filter_pctype pctype;
+	union i40e_fdir_flow flow;
+	/* Flow fields to match, dependent on flow_type */
+	struct i40e_fdir_flow_ext flow_ext;
+	/* Additional fields to match */
+};
+
+/* Behavior to be taken if an FDIR filter matches */
+enum i40e_fdir_behavior {
+	I40E_FDIR_ACCEPT = 0,
+	I40E_FDIR_REJECT,
+	I40E_FDIR_PASSTHRU,
+};
+
+/* Flow director report status
+ * It defines what will be reported if an FDIR entry is matched.
+ */
+enum i40e_fdir_status {
+	I40E_FDIR_NO_REPORT_STATUS = 0, /* Report nothing. */
+	I40E_FDIR_REPORT_ID,            /* Only report FD ID. */
+	I40E_FDIR_REPORT_ID_FLEX_4,     /* Report FD ID and 4 flex bytes. */
+	I40E_FDIR_REPORT_FLEX_8,        /* Report 8 flex bytes. */
+};
+
+/* A structure used to define the action when an FDIR packet filter matches. */
+struct i40e_fdir_action {
+	uint16_t rx_queue;        /* Queue assigned to if the FDIR filter matches. */
+	enum i40e_fdir_behavior behavior;     /* Behavior to be taken */
+	enum i40e_fdir_status report_status;  /* Status report option */
+	/* If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or
+	 * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+	 * flex bytes start from in flexible payload.
+	 */
+	uint8_t flex_off;
+};
+
+/* A structure used to define the flow director filter entry by filter_ctrl API
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct i40e_fdir_filter_conf {
+	uint32_t soft_id;
+	/* ID, a unique value is required when dealing with an FDIR entry */
+	struct i40e_fdir_input input;    /* Input set */
+	struct i40e_fdir_action action;  /* Action taken when match */
+};
+
+/*
  * Structure to store flex pit for flow director.
  */
 struct i40e_fdir_flex_pit {
@@ -483,7 +557,7 @@ struct i40e_fdir_flex_mask {
 
 struct i40e_fdir_filter {
 	TAILQ_ENTRY(i40e_fdir_filter) rules;
-	struct rte_eth_fdir_filter fdir;
+	struct i40e_fdir_filter_conf fdir;
 };
 
 TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
@@ -907,7 +981,7 @@ extern const struct rte_flow_ops i40e_flow_ops;
 
 union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_fdir_filter fdir_filter;
+	struct i40e_fdir_filter_conf fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
@@ -981,7 +1055,7 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
 int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
 				 struct i40e_ethertype_filter_input *input);
 int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
-			    struct rte_eth_fdir_input *input);
+			    struct i40e_fdir_input *input);
 struct i40e_tunnel_filter *
 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
 			     const struct i40e_tunnel_filter_input *input);
@@ -994,6 +1068,9 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf,
 int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 			     const struct rte_eth_fdir_filter *filter,
 			     bool add);
+int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 84c0a1f..eb2593b 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -100,13 +100,18 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
 			const struct rte_eth_fdir_filter *filter,
 			bool add);
-static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter);
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input);
+			const struct i40e_fdir_input *input);
 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
 				   struct i40e_fdir_filter *filter);
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 
 static int
 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -934,6 +939,263 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static inline int
+i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+				unsigned char *raw_pkt,
+				bool vlan)
+{
+	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+	uint16_t *ether_type;
+	uint8_t len = 2 * sizeof(struct ether_addr);
+	struct ipv4_hdr *ip;
+	struct ipv6_hdr *ip6;
+	static const uint8_t next_proto[] = {
+		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+	};
+
+	raw_pkt += 2 * sizeof(struct ether_addr);
+	if (vlan && fdir_input->flow_ext.vlan_tci) {
+		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+		rte_memcpy(raw_pkt + sizeof(uint16_t),
+			   &fdir_input->flow_ext.vlan_tci,
+			   sizeof(uint16_t));
+		raw_pkt += sizeof(vlan_frame);
+		len += sizeof(vlan_frame);
+	}
+	ether_type = (uint16_t *)raw_pkt;
+	raw_pkt += sizeof(uint16_t);
+	len += sizeof(uint16_t);
+
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		*ether_type = fdir_input->flow.l2_flow.ether_type;
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		ip = (struct ipv4_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+		/* set len to default value */
+		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+					fdir_input->flow.ip4_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+					fdir_input->flow.ip4_flow.ttl :
+					I40E_FDIR_IP_DEFAULT_TTL;
+		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+		len += sizeof(struct ipv4_hdr);
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		ip6 = (struct ipv6_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		ip6->vtc_flow =
+			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					 (fdir_input->flow.ipv6_flow.tc <<
+					  I40E_FDIR_IPv6_TC_OFFSET));
+		ip6->payload_len =
+			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+					fdir_input->flow.ipv6_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+					fdir_input->flow.ipv6_flow.hop_limits :
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		rte_memcpy(&ip6->src_addr,
+			   &fdir_input->flow.ipv6_flow.dst_ip,
+			   IPV6_ADDR_LEN);
+		rte_memcpy(&ip6->dst_addr,
+			   &fdir_input->flow.ipv6_flow.src_ip,
+			   IPV6_ADDR_LEN);
+		len += sizeof(struct ipv6_hdr);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
+	}
+	return len;
+}
+
+/**
+ * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+			     const struct i40e_fdir_input *fdir_input,
+			     unsigned char *raw_pkt)
+{
+	unsigned char *payload, *ptr;
+	struct udp_hdr *udp;
+	struct tcp_hdr *tcp;
+	struct sctp_hdr *sctp;
+	uint8_t size, dst = 0;
+	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+	int len;
+
+	/* fill the ethernet and IP head */
+	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+					      !!fdir_input->flow_ext.vlan_tci);
+	if (len < 0)
+		return -EINVAL;
+
+	/* fill the L4 head */
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		payload = raw_pkt + len;
+		/**
+		 * ARP packet is a special case on which the payload
+		 * starts after the whole ARP header
+		 */
+		if (fdir_input->flow.l2_flow.ether_type ==
+				rte_cpu_to_be_16(ETHER_TYPE_ARP))
+			payload += sizeof(struct arp_hdr);
+		set_idx = I40E_FLXPLD_L2_IDX;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
+		return -EINVAL;
+	}
+
+	/* fill the flexbytes to payload */
+	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+		size = pf->fdir.flex_set[pit_idx].size;
+		if (size == 0)
+			continue;
+		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+		ptr = payload +
+		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+		(void)rte_memcpy(ptr,
+				 &fdir_input->flow_ext.flexbytes[dst],
+				 size * sizeof(uint16_t));
+	}
+
+	return 0;
+}
+
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -1007,17 +1269,17 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
 }
 
 static int
-i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter)
 {
-	rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
 	return 0;
 }
 
 /* Check if there exists the flow director filter */
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input)
+			const struct i40e_fdir_input *input)
 {
 	int ret;
 
@@ -1052,7 +1314,7 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
 
 /* Delete a flow director filter from the SW list */
 int
-i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
 {
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	struct i40e_fdir_filter *filter;
@@ -1082,16 +1344,13 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
  */
 int
 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
-			    const struct rte_eth_fdir_filter *filter,
-			    bool add)
+			 const struct rte_eth_fdir_filter *filter,
+			 bool add)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
 	enum i40e_filter_pctype pctype;
-	struct i40e_fdir_info *fdir_info = &pf->fdir;
-	struct i40e_fdir_filter *fdir_filter, *node;
-	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
 	int ret = 0;
 
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1114,6 +1373,69 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+		return ret;
+	}
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		/* get translated pctype value in fd pctype register */
+		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+			hw, I40E_GLQF_FD_PCTYPES(
+			(int)i40e_flowtype_to_pctype(
+			filter->input.flow_type)));
+	} else
+		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
+	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+			    pctype);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the ethernet device structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct i40e_fdir_filter_conf *filter,
+			      bool add)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	enum i40e_filter_pctype pctype;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_fdir_filter *fdir_filter, *node;
+	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+	int ret = 0;
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
+			    " check the mode in fdir_conf.");
+		return -ENOTSUP;
+	}
+
+	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "Invalid queue ID");
+		return -EINVAL;
+	}
+	if (filter->input.flow_ext.is_vf &&
+	    filter->input.flow_ext.dst_id >= pf->vf_num) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID");
+		return -EINVAL;
+	}
+
 	/* Check if there is the filter in SW list */
 	memset(&check_filter, 0, sizeof(check_filter));
 	i40e_fdir_filter_convert(filter, &check_filter);
@@ -1132,7 +1454,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 
 	memset(pkt, 0, I40E_FDIR_PKT_LEN);
 
-	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
 		return ret;
@@ -1142,12 +1464,11 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		/* get translated pctype value in fd pctype register */
 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
 			hw, I40E_GLQF_FD_PCTYPES(
-			(int)i40e_flowtype_to_pctype(
-			filter->input.flow_type)));
+			(int)filter->input.pctype));
 	} else
-		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+		pctype = filter->input.pctype;
 
-	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
 			    pctype);
@@ -1302,6 +1623,141 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
 }
 
 /*
+ * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
+ * Programming is done via a Flow Director Programming Descriptor followed by
+ * a packet structure that contains the filter fields that need to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add)
+{
+	struct i40e_tx_queue *txq = pf->fdir.txq;
+	struct i40e_rx_queue *rxq = pf->fdir.rxq;
+	const struct i40e_fdir_action *fdir_action = &filter->action;
+	volatile struct i40e_tx_desc *txdp;
+	volatile struct i40e_filter_program_desc *fdirdp;
+	uint32_t td_cmd;
+	uint16_t vsi_id, i;
+	uint8_t dest;
+
+	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+	fdirdp = (volatile struct i40e_filter_program_desc *)
+				(&txq->tx_ring[txq->tx_tail]);
+
+	fdirdp->qindex_flex_ptype_vsi =
+			rte_cpu_to_le_32((fdir_action->rx_queue <<
+					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((fdir_action->flex_off <<
+					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((pctype <<
+					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+	if (filter->input.flow_ext.is_vf)
+		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+	else
+		/* Use LAN VSI Id by default */
+		vsi_id = pf->main_vsi->vsi_id;
+	fdirdp->qindex_flex_ptype_vsi |=
+		rte_cpu_to_le_32(((uint32_t)vsi_id <<
+				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+	fdirdp->dtype_cmd_cntindex =
+			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+	if (add)
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+	else
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	if (fdir_action->behavior == I40E_FDIR_REJECT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+	else {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: "
+			    "unsupported fdir behavior.");
+		return -EINVAL;
+	}
+
+	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+				I40E_TXD_FLTR_QW1_DEST_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+		rte_cpu_to_le_32((fdir_action->report_status <<
+				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(
+			((uint32_t)pf->fdir.match_counter_index <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+	txdp = &txq->tx_ring[txq->tx_tail + 1];
+	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+	td_cmd = I40E_TX_DESC_CMD_EOP |
+		 I40E_TX_DESC_CMD_RS  |
+		 I40E_TX_DESC_CMD_DUMMY;
+
+	txdp->cmd_type_offset_bsz =
+		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+	/* Update the tx tail register */
+	rte_wmb();
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if ((txdp->cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+			break;
+		rte_delay_us(1);
+	}
+	if (i >= I40E_FDIR_MAX_WAIT_US) {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: "
+			    "time out to get DD on tx queue.");
+		return -ETIMEDOUT;
+	}
+	/* delay a total of 10 ms while checking the programming status */
+	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if (i40e_check_fdir_programming_status(rxq) >= 0)
+			return 0;
+		rte_delay_us(1);
+	}
+	PMD_DRV_LOG(ERR,
+		 "Failed to program FDIR filter: programming status reported.");
+	return -ETIMEDOUT;
+}
+
+/*
  * i40e_fdir_flush - clear all filters of Flow Director table
  * @pf: board private structure
  */
@@ -1580,7 +2036,7 @@ i40e_fdir_filter_restore(struct i40e_pf *pf)
 	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
 
 	TAILQ_FOREACH(f, fdir_list, rules)
-		i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
 
 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
 	guarant_cnt =
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b92719a..73af7fd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -84,11 +84,11 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					const struct rte_flow_item *pattern,
 					struct rte_flow_error *error,
-					struct rte_eth_fdir_filter *filter);
+					struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 				       const struct rte_flow_action *actions,
 				       struct rte_flow_error *error,
-				       struct rte_eth_fdir_filter *filter);
+				       struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				 const struct rte_flow_action *actions,
 				 struct rte_flow_error *error,
@@ -2315,7 +2315,7 @@ static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			     const struct rte_flow_item *pattern,
 			     struct rte_flow_error *error,
-			     struct rte_eth_fdir_filter *filter)
+			     struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_item *item = pattern;
@@ -2329,8 +2329,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
-	enum i40e_filter_pctype pctype;
+	enum i40e_filter_pctype pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
@@ -2402,7 +2401,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2420,7 +2419,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2457,13 +2456,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					input_set |= I40E_INSET_IPV4_PROTO;
 
 				/* Get filter info */
-				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+				pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 				/* Check if it is fragment. */
 				frag_off = ipv4_spec->hdr.fragment_offset;
 				frag_off = rte_be_to_cpu_16(frag_off);
 				if (frag_off & IPV4_HDR_OFFSET_MASK ||
 				    frag_off & IPV4_HDR_MF_FLAG)
-					flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
 
 				/* Get the filter info */
 				filter->input.flow.ip4_flow.proto =
@@ -2535,11 +2534,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				/* Check if it is fragment. */
 				if (ipv6_spec->hdr.proto ==
 				    I40E_IPV6_FRAG_HEADER)
-					flow_type =
-						RTE_ETH_FLOW_FRAG_IPV6;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
 				else
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+					pctype =
+					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
 			}
 
 			layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2572,11 +2570,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.tcp4_flow.src_port =
@@ -2616,11 +2614,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.udp4_flow.src_port =
@@ -2663,11 +2661,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.sctp4_flow.src_port =
@@ -2776,14 +2774,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	pctype = i40e_flowtype_to_pctype(flow_type);
-	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Unsupported flow type");
-		return -rte_errno;
-	}
-
 	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
 	if (ret == -1) {
 		rte_flow_error_set(error, EINVAL,
@@ -2797,7 +2787,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->input.flow_type = flow_type;
+	filter->input.pctype = pctype;
 
 	/* Store flex mask to SW */
 	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
@@ -2832,7 +2822,7 @@ static int
 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 			    const struct rte_flow_action *actions,
 			    struct rte_flow_error *error,
-			    struct rte_eth_fdir_filter *filter)
+			    struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
@@ -2855,13 +2845,13 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 					   "Invalid queue ID for FDIR.");
 			return -rte_errno;
 		}
-		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		filter->action.behavior = I40E_FDIR_ACCEPT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_DROP:
-		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+		filter->action.behavior = I40E_FDIR_REJECT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
-		filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+		filter->action.behavior = I40E_FDIR_PASSTHRU;
 		break;
 	default:
 		rte_flow_error_set(error, EINVAL,
@@ -2876,11 +2866,11 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_MARK:
 		mark_spec = (const struct rte_flow_action_mark *)act->conf;
-		filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+		filter->action.report_status = I40E_FDIR_REPORT_ID;
 		filter->soft_id = mark_spec->id;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
-		filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
 		break;
 	case RTE_FLOW_ACTION_TYPE_END:
 		return 0;
@@ -2911,7 +2901,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error,
 			    union i40e_filter_t *filter)
 {
-	struct rte_eth_fdir_filter *fdir_filter =
+	struct i40e_fdir_filter_conf *fdir_filter =
 		&filter->fdir_filter;
 	int ret;
 
@@ -3877,7 +3867,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_ethertype_filter_list);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 				       &cons_filter.fdir_filter, 1);
 		if (ret)
 			goto free_flow;
@@ -3927,7 +3917,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 			      (struct i40e_tunnel_filter *)flow->rule);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
 		break;
 	default:
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread
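
As a usage illustration of this integration (example port, addresses and queue
index, not part of the patch), a perfect-match FDIR rule is now created
entirely through the generic flow API, e.g. from testpmd:

  testpmd> flow create 0 ingress pattern eth / ipv4 src is 2.2.2.3 dst is 2.2.2.5 / udp src is 32 dst is 33 / end actions queue index 2 / mark id 1 / end

Per the parsing code above, the mark action maps to I40E_FDIR_REPORT_ID with
the mark id stored as soft_id, and the queue action maps to I40E_FDIR_ACCEPT.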

* [PATCH v4 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
                             ` (4 preceding siblings ...)
  2017-09-28  2:17           ` [PATCH v4 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
@ 2017-09-28  2:17           ` Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
                             ` (3 subsequent siblings)
  9 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  2:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds FDIR support for GTP-C and GTP-U.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
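
For illustration only (example values; this assumes the GTP profile has been
loaded so that the GTP pctypes are available on the port), such filters can
then be requested from testpmd through the generic flow API, e.g.:

  testpmd> flow create 0 ingress pattern eth / ipv4 / udp / gtpc teid is 10 / end actions queue index 2 / end
  testpmd> flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 10 / end actions queue index 3 / end

Only the TEID may be specified for the GTP item here; the parser rejects masks
that cover any other GTP field.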
---
 drivers/net/i40e/i40e_ethdev.h |  30 +++++
 drivers/net/i40e/i40e_fdir.c   | 200 ++++++++++++++++++++++---------
 drivers/net/i40e/i40e_flow.c   | 263 +++++++++++++++++++++++++++++++++++------
 3 files changed, 396 insertions(+), 97 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 4d690a1..502f6c6 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -460,6 +460,25 @@ struct i40e_vmdq_info {
 #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
+/* A structure used to define the input for GTP flow */
+struct i40e_gtp_flow {
+	struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */
+	uint8_t msg_type;              /* Message type. */
+	uint32_t teid;                 /* TEID in big endian. */
+};
+
+/* A structure used to define the input for GTP IPV4 flow */
+struct i40e_gtp_ipv4_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv4_flow ip4;
+};
+
+/* A structure used to define the input for GTP IPV6 flow */
+struct i40e_gtp_ipv6_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv6_flow ip6;
+};
+
 /*
  * A union containing the inputs for all types of flow.
  * Items in flows need to be in big endian.
@@ -474,6 +493,14 @@ union i40e_fdir_flow {
 	struct rte_eth_tcpv6_flow  tcp6_flow;
 	struct rte_eth_sctpv6_flow sctp6_flow;
 	struct rte_eth_ipv6_flow   ipv6_flow;
+	struct i40e_gtp_flow       gtp_flow;
+	struct i40e_gtp_ipv4_flow  gtp_ipv4_flow;
+	struct i40e_gtp_ipv6_flow  gtp_ipv6_flow;
+};
+
+enum i40e_fdir_ip_type {
+	I40E_FDIR_IPTYPE_IPV4,
+	I40E_FDIR_IPTYPE_IPV6,
 };
 
 /* A structure used to contain extend input of flow */
@@ -483,6 +510,9 @@ struct i40e_fdir_flow_ext {
 	/* It is filled by the flexible payload to match. */
 	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
 	uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+	bool inner_ip;   /* If there is inner ip */
+	enum i40e_fdir_ip_type iip_type; /* ip type for inner ip */
+	bool customized_pctype; /* If customized pctype is used */
 };
 
 /* A structure used to define the input for a flow director filter entry */
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index eb2593b..e374b42 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -71,6 +71,9 @@
 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
 #define I40E_FDIR_UDP_DEFAULT_LEN           400
+#define I40E_FDIR_GTP_DEFAULT_LEN           384
+#define I40E_FDIR_INNER_IP_DEFAULT_LEN      384
+#define I40E_FDIR_INNER_IPv6_DEFAULT_LEN    344
 
 /* Wait time for fdir filter programming */
 #define I40E_FDIR_MAX_WAIT_US 10000
@@ -939,16 +942,34 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static struct i40e_customized_pctype *
+i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
+{
+	struct i40e_customized_pctype *cus_pctype;
+	enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
+
+	for (; i < I40E_CUSTOMIZED_MAX; i++) {
+		cus_pctype = &pf->customized_pctype[i];
+		if (pctype == cus_pctype->pctype)
+			return cus_pctype;
+	}
+	return NULL;
+}
+
 static inline int
-i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
+				const struct i40e_fdir_input *fdir_input,
 				unsigned char *raw_pkt,
 				bool vlan)
 {
+	struct i40e_customized_pctype *cus_pctype = NULL;
 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
 	uint16_t *ether_type;
 	uint8_t len = 2 * sizeof(struct ether_addr);
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
+	uint8_t pctype = fdir_input->pctype;
+	bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
 	static const uint8_t next_proto[] = {
 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
@@ -975,27 +996,30 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 	raw_pkt += sizeof(uint16_t);
 	len += sizeof(uint16_t);
 
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	if (is_customized_pctype) {
+		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+		if (!cus_pctype) {
+			PMD_DRV_LOG(ERR, "unknown pctype %u.",
+				    fdir_input->pctype);
+			return -1;
+		}
+	}
+
+	if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
 		*ether_type = fdir_input->flow.l2_flow.ether_type;
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
+		 is_customized_pctype) {
 		ip = (struct ipv4_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
 		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
 		/* set len to default value */
 		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
-		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
-					fdir_input->flow.ip4_flow.proto :
-					next_proto[fdir_input->pctype];
 		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
-					fdir_input->flow.ip4_flow.ttl :
-					I40E_FDIR_IP_DEFAULT_TTL;
+			fdir_input->flow.ip4_flow.ttl :
+			I40E_FDIR_IP_DEFAULT_TTL;
 		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
 		/**
 		 * The source and destination fields in the transmitted packet
@@ -1004,13 +1028,22 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		 */
 		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
 		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+
+		if (!is_customized_pctype)
+			ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+				fdir_input->flow.ip4_flow.proto :
+				next_proto[fdir_input->pctype];
+		else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
+			ip->next_proto_id = IPPROTO_UDP;
 		len += sizeof(struct ipv4_hdr);
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		ip6 = (struct ipv6_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
@@ -1021,11 +1054,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		ip6->payload_len =
 			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
 		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
-					fdir_input->flow.ipv6_flow.proto :
-					next_proto[fdir_input->pctype];
+			fdir_input->flow.ipv6_flow.proto :
+			next_proto[fdir_input->pctype];
 		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
-					fdir_input->flow.ipv6_flow.hop_limits :
-					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+			fdir_input->flow.ipv6_flow.hop_limits :
+			I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
 		/**
 		 * The source and destination fields in the transmitted packet
 		 * need to be presented in a reversed order with respect
@@ -1038,12 +1071,12 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 			   &fdir_input->flow.ipv6_flow.src_ip,
 			   IPV6_ADDR_LEN);
 		len += sizeof(struct ipv6_hdr);
-		break;
-	default:
+	} else {
 		PMD_DRV_LOG(ERR, "unknown pctype %u.",
 			    fdir_input->pctype);
 		return -1;
 	}
+
 	return len;
 }
 
@@ -1058,23 +1091,28 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 			     const struct i40e_fdir_input *fdir_input,
 			     unsigned char *raw_pkt)
 {
-	unsigned char *payload, *ptr;
+	unsigned char *payload = NULL;
+	unsigned char *ptr;
 	struct udp_hdr *udp;
 	struct tcp_hdr *tcp;
 	struct sctp_hdr *sctp;
+	struct rte_flow_item_gtp *gtp;
+	struct ipv4_hdr *gtp_ipv4;
+	struct ipv6_hdr *gtp_ipv6;
 	uint8_t size, dst = 0;
 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
 	int len;
+	uint8_t pctype = fdir_input->pctype;
+	struct i40e_customized_pctype *cus_pctype;
 
 	/* fill the ethernet and IP head */
-	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
 					      !!fdir_input->flow_ext.vlan_tci);
 	if (len < 0)
 		return -EINVAL;
 
 	/* fill the L4 head */
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1085,9 +1123,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1098,9 +1134,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1111,15 +1145,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1130,9 +1160,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1143,9 +1171,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
 		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1156,14 +1182,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	} else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
 		payload = raw_pkt + len;
 		/**
 		 * ARP packet is a special case on which the payload
@@ -1173,10 +1196,69 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 				rte_cpu_to_be_16(ETHER_TYPE_ARP))
 			payload += sizeof(struct arp_hdr);
 		set_idx = I40E_FLXPLD_L2_IDX;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
-		return -EINVAL;
+	} else if (fdir_input->flow_ext.customized_pctype) {
+		/* If customized pctype is used */
+		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+		if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
+			udp = (struct udp_hdr *)(raw_pkt + len);
+			udp->dgram_len =
+				rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+			gtp = (struct rte_flow_item_gtp *)
+				((unsigned char *)udp + sizeof(struct udp_hdr));
+			gtp->v_pt_rsv_flags = 0x30;
+			gtp->msg_len =
+				rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+			gtp->teid = fdir_input->flow.gtp_flow.teid;
+			gtp->msg_type = 0x1;
+
+			if (cus_pctype->index == I40E_CUSTOMIZED_GTPC)
+				udp->dst_port = rte_cpu_to_be_16(2123);
+			else
+				udp->dst_port = rte_cpu_to_be_16(2152);
+
+			if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
+				gtp->msg_type = 0xFF;
+				gtp_ipv4 = (struct ipv4_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv4->version_ihl =
+					I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+				gtp_ipv4->next_proto_id = IPPROTO_IP;
+				gtp_ipv4->total_length =
+					rte_cpu_to_be_16(
+						I40E_FDIR_INNER_IP_DEFAULT_LEN);
+				payload = (unsigned char *)gtp_ipv4 +
+					sizeof(struct ipv4_hdr);
+			} else if (cus_pctype->index ==
+				   I40E_CUSTOMIZED_GTPU_IPV6) {
+				gtp->msg_type = 0xFF;
+				gtp_ipv6 = (struct ipv6_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv6->vtc_flow =
+					rte_cpu_to_be_32(
+					       I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					       (0 << I40E_FDIR_IPv6_TC_OFFSET));
+				gtp_ipv6->proto = IPPROTO_NONE;
+				gtp_ipv6->payload_len =
+					rte_cpu_to_be_16(
+					      I40E_FDIR_INNER_IPv6_DEFAULT_LEN);
+				gtp_ipv6->hop_limits =
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+				payload = (unsigned char *)gtp_ipv6 +
+					sizeof(struct ipv6_hdr);
+			} else
+				payload = (unsigned char *)gtp +
+					sizeof(struct rte_flow_item_gtp);
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
 	}
 
 	/* fill the flexbytes to payload */
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 73af7fd..ea81ecb 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -189,6 +189,40 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
@@ -216,6 +250,40 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_RAW,
@@ -1576,10 +1644,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	/* FDIR - support default flow type with flexible payload */
 	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -2302,6 +2378,42 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf,
 	return 0;
 }
 
+static uint8_t
+i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
+				enum rte_flow_item_type item_type,
+				struct i40e_fdir_filter_conf *filter)
+{
+	struct i40e_customized_pctype *cus_pctype = NULL;
+
+	switch (item_type) {
+	case RTE_FLOW_ITEM_TYPE_GTPC:
+		cus_pctype = i40e_find_customized_pctype(pf,
+							 I40E_CUSTOMIZED_GTPC);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GTPU:
+		if (!filter->input.flow_ext.inner_ip)
+			cus_pctype = i40e_find_customized_pctype(pf,
+							 I40E_CUSTOMIZED_GTPU);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV4)
+			cus_pctype = i40e_find_customized_pctype(pf,
+						 I40E_CUSTOMIZED_GTPU_IPV4);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV6)
+			cus_pctype = i40e_find_customized_pctype(pf,
+						 I40E_CUSTOMIZED_GTPU_IPV6);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported item type");
+		break;
+	}
+
+	if (cus_pctype)
+		return cus_pctype->pctype;
+
+	return I40E_FILTER_PCTYPE_INVALID;
+}
+
 /* 1. Last in item should be NULL as range is not supported.
  * 2. Supported patterns: refer to array i40e_supported_patterns.
  * 3. Supported flow type and input set: refer to array
@@ -2326,14 +2438,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	enum i40e_filter_pctype pctype = 0;
+	uint8_t pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
 	uint32_t i, j;
 	uint8_t  ipv6_addr_mask[16] = {
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -2351,12 +2465,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	uint16_t outer_tpid;
 	uint16_t ether_type;
 	uint32_t vtc_flow_cpu;
+	bool outer_ip = true;
 	int ret;
 
 	memset(off_arr, 0, sizeof(off_arr));
 	memset(len_arr, 0, sizeof(len_arr));
 	memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
 	outer_tpid = i40e_get_outer_vlan(dev);
+	filter->input.flow_ext.customized_pctype = false;
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
@@ -2430,7 +2546,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv4_mask =
 				(const struct rte_flow_item_ipv4 *)item->mask;
 
-			if (ipv4_spec && ipv4_mask) {
+			if (ipv4_spec && ipv4_mask && outer_ip) {
 				/* Check IPv4 mask and update input set */
 				if (ipv4_mask->hdr.version_ihl ||
 				    ipv4_mask->hdr.total_length ||
@@ -2475,9 +2591,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					ipv4_spec->hdr.src_addr;
 				filter->input.flow.ip4_flow.dst_ip =
 					ipv4_spec->hdr.dst_addr;
+
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV4;
+			} else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv4 mask.");
+				return -rte_errno;
 			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+			if (outer_ip)
+				outer_ip = false;
 
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -2487,7 +2616,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv6_mask =
 				(const struct rte_flow_item_ipv6 *)item->mask;
 
-			if (ipv6_spec && ipv6_mask) {
+			if (ipv6_spec && ipv6_mask && outer_ip) {
 				/* Check IPv6 mask and update input set */
 				if (ipv6_mask->hdr.payload_len) {
 					rte_flow_error_set(error, EINVAL,
@@ -2538,10 +2667,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				else
 					pctype =
 					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
-			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV6;
+			} else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv6 mask");
+				return -rte_errno;
+			}
 
+			if (outer_ip)
+				outer_ip = false;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
@@ -2636,6 +2777,37 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			layer_idx = I40E_FLXPLD_L4_IDX;
 
 			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			if (!pf->gtp_support) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported protocol");
+				return -rte_errno;
+			}
+
+			gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+				    gtp_mask->msg_type ||
+				    gtp_mask->msg_len ||
+				    gtp_mask->teid != UINT32_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				filter->input.flow.gtp_flow.teid =
+					gtp_spec->teid;
+				filter->input.flow_ext.customized_pctype = true;
+				cus_proto = item_type;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
 			sctp_spec =
 				(const struct rte_flow_item_sctp *)item->spec;
@@ -2774,43 +2946,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Conflict with the first rule's input set.");
-		return -rte_errno;
-	} else if (ret == -EINVAL) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Invalid pattern mask.");
-		return -rte_errno;
+	/* Get customized pctype value */
+	if (filter->input.flow_ext.customized_pctype) {
+		pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
+		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported pctype");
+			return -rte_errno;
+		}
 	}
 
-	filter->input.pctype = pctype;
+	/* If customized pctype is not used, set fdir configuration.*/
+	if (!filter->input.flow_ext.customized_pctype) {
+		ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Conflict with the first rule's input set.");
+			return -rte_errno;
+		} else if (ret == -EINVAL) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Invalid pattern mask.");
+			return -rte_errno;
+		}
 
-	/* Store flex mask to SW */
-	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Exceed maximal number of bitmasks");
-		return -rte_errno;
-	} else if (ret == -2) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Conflict with the first flexible rule");
-		return -rte_errno;
-	} else if (ret > 0)
-		cfg_flex_msk = false;
+		/* Store flex mask to SW */
+		ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Exceed maximal number of bitmasks");
+			return -rte_errno;
+		} else if (ret == -2) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Conflict with the first flexible rule");
+			return -rte_errno;
+		} else if (ret > 0)
+			cfg_flex_msk = false;
 
-	if (cfg_flex_pit)
-		i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+		if (cfg_flex_pit)
+			i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
 
-	if (cfg_flex_msk)
-		i40e_flow_set_fdir_flex_msk(pf, pctype);
+		if (cfg_flex_msk)
+			i40e_flow_set_fdir_flex_msk(pf, pctype);
+	}
+
+	filter->input.pctype = pctype;
 
 	return 0;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v4 7/8] net/i40e: add cloud filter parsing function for GTP
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
                             ` (5 preceding siblings ...)
  2017-09-28  2:17           ` [PATCH v4 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-09-28  2:17           ` Beilei Xing
  2017-09-28  2:17           ` [PATCH v4 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
                             ` (2 subsequent siblings)
  9 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  2:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds the i40e_flow_parse_gtp_filter parsing function
for GTP-C and GTP-U cloud filter rules.
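
For illustration only (not part of this patch), the pattern this
parser accepts can be built with the generic flow API as follows;
the TEID value is an arbitrary example and only the TEID is matched:

  /* ETH / IPV4 / UDP / GTPC pattern, matching on TEID only. */
  struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x1234) };
  struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
  struct rte_flow_item pattern[] = {
          { .type = RTE_FLOW_ITEM_TYPE_ETH },
          { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
          { .type = RTE_FLOW_ITEM_TYPE_UDP },
          { .type = RTE_FLOW_ITEM_TYPE_GTPC,
            .spec = &gtp_spec, .mask = &gtp_mask },
          { .type = RTE_FLOW_ITEM_TYPE_END },
  };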

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |   2 +
 drivers/net/i40e/i40e_flow.c   | 151 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 153 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 502f6c6..436ca2c 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -703,6 +703,8 @@ enum i40e_tunnel_type {
 	I40E_TUNNEL_TYPE_MPLSoUDP,
 	I40E_TUNNEL_TYPE_MPLSoGRE,
 	I40E_TUNNEL_TYPE_QINQ,
+	I40E_TUNNEL_TYPE_GTPC,
+	I40E_TUNNEL_TYPE_GTPU,
 	I40E_TUNNEL_TYPE_MAX,
 };
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index ea81ecb..2bf7098 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -125,6 +125,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 				       const struct rte_flow_action actions[],
 				       struct rte_flow_error *error,
 				       union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+				      const struct rte_flow_attr *attr,
+				      const struct rte_flow_item pattern[],
+				      const struct rte_flow_action actions[],
+				      struct rte_flow_error *error,
+				      union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
 				      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -1808,6 +1814,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+	/* GTP-C & GTP-U */
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
 	/* QINQ */
 	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
 };
@@ -3823,6 +3834,146 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 }
 
 /* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ *    filled with 0.
+ */
+static int
+i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
+			    const struct rte_flow_item *pattern,
+			    struct rte_flow_error *error,
+			    struct i40e_tunnel_filter_conf *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_gtp *gtp_spec;
+	const struct rte_flow_item_gtp *gtp_mask;
+	enum rte_flow_item_type item_type;
+
+	if (!pf->gtp_support) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   item,
+				   "GTP is not supported by default.");
+		return -rte_errno;
+	}
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+			/* IPv4 is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec =
+				(const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask =
+				(const struct rte_flow_item_gtp *)item->mask;
+
+			if (!gtp_spec || !gtp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP item");
+				return -rte_errno;
+			}
+
+			if (gtp_mask->v_pt_rsv_flags ||
+			    gtp_mask->msg_type ||
+			    gtp_mask->msg_len ||
+			    gtp_mask->teid != UINT32_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+				return -rte_errno;
+			}
+
+			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+
+			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+			   const struct rte_flow_attr *attr,
+			   const struct rte_flow_item pattern[],
+			   const struct rte_flow_action actions[],
+			   struct rte_flow_error *error,
+			   union i40e_filter_t *filter)
+{
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
+	int ret;
+
+	ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+					  error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+	return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
  * 2. Supported filter types: QINQ.
  * 3. Mask of fields which need to be matched should be
  *    filled with 1.
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v4 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
                             ` (6 preceding siblings ...)
  2017-09-28  2:17           ` [PATCH v4 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
@ 2017-09-28  2:17           ` Beilei Xing
  2017-09-28  8:13           ` [PATCH v5 0/8] GPT-C and GTP-U enabling Beilei Xing
  2017-09-29  5:18           ` [PATCH v6 0/8] GPT-C and GTP-U enabling Beilei Xing
  9 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  2:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

GTP-C and GTP-U are not supported by the cloud filter by
default due to limited HW resources. This patch enables
GTP-C and GTP-U cloud filters by replacing the inner_mac
and TUNNEL_KEY filter types.
The replacement is configured when the first GTP-C or
GTP-U filter rule is added, and it is only invalidated by
a NIC core reset.
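
For clarity, a worked example (values are arbitrary) of how the
32-bit TEID is split across the two 16-bit general_fields words of
the replaced cloud filter:

  uint32_t teid_le = 0x12345678;              /* rte_cpu_to_le_32(teid) */
  uint16_t word0 = (teid_le >> 16) & 0xFFFF;  /* 0x1234 -> ..._WORD0 */
  uint16_t word1 = teid_le & 0xFFFF;          /* 0x5678 -> ..._WORD1 */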

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 189 +++++++++++++++++++++++++++++++++++++----
 drivers/net/i40e/i40e_ethdev.h |  17 ++--
 drivers/net/i40e/i40e_flow.c   |  12 +--
 3 files changed, 189 insertions(+), 29 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index aba35a5..7560867 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -7161,7 +7161,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
-	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 3 entries */
@@ -7209,12 +7209,12 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
@@ -7232,12 +7232,131 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* For GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* for GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 
@@ -7346,6 +7465,36 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		big_buffer = 1;
 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
 		break;
+	case I40E_TUNNEL_TYPE_GTPC:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
+	case I40E_TUNNEL_TYPE_GTPU:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
 	case I40E_TUNNEL_TYPE_QINQ:
 		if (!pf->qinq_replace_flag) {
 			ret = i40e_cloud_filter_qinq_create(pf);
@@ -7372,13 +7521,19 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 
 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
 		pfilter->element.flags |=
-			I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+			I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	else {
 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
 						&pfilter->element.flags);
@@ -10899,14 +11054,14 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
 			   sizeof(f->input.general_fields));
 
 		if (((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
 			big_buffer = 1;
 
 		if (big_buffer)
@@ -11294,7 +11449,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 2 entries */
@@ -11325,13 +11480,13 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L2 filter, input for L2 filter will be L1 filter  */
 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 436ca2c..1223d7a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -650,12 +650,16 @@ struct i40e_ethertype_rule {
 
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
-#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
-#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP	8
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE	9
+#define I40E_AQC_ADD_CLOUD_FILTER_0X10		0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_0X11		0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X12		0x12
+#define I40E_AQC_ADD_L1_FILTER_0X11		0x11
+#define I40E_AQC_ADD_L1_FILTER_0X12		0x12
+#define I40E_AQC_ADD_L1_FILTER_0X13		0x13
+#define I40E_AQC_NEW_TR_21			21
+#define I40E_AQC_NEW_TR_22			22
 
 enum i40e_tunnel_iptype {
 	I40E_TUNNEL_IPTYPE_IPV4,
@@ -905,6 +909,7 @@ struct i40e_pf {
 	bool floating_veb_list[I40E_MAX_VF];
 	struct i40e_flow_list flow_list;
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
+	bool gtp_replace_flag;   /* 1 - GTP-C/U filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 2bf7098..f4c8e63 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -4344,12 +4344,12 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 		vsi = vf->vsi;
 	}
 
-	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X10))
 		big_buffer = 1;
 
 	if (big_buffer)
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v5 0/8] GPT-C and GTP-U enabling
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
                             ` (7 preceding siblings ...)
  2017-09-28  2:17           ` [PATCH v4 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
@ 2017-09-28  8:13           ` Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 1/8] mbuf: support GTP in software packet type parser Beilei Xing
                               ` (7 more replies)
  2017-09-29  5:18           ` [PATCH v6 0/8] GPT-C and GTP-U enabling Beilei Xing
  9 siblings, 8 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  8:13 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch set enables RSS/FDIR/cloud filter for GTP-C and GTP-U.
It depends on Kirill's patch:
http://dpdk.org/ml/archives/dev/2017-September/076035.html.
However, Kirill's patchset needs to be updated.

v5 changes:
 - Fix code style.
 - Reword commit log.

v4 changes:
 - Refine fdir related code.
 - Rework profile metadata parsing function.
 - Fix code style.

v3 changes:
 - Rework implementation to support the new profile.
 - Add GTPC and GTPU tunnel types to the software packet type parser.
 - Update ptype info when loading profile.
 - Fix a bug when updating pctype info.


v2 changes:
 - Enable RSS/FDIR/cloud filter dynamically by checking the profile
 - Add GTPC and GTPU items to distinguish rules for GTP-C and GTP-U
 - Rework FDIR/cloud filter enabling function

Beilei Xing (8):
  mbuf: support GTP in software packet type parser
  net/i40e: update ptype and pctype info
  net/i40e: support RSS for new pctype
  ethdev: add GTP items to support flow API
  net/i40e: finish integration FDIR with generic flow API
  net/i40e: add FDIR support for GTP-C and GTP-U
  net/i40e: add cloud filter parsing function for GTP
  net/i40e: enable cloud filter for GTP-C and GTP-U

 app/test-pmd/cmdline_flow.c                 |  40 ++
 app/test-pmd/config.c                       |   3 +
 doc/guides/prog_guide/rte_flow.rst          |  18 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |   4 +
 drivers/net/i40e/i40e_ethdev.c              | 534 +++++++++++++++++++++++++-
 drivers/net/i40e/i40e_ethdev.h              | 156 +++++++-
 drivers/net/i40e/i40e_fdir.c                | 570 +++++++++++++++++++++++++++-
 drivers/net/i40e/i40e_flow.c                | 496 ++++++++++++++++++++----
 drivers/net/i40e/rte_pmd_i40e.c             |   6 +-
 lib/librte_ether/rte_flow.h                 |  52 +++
 lib/librte_mbuf/rte_mbuf_ptype.c            |   2 +
 lib/librte_mbuf/rte_mbuf_ptype.h            |  24 ++
 12 files changed, 1775 insertions(+), 130 deletions(-)

-- 
2.5.5

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v5 1/8] mbuf: support GTP in software packet type parser
  2017-09-28  8:13           ` [PATCH v5 0/8] GPT-C and GTP-U enabling Beilei Xing
@ 2017-09-28  8:13             ` Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 2/8] net/i40e: update ptype and pctype info Beilei Xing
                               ` (6 subsequent siblings)
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  8:13 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Add support for GTP-C and GTP-U tunnels in rte_net_get_ptype().
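
As a usage sketch only (not part of this patch), an application can
test the tunnel part of a received mbuf's packet type against the
new values; the helper name below is made up:

  #include <rte_mbuf.h>
  #include <rte_mbuf_ptype.h>

  /* Return non-zero if the mbuf was classified as a GTP tunnel. */
  static inline int
  pkt_is_gtp_tunnel(const struct rte_mbuf *m)
  {
          uint32_t tnl = m->packet_type & RTE_PTYPE_TUNNEL_MASK;

          return tnl == RTE_PTYPE_TUNNEL_GTPC ||
                 tnl == RTE_PTYPE_TUNNEL_GTPU;
  }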

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
---
 lib/librte_mbuf/rte_mbuf_ptype.c |  2 ++
 lib/librte_mbuf/rte_mbuf_ptype.h | 24 ++++++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/lib/librte_mbuf/rte_mbuf_ptype.c b/lib/librte_mbuf/rte_mbuf_ptype.c
index e5c4fae..a450814 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.c
+++ b/lib/librte_mbuf/rte_mbuf_ptype.c
@@ -89,6 +89,8 @@ const char *rte_get_ptype_tunnel_name(uint32_t ptype)
 	case RTE_PTYPE_TUNNEL_NVGRE: return "TUNNEL_NVGRE";
 	case RTE_PTYPE_TUNNEL_GENEVE: return "TUNNEL_GENEVE";
 	case RTE_PTYPE_TUNNEL_GRENAT: return "TUNNEL_GRENAT";
+	case RTE_PTYPE_TUNNEL_GTPC: return "TUNNEL_GTPC";
+	case RTE_PTYPE_TUNNEL_GTPU: return "TUNNEL_GTPU";
 	default: return "TUNNEL_UNKNOWN";
 	}
 }
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index acd70bb..eb7cd2c 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -383,6 +383,30 @@ extern "C" {
  */
 #define RTE_PTYPE_TUNNEL_GRENAT             0x00006000
 /**
+ * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2123>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2123>
+ */
+#define RTE_PTYPE_TUNNEL_GTPC               0x00007000
+/**
+ * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2152>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2152>
+ */
+#define RTE_PTYPE_TUNNEL_GTPU               0x00008000
+/**
  * Mask of tunneling packet types.
  */
 #define RTE_PTYPE_TUNNEL_MASK               0x0000f000
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v5 2/8] net/i40e: update ptype and pctype info
  2017-09-28  8:13           ` [PATCH v5 0/8] GPT-C and GTP-U enabling Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 1/8] mbuf: support GTP in software packet type parser Beilei Xing
@ 2017-09-28  8:13             ` Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 3/8] net/i40e: support RSS for new pctype Beilei Xing
                               ` (5 subsequent siblings)
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  8:13 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Update the new packet type and pctype information when a
profile is downloaded.
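
The update is triggered from the DDP package write path. A sketch of
how an application would load a GTP profile is shown below; port_id,
pkg_buf and pkg_size are placeholders for a port and a profile image
read from disk by the application:

  int ret = rte_pmd_i40e_process_ddp_package(port_id, pkg_buf, pkg_size,
                                             RTE_PMD_I40E_PKG_OP_WR_ADD);
  /* ret == 0 means the profile was written and the ptype/pctype
   * tables below were updated.
   */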

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c  | 312 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_ethdev.h  |  24 ++++
 drivers/net/i40e/rte_pmd_i40e.c |   6 +-
 3 files changed, 341 insertions(+), 1 deletion(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 720f067..d6b0d50 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -65,6 +65,7 @@
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
 #include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
 
 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
@@ -1036,6 +1037,21 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static void
+i40e_init_customized_info(struct i40e_pf *pf)
+{
+	int i;
+
+	/* Initialize customized pctype */
+	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
+		pf->customized_pctype[i].index = i;
+		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
+		pf->customized_pctype[i].valid = false;
+	}
+
+	pf->gtp_support = false;
+}
+
 static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
@@ -1301,6 +1317,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	/* initialize Traffic Manager configuration */
 	i40e_tm_conf_init(dev);
 
+	/* Initialize customized information */
+	i40e_init_customized_info(pf);
+
 	ret = i40e_init_ethtype_filter_list(dev);
 	if (ret < 0)
 		goto err_init_ethtype_filter_list;
@@ -10893,6 +10912,299 @@ is_i40e_supported(struct rte_eth_dev *dev)
 	return is_device_supported(dev, &rte_i40e_pmd);
 }
 
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
+{
+	int i;
+
+	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+		if (pf->customized_pctype[i].index == index)
+			return &pf->customized_pctype[i];
+	}
+	return NULL;
+}
+
+static int
+i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size, uint32_t proto_num,
+			      struct rte_pmd_i40e_proto_info *proto)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t pctype_num;
+	struct rte_pmd_i40e_ptype_info *pctype;
+	uint32_t buff_size;
+	struct i40e_customized_pctype *new_pctype = NULL;
+	uint8_t proto_id;
+	uint8_t pctype_value;
+	char name[64];
+	uint32_t i, j, n;
+	int ret;
+
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&pctype_num, sizeof(pctype_num),
+				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype number");
+		return -1;
+	}
+	if (!pctype_num) {
+		PMD_DRV_LOG(INFO, "No new pctype added");
+		return -1;
+	}
+
+	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
+	pctype = rte_zmalloc("new_pctype", buff_size, 0);
+	if (!pctype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+	/* get information about new pctype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)pctype, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype list");
+		rte_free(pctype);
+		return -1;
+	}
+
+	/* Update customized pctype. */
+	for (i = 0; i < pctype_num; i++) {
+		pctype_value = pctype[i].ptype_id;
+		memset(name, 0, sizeof(name));
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = pctype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				strcat(name, proto[n].name);
+				strcat(name, "_");
+				break;
+			}
+		}
+		name[strlen(name) - 1] = '\0';
+		if (!strcmp(name, "GTPC"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						      I40E_CUSTOMIZED_GTPC);
+		else if (!strcmp(name, "GTPU_IPV4"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						   I40E_CUSTOMIZED_GTPU_IPV4);
+		else if (!strcmp(name, "GTPU_IPV6"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						   I40E_CUSTOMIZED_GTPU_IPV6);
+		else if (!strcmp(name, "GTPU"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						      I40E_CUSTOMIZED_GTPU);
+		if (new_pctype) {
+			new_pctype->pctype = pctype_value;
+			new_pctype->valid = true;
+		}
+	}
+
+	rte_free(pctype);
+	return 0;
+}
+
+static int
+i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
+			       uint32_t pkg_size, uint32_t proto_num,
+			       struct rte_pmd_i40e_proto_info *proto)
+{
+	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
+	uint8_t port_id = dev->data->port_id;
+	uint32_t ptype_num;
+	struct rte_pmd_i40e_ptype_info *ptype;
+	uint32_t buff_size;
+	uint8_t proto_id;
+	char name[16];
+	uint32_t i, j, n;
+	bool inner_ip;
+	int ret;
+
+	/* get information about new ptype num */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&ptype_num, sizeof(ptype_num),
+				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype number");
+		return -1;
+	}
+	if (!ptype_num) {
+		PMD_DRV_LOG(INFO, "No new ptype added");
+		return -1;
+	}
+
+	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+	ptype = rte_zmalloc("new_ptype", buff_size, 0);
+	if (!ptype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+
+	/* get information about new ptype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)ptype, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype list");
+		rte_free(ptype);
+		return -1;
+	}
+
+	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
+	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
+	if (!ptype_mapping) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		rte_free(ptype);
+		return -1;
+	}
+
+	/* Update ptype mapping table. */
+	for (i = 0; i < ptype_num; i++) {
+		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
+		ptype_mapping[i].sw_ptype = 0;
+		inner_ip = false;
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = ptype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				memset(name, 0, sizeof(name));
+				strcpy(name, proto[n].name);
+				if (!strcmp(name, "IPV4") && !inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+					inner_ip = true;
+				} else if (!strcmp(name, "IPV4") && inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+				} else if (!strcmp(name, "IPV6") && !inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+					inner_ip = true;
+				} else if (!strcmp(name, "IPV6") && inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+				} else if (!strcmp(name, "IPV4FRAG")) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!strcmp(name, "IPV6FRAG")) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!strcmp(name, "GTPC"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPC;
+				else if (!strcmp(name, "GTPU"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPU;
+				else if (!strcmp(name, "UDP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_UDP;
+				else if (!strcmp(name, "TCP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_TCP;
+				else if (!strcmp(name, "SCTP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_SCTP;
+				else if (!strcmp(name, "ICMP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_ICMP;
+
+				break;
+			}
+		}
+	}
+
+	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
+						ptype_num, 0);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
+		rte_free(ptype_mapping);
+		rte_free(ptype);
+		return -1;
+	}
+
+	rte_free(ptype_mapping);
+	rte_free(ptype);
+	return 0;
+}
+
+void
+i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t proto_num;
+	struct rte_pmd_i40e_proto_info *proto;
+	uint32_t buff_size;
+	uint32_t i;
+	int ret;
+
+	/* get information about protocol number */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				       (uint8_t *)&proto_num, sizeof(proto_num),
+				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol number");
+		return;
+	}
+	if (!proto_num) {
+		PMD_DRV_LOG(INFO, "No new protocol added");
+		return;
+	}
+
+	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
+	proto = rte_zmalloc("new_proto", buff_size, 0);
+	if (!proto) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return;
+	}
+
+	/* get information about protocol list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)proto, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol list");
+		rte_free(proto);
+		return;
+	}
+
+	/* Check if GTP is supported. */
+	for (i = 0; i < proto_num; i++) {
+		if (!strncmp(proto[i].name, "GTP", 3)) {
+			pf->gtp_support = true;
+			break;
+		}
+	}
+
+	/* Update customized pctype info */
+	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
+					    proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No pctype is updated.");
+
+	/* Update customized ptype info */
+	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
+					   proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No ptype is updated.");
+
+	rte_free(proto);
+}
+
 /* Create a QinQ cloud filter
  *
  * The Fortville NIC has limited resources for tunnel filters,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ad80f0f..73fb5c3 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -722,6 +722,21 @@ struct i40e_tm_conf {
 	bool committed;
 };
 
+enum i40e_new_pctype {
+	I40E_CUSTOMIZED_GTPC = 0,
+	I40E_CUSTOMIZED_GTPU_IPV4,
+	I40E_CUSTOMIZED_GTPU_IPV6,
+	I40E_CUSTOMIZED_GTPU,
+	I40E_CUSTOMIZED_MAX,
+};
+
+#define I40E_FILTER_PCTYPE_INVALID     0
+struct i40e_customized_pctype {
+	enum i40e_new_pctype index;  /* Indicate which customized pctype */
+	uint8_t pctype;   /* New pctype value */
+	bool valid;   /* Check if it's valid */
+};
+
 /*
  * Structure to store private data specific for PF instance.
  */
@@ -786,6 +801,11 @@ struct i40e_pf {
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
+
+	/* Dynamic Device Personalization */
+	bool gtp_support; /* 1 - support GTP-C and GTP-U */
+	/* customer customized pctype */
+	struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
 };
 
 enum pending_msg {
@@ -1003,6 +1023,10 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
 int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void i40e_tm_conf_init(struct rte_eth_dev *dev);
 void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
+void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+				 uint32_t pkg_size);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index 5d39044..7e0e23a 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1608,6 +1608,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
 		return -EINVAL;
 	}
 
+	i40e_update_customized_info(dev, buff, size);
+
 	/* Find metadata segment */
 	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
 							pkg_hdr);
@@ -2106,7 +2108,9 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
 	    tnl != RTE_PTYPE_TUNNEL_VXLAN &&
 	    tnl != RTE_PTYPE_TUNNEL_NVGRE &&
 	    tnl != RTE_PTYPE_TUNNEL_GENEVE &&
-	    tnl != RTE_PTYPE_TUNNEL_GRENAT)
+	    tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPC &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPU)
 		return -1;
 
 	if (il2 &&
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v5 3/8] net/i40e: support RSS for new pctype
  2017-09-28  8:13           ` [PATCH v5 0/8] GPT-C and GTP-U enabling Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 1/8] mbuf: support GTP in software packet type parser Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 2/8] net/i40e: update ptype and pctype info Beilei Xing
@ 2017-09-28  8:13             ` Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 4/8] ethdev: add GTP items to support flow API Beilei Xing
                               ` (4 subsequent siblings)
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  8:13 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Enable RSS for the new pctypes after a new profile
is downloaded.
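
Because the profile-assigned pctype values can be larger than 31, the
enable bit may land in the upper HENA register. A worked example with
a hypothetical pctype value of 34:

  uint64_t hena = 1ULL << 34;                 /* enable bit for pctype 34 */
  uint32_t lo = (uint32_t)hena;               /* I40E_PFQF_HENA(0): 0x0 */
  uint32_t hi = (uint32_t)(hena >> 32);       /* I40E_PFQF_HENA(1): 0x4 */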

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index d6b0d50..aba35a5 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1928,6 +1928,31 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	return i40e_phy_conf_link(hw, abilities, speed, true);
 }
 
+static void
+i40e_customized_pctype_hash_set(struct i40e_pf *pf, bool enable)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint64_t hena;
+	int i;
+
+	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+		if (pf->customized_pctype[i].valid) {
+			if (enable)
+				hena |= 1ULL << pf->customized_pctype[i].pctype;
+			else
+				hena &= ~(1ULL <<
+					  pf->customized_pctype[i].pctype);
+		}
+	}
+
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+	I40E_WRITE_FLUSH(hw);
+}
+
 static int
 i40e_dev_start(struct rte_eth_dev *dev)
 {
@@ -2075,6 +2100,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
 			    "please call hierarchy_commit() "
 			    "before starting the port");
 
+	i40e_customized_pctype_hash_set(pf, true);
+
 	return I40E_SUCCESS;
 
 err_up:
@@ -2155,6 +2182,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
 	uint32_t reg;
 	int i;
 
+	i40e_customized_pctype_hash_set(pf, false);
+
 	PMD_INIT_FUNC_TRACE();
 
 	i40e_dev_stop(dev);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v5 4/8] ethdev: add GTP items to support flow API
  2017-09-28  8:13           ` [PATCH v5 0/8] GPT-C and GTP-U enabling Beilei Xing
                               ` (2 preceding siblings ...)
  2017-09-28  8:13             ` [PATCH v5 3/8] net/i40e: support RSS for new pctype Beilei Xing
@ 2017-09-28  8:13             ` Beilei Xing
  2017-09-28 13:43               ` Sean Harte
  2017-09-28  8:13             ` [PATCH v5 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
                               ` (3 subsequent siblings)
  7 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  8:13 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds GTP, GTPC and GTPU items to the
generic flow API, and also exposes the item fields
through the testpmd flow command.
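
A minimal usage sketch (values are examples): the default mask defined
below matches the TEID only, so matching GTP-U packets with a given
TEID needs nothing more than a spec:

  struct rte_flow_item_gtp spec = { .teid = RTE_BE32(0x1234) };
  struct rte_flow_item item = {
          .type = RTE_FLOW_ITEM_TYPE_GTPU,
          .spec = &spec,
          .mask = &rte_flow_item_gtp_mask,
  };

In testpmd the equivalent can be expressed along the lines of
"flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x1234
/ end actions queue index 1 / end" (illustrative only).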

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
 app/test-pmd/config.c                       |  3 ++
 doc/guides/prog_guide/rte_flow.rst          | 18 ++++++++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
 lib/librte_ether/rte_flow.h                 | 52 +++++++++++++++++++++++++++++
 5 files changed, 117 insertions(+)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a17a004..26c3e4f 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -171,6 +171,10 @@ enum index {
 	ITEM_GRE_PROTO,
 	ITEM_FUZZY,
 	ITEM_FUZZY_THRESH,
+	ITEM_GTP,
+	ITEM_GTP_TEID,
+	ITEM_GTPC,
+	ITEM_GTPU,
 
 	/* Validate/create actions. */
 	ACTIONS,
@@ -451,6 +455,9 @@ static const enum index next_item[] = {
 	ITEM_MPLS,
 	ITEM_GRE,
 	ITEM_FUZZY,
+	ITEM_GTP,
+	ITEM_GTPC,
+	ITEM_GTPU,
 	ZERO,
 };
 
@@ -588,6 +595,12 @@ static const enum index item_gre[] = {
 	ZERO,
 };
 
+static const enum index item_gtp[] = {
+	ITEM_GTP_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
 static const enum index next_action[] = {
 	ACTION_END,
 	ACTION_VOID,
@@ -1421,6 +1434,33 @@ static const struct token token_list[] = {
 		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
 					thresh)),
 	},
+	[ITEM_GTP] = {
+		.name = "gtp",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTP_TEID] = {
+		.name = "teid",
+		.help = "tunnel endpoint identifier",
+		.next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
+		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
+	},
+	[ITEM_GTPC] = {
+		.name = "gtpc",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTPU] = {
+		.name = "gtpu",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
 
 	/* Validate/create actions. */
 	[ACTIONS] = {
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index e8e311c..9b09bbd 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -949,6 +949,9 @@ static const struct {
 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
+	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
 };
 
 /** Compute storage space needed by item specification. */
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 662a912..1bc8f19 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -955,6 +955,24 @@ Usage example, fuzzy match a TCPv4 packets:
    | 4     | END      |
    +-------+----------+
 
+Item: ``GTP``, ``GTPC``, ``GTPU``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Matches a GTP header.
+
+Note: GTP, GTPC and GTPU use the same structure. Since only UDP destination port
+is used to distinguish GTP_C (port is 2123) and GTP_U packets (port is 2152),
+GTPC and GTPU item are defined for a user-friendly API when creating GTP-C and
+GTP-U flow.
+
+- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
+  extension header flag (1b), sequence number flag (1b), N-PDU number
+  flag (1b).
+- ``msg_type``: message type.
+- ``msg_len``: message length.
+- ``teid``: tunnel endpoint identifier.
+- Default ``mask`` matches teid only.
+
 Actions
 ~~~~~~~
 
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 2ed62f5..8cc2399 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2696,6 +2696,10 @@ This section lists supported pattern items and their attributes, if any.
 
   - ``thresh {unsigned}``: accuracy threshold.
 
+- ``gtp``, ``gtpc``, ``gtpu``: match GTP header.
+
+  - ``teid {unsigned}``: tunnel endpoint identifier.
+
 Actions list
 ^^^^^^^^^^^^
 
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index bba6169..5da3aff 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -309,6 +309,33 @@ enum rte_flow_item_type {
 	 * See struct rte_flow_item_fuzzy.
 	 */
 	RTE_FLOW_ITEM_TYPE_FUZZY,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTP,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-C packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPC,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-U packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPU,
 };
 
 /**
@@ -735,6 +762,31 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
 #endif
 
 /**
+ * RTE_FLOW_ITEM_TYPE_GTP.
+ *
+ * Matches a GTP header.
+ */
+struct rte_flow_item_gtp {
+	/**
+	 * Version (3b), protocol type (1b), reserved (1b),
+	 * Extension header flag (1b),
+	 * Sequence number flag (1b),
+	 * N-PDU number flag (1b).
+	 */
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type; /**< Message type. */
+	rte_be16_t msg_len; /**< Message length. */
+	rte_be32_t teid; /**< Tunnel endpoint identifier. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
+	.teid = RTE_BE32(0xffffffff),
+};
+#endif
+
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v5 5/8] net/i40e: finish integration FDIR with generic flow API
  2017-09-28  8:13           ` [PATCH v5 0/8] GPT-C and GTP-U enabling Beilei Xing
                               ` (3 preceding siblings ...)
  2017-09-28  8:13             ` [PATCH v5 4/8] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-09-28  8:13             ` Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
                               ` (2 subsequent siblings)
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  8:13 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

rte_eth_fdir_* structures are still used in FDIR functions.
This patch adds i40e private FDIR-related structures and
functions to finish integrating FDIR with the generic flow API.
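
For reference, a sketch (not part of the patch) of how the new private
structure could be filled for a hypothetical IPv4/UDP rule steering
matches to queue 2; only fields defined in this patch are used and the
values are examples:

  struct i40e_fdir_filter_conf conf = { 0 };

  conf.soft_id = 1;
  conf.input.pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
  conf.input.flow.udp4_flow.ip.src_ip = RTE_BE32(0xC0A80001); /* 192.168.0.1 */
  conf.input.flow.udp4_flow.dst_port = RTE_BE16(2152);
  conf.action.rx_queue = 2;
  conf.action.behavior = I40E_FDIR_ACCEPT;
  conf.action.report_status = I40E_FDIR_REPORT_ID;

Such a structure is what i40e_flow_add_del_fdir_filter() takes instead
of the rte_eth_fdir_filter used by the legacy filter_ctrl path.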

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  83 ++++++-
 drivers/net/i40e/i40e_fdir.c   | 488 +++++++++++++++++++++++++++++++++++++++--
 drivers/net/i40e/i40e_flow.c   |  76 +++----
 3 files changed, 584 insertions(+), 63 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 73fb5c3..4d690a1 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -461,6 +461,80 @@ struct i40e_vmdq_info {
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
 /*
+ * A union contains the inputs for all types of flow
+ * items in flows need to be in big endian
+ */
+union i40e_fdir_flow {
+	struct rte_eth_l2_flow     l2_flow;
+	struct rte_eth_udpv4_flow  udp4_flow;
+	struct rte_eth_tcpv4_flow  tcp4_flow;
+	struct rte_eth_sctpv4_flow sctp4_flow;
+	struct rte_eth_ipv4_flow   ip4_flow;
+	struct rte_eth_udpv6_flow  udp6_flow;
+	struct rte_eth_tcpv6_flow  tcp6_flow;
+	struct rte_eth_sctpv6_flow sctp6_flow;
+	struct rte_eth_ipv6_flow   ipv6_flow;
+};
+
+/* A structure used to contain extend input of flow */
+struct i40e_fdir_flow_ext {
+	uint16_t vlan_tci;
+	uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/* It is filled by the flexible payload to match. */
+	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
+	uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+};
+
+/* A structure used to define the input for a flow director filter entry */
+struct i40e_fdir_input {
+	enum i40e_filter_pctype pctype;
+	union i40e_fdir_flow flow;
+	/* Flow fields to match, dependent on flow_type */
+	struct i40e_fdir_flow_ext flow_ext;
+	/* Additional fields to match */
+};
+
+/* Behavior will be taken if FDIR match */
+enum i40e_fdir_behavior {
+	I40E_FDIR_ACCEPT = 0,
+	I40E_FDIR_REJECT,
+	I40E_FDIR_PASSTHRU,
+};
+
+/* Flow director report status
+ * It defines what will be reported if FDIR entry is matched.
+ */
+enum i40e_fdir_status {
+	I40E_FDIR_NO_REPORT_STATUS = 0, /* Report nothing. */
+	I40E_FDIR_REPORT_ID,            /* Only report FD ID. */
+	I40E_FDIR_REPORT_ID_FLEX_4,     /* Report FD ID and 4 flex bytes. */
+	I40E_FDIR_REPORT_FLEX_8,        /* Report 8 flex bytes. */
+};
+
+/* A structure used to define an action when match FDIR packet filter. */
+struct i40e_fdir_action {
+	uint16_t rx_queue;        /* Queue assigned to if FDIR match. */
+	enum i40e_fdir_behavior behavior;     /* Behavior will be taken */
+	enum i40e_fdir_status report_status;  /* Status report option */
+	/* If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or
+	 * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+	 * flex bytes start from in flexible payload.
+	 */
+	uint8_t flex_off;
+};
+
+/* A structure used to define the flow director filter entry by filter_ctrl API
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct i40e_fdir_filter_conf {
+	uint32_t soft_id;
+	/* ID, an unique value is required when deal with FDIR entry */
+	struct i40e_fdir_input input;    /* Input set */
+	struct i40e_fdir_action action;  /* Action taken when match */
+};
+
+/*
  * Structure to store flex pit for flow director.
  */
 struct i40e_fdir_flex_pit {
@@ -483,7 +557,7 @@ struct i40e_fdir_flex_mask {
 
 struct i40e_fdir_filter {
 	TAILQ_ENTRY(i40e_fdir_filter) rules;
-	struct rte_eth_fdir_filter fdir;
+	struct i40e_fdir_filter_conf fdir;
 };
 
 TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
@@ -907,7 +981,7 @@ extern const struct rte_flow_ops i40e_flow_ops;
 
 union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_fdir_filter fdir_filter;
+	struct i40e_fdir_filter_conf fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
@@ -981,7 +1055,7 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
 int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
 				 struct i40e_ethertype_filter_input *input);
 int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
-			    struct rte_eth_fdir_input *input);
+			    struct i40e_fdir_input *input);
 struct i40e_tunnel_filter *
 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
 			     const struct i40e_tunnel_filter_input *input);
@@ -994,6 +1068,9 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf,
 int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 			     const struct rte_eth_fdir_filter *filter,
 			     bool add);
+int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
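
To show how the new structures above fit together, a driver-internal rule
could be described roughly as follows. This is a sketch with made-up values
("dev", "ret" and the chosen queue are assumed to exist in the surrounding
context); in practice these fields are filled by the rte_flow parsing code
in i40e_flow.c.

    struct i40e_fdir_filter_conf conf;

    memset(&conf, 0, sizeof(conf));
    conf.soft_id = 1;                        /* reported as the FD ID on match */
    conf.input.pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
    conf.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(5000); /* big endian */
    conf.action.rx_queue = 3;
    conf.action.behavior = I40E_FDIR_ACCEPT;
    conf.action.report_status = I40E_FDIR_REPORT_ID;

    /* add == true programs the rule, add == false removes it */
    ret = i40e_flow_add_del_fdir_filter(dev, &conf, true);
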
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 84c0a1f..1072a24 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -100,13 +100,18 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
 			const struct rte_eth_fdir_filter *filter,
 			bool add);
-static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter);
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input);
+			const struct i40e_fdir_input *input);
 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
 				   struct i40e_fdir_filter *filter);
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 
 static int
 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -934,6 +939,263 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static inline int
+i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+				unsigned char *raw_pkt,
+				bool vlan)
+{
+	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+	uint16_t *ether_type;
+	uint8_t len = 2 * sizeof(struct ether_addr);
+	struct ipv4_hdr *ip;
+	struct ipv6_hdr *ip6;
+	static const uint8_t next_proto[] = {
+		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+	};
+
+	raw_pkt += 2 * sizeof(struct ether_addr);
+	if (vlan && fdir_input->flow_ext.vlan_tci) {
+		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+		rte_memcpy(raw_pkt + sizeof(uint16_t),
+			   &fdir_input->flow_ext.vlan_tci,
+			   sizeof(uint16_t));
+		raw_pkt += sizeof(vlan_frame);
+		len += sizeof(vlan_frame);
+	}
+	ether_type = (uint16_t *)raw_pkt;
+	raw_pkt += sizeof(uint16_t);
+	len += sizeof(uint16_t);
+
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		*ether_type = fdir_input->flow.l2_flow.ether_type;
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		ip = (struct ipv4_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+		/* set the total length to the default value */
+		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+					fdir_input->flow.ip4_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+					fdir_input->flow.ip4_flow.ttl :
+					I40E_FDIR_IP_DEFAULT_TTL;
+		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+		len += sizeof(struct ipv4_hdr);
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		ip6 = (struct ipv6_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		ip6->vtc_flow =
+			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					 (fdir_input->flow.ipv6_flow.tc <<
+					  I40E_FDIR_IPv6_TC_OFFSET));
+		ip6->payload_len =
+			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+					fdir_input->flow.ipv6_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+					fdir_input->flow.ipv6_flow.hop_limits :
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		rte_memcpy(&ip6->src_addr,
+			   &fdir_input->flow.ipv6_flow.dst_ip,
+			   IPV6_ADDR_LEN);
+		rte_memcpy(&ip6->dst_addr,
+			   &fdir_input->flow.ipv6_flow.src_ip,
+			   IPV6_ADDR_LEN);
+		len += sizeof(struct ipv6_hdr);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
+	}
+	return len;
+}
+
+/**
+ * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+			     const struct i40e_fdir_input *fdir_input,
+			     unsigned char *raw_pkt)
+{
+	unsigned char *payload, *ptr;
+	struct udp_hdr *udp;
+	struct tcp_hdr *tcp;
+	struct sctp_hdr *sctp;
+	uint8_t size, dst = 0;
+	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+	int len;
+
+	/* fill the ethernet and IP head */
+	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+					      !!fdir_input->flow_ext.vlan_tci);
+	if (len < 0)
+		return -EINVAL;
+
+	/* fill the L4 head */
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		payload = raw_pkt + len;
+		/**
+		 * ARP packet is a special case on which the payload
+		 * starts after the whole ARP header
+		 */
+		if (fdir_input->flow.l2_flow.ether_type ==
+				rte_cpu_to_be_16(ETHER_TYPE_ARP))
+			payload += sizeof(struct arp_hdr);
+		set_idx = I40E_FLXPLD_L2_IDX;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
+		return -EINVAL;
+	}
+
+	/* fill the flexbytes to payload */
+	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+		size = pf->fdir.flex_set[pit_idx].size;
+		if (size == 0)
+			continue;
+		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+		ptr = payload +
+		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+		(void)rte_memcpy(ptr,
+				 &fdir_input->flow_ext.flexbytes[dst],
+				 size * sizeof(uint16_t));
+	}
+
+	return 0;
+}
+
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -1007,17 +1269,17 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
 }
 
 static int
-i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter)
 {
-	rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
 	return 0;
 }
 
 /* Check if there exists the flow director filter */
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input)
+			const struct i40e_fdir_input *input)
 {
 	int ret;
 
@@ -1052,7 +1314,7 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
 
 /* Delete a flow director filter from the SW list */
 int
-i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
 {
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	struct i40e_fdir_filter *filter;
@@ -1082,16 +1344,13 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
  */
 int
 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
-			    const struct rte_eth_fdir_filter *filter,
-			    bool add)
+			 const struct rte_eth_fdir_filter *filter,
+			 bool add)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
 	enum i40e_filter_pctype pctype;
-	struct i40e_fdir_info *fdir_info = &pf->fdir;
-	struct i40e_fdir_filter *fdir_filter, *node;
-	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
 	int ret = 0;
 
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1114,6 +1373,68 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+		return ret;
+	}
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		/* get translated pctype value in fd pctype register */
+		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+			hw, I40E_GLQF_FD_PCTYPES(
+			(int)i40e_flowtype_to_pctype(
+			filter->input.flow_type)));
+	} else
+		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
+	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+			    pctype);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
+ * @pf: board private structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct i40e_fdir_filter_conf *filter,
+			      bool add)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	enum i40e_filter_pctype pctype;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_fdir_filter *fdir_filter, *node;
+	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+	int ret = 0;
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
+		return -ENOTSUP;
+	}
+
+	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "Invalid queue ID");
+		return -EINVAL;
+	}
+	if (filter->input.flow_ext.is_vf &&
+	    filter->input.flow_ext.dst_id >= pf->vf_num) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID");
+		return -EINVAL;
+	}
+
 	/* Check if there is the filter in SW list */
 	memset(&check_filter, 0, sizeof(check_filter));
 	i40e_fdir_filter_convert(filter, &check_filter);
@@ -1132,7 +1453,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 
 	memset(pkt, 0, I40E_FDIR_PKT_LEN);
 
-	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
 		return ret;
@@ -1142,12 +1463,11 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		/* get translated pctype value in fd pctype register */
 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
 			hw, I40E_GLQF_FD_PCTYPES(
-			(int)i40e_flowtype_to_pctype(
-			filter->input.flow_type)));
+			(int)filter->input.pctype));
 	} else
-		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+		pctype = filter->input.pctype;
 
-	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
 			    pctype);
@@ -1302,6 +1622,140 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
 }
 
 /*
+ * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
+ * It is done by a Flow Director Programming Descriptor followed by the
+ * packet structure that contains the filter fields to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add)
+{
+	struct i40e_tx_queue *txq = pf->fdir.txq;
+	struct i40e_rx_queue *rxq = pf->fdir.rxq;
+	const struct i40e_fdir_action *fdir_action = &filter->action;
+	volatile struct i40e_tx_desc *txdp;
+	volatile struct i40e_filter_program_desc *fdirdp;
+	uint32_t td_cmd;
+	uint16_t vsi_id, i;
+	uint8_t dest;
+
+	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+	fdirdp = (volatile struct i40e_filter_program_desc *)
+				(&txq->tx_ring[txq->tx_tail]);
+
+	fdirdp->qindex_flex_ptype_vsi =
+			rte_cpu_to_le_32((fdir_action->rx_queue <<
+					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((fdir_action->flex_off <<
+					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((pctype <<
+					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+	if (filter->input.flow_ext.is_vf)
+		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+	else
+		/* Use LAN VSI Id by default */
+		vsi_id = pf->main_vsi->vsi_id;
+	fdirdp->qindex_flex_ptype_vsi |=
+		rte_cpu_to_le_32(((uint32_t)vsi_id <<
+				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+	fdirdp->dtype_cmd_cntindex =
+			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+	if (add)
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+	else
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	if (fdir_action->behavior == I40E_FDIR_REJECT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+	else {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
+		return -EINVAL;
+	}
+
+	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+				I40E_TXD_FLTR_QW1_DEST_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+		rte_cpu_to_le_32((fdir_action->report_status <<
+				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(
+			((uint32_t)pf->fdir.match_counter_index <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+	txdp = &txq->tx_ring[txq->tx_tail + 1];
+	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+	td_cmd = I40E_TX_DESC_CMD_EOP |
+		 I40E_TX_DESC_CMD_RS  |
+		 I40E_TX_DESC_CMD_DUMMY;
+
+	txdp->cmd_type_offset_bsz =
+		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+	/* Update the tx tail register */
+	rte_wmb();
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if ((txdp->cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+			break;
+		rte_delay_us(1);
+	}
+	if (i >= I40E_FDIR_MAX_WAIT_US) {
+		PMD_DRV_LOG(ERR,
+		    "Failed to program FDIR filter: time out to get DD on tx queue.");
+		return -ETIMEDOUT;
+	}
+	/* wait up to 10 ms in total to check the programming status */
+	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if (i40e_check_fdir_programming_status(rxq) >= 0)
+			return 0;
+		rte_delay_us(1);
+	}
+	PMD_DRV_LOG(ERR,
+		 "Failed to program FDIR filter: programming status reported.");
+	return -ETIMEDOUT;
+}
+
+/*
  * i40e_fdir_flush - clear all filters of Flow Director table
  * @pf: board private structure
  */
@@ -1580,7 +2034,7 @@ i40e_fdir_filter_restore(struct i40e_pf *pf)
 	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
 
 	TAILQ_FOREACH(f, fdir_list, rules)
-		i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
 
 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
 	guarant_cnt =
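
In short, the programming path added above works as follows (a summary of the
code, not additional code):

    /*
     * i40e_flow_fdir_filter_programming() sequence:
     *  1. fill a filter programming descriptor on the dedicated FDIR TX queue
     *     (destination queue, flex offset, pctype, destination VSI,
     *     add/remove command, behavior and report status);
     *  2. queue a dummy TX descriptor pointing at the raw packet built by
     *     i40e_flow_fdir_construct_pkt();
     *  3. bump the TX tail, poll up to 10 ms for the DD bit, then poll the
     *     companion FDIR RX queue for the reported programming status.
     */
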
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b92719a..73af7fd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -84,11 +84,11 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					const struct rte_flow_item *pattern,
 					struct rte_flow_error *error,
-					struct rte_eth_fdir_filter *filter);
+					struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 				       const struct rte_flow_action *actions,
 				       struct rte_flow_error *error,
-				       struct rte_eth_fdir_filter *filter);
+				       struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				 const struct rte_flow_action *actions,
 				 struct rte_flow_error *error,
@@ -2315,7 +2315,7 @@ static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			     const struct rte_flow_item *pattern,
 			     struct rte_flow_error *error,
-			     struct rte_eth_fdir_filter *filter)
+			     struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_item *item = pattern;
@@ -2329,8 +2329,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
-	enum i40e_filter_pctype pctype;
+	enum i40e_filter_pctype pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
@@ -2402,7 +2401,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2420,7 +2419,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2457,13 +2456,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					input_set |= I40E_INSET_IPV4_PROTO;
 
 				/* Get filter info */
-				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+				pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 				/* Check if it is fragment. */
 				frag_off = ipv4_spec->hdr.fragment_offset;
 				frag_off = rte_be_to_cpu_16(frag_off);
 				if (frag_off & IPV4_HDR_OFFSET_MASK ||
 				    frag_off & IPV4_HDR_MF_FLAG)
-					flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
 
 				/* Get the filter info */
 				filter->input.flow.ip4_flow.proto =
@@ -2535,11 +2534,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				/* Check if it is fragment. */
 				if (ipv6_spec->hdr.proto ==
 				    I40E_IPV6_FRAG_HEADER)
-					flow_type =
-						RTE_ETH_FLOW_FRAG_IPV6;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
 				else
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+					pctype =
+					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
 			}
 
 			layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2572,11 +2570,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.tcp4_flow.src_port =
@@ -2616,11 +2614,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.udp4_flow.src_port =
@@ -2663,11 +2661,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.sctp4_flow.src_port =
@@ -2776,14 +2774,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	pctype = i40e_flowtype_to_pctype(flow_type);
-	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Unsupported flow type");
-		return -rte_errno;
-	}
-
 	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
 	if (ret == -1) {
 		rte_flow_error_set(error, EINVAL,
@@ -2797,7 +2787,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->input.flow_type = flow_type;
+	filter->input.pctype = pctype;
 
 	/* Store flex mask to SW */
 	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
@@ -2832,7 +2822,7 @@ static int
 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 			    const struct rte_flow_action *actions,
 			    struct rte_flow_error *error,
-			    struct rte_eth_fdir_filter *filter)
+			    struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
@@ -2855,13 +2845,13 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 					   "Invalid queue ID for FDIR.");
 			return -rte_errno;
 		}
-		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		filter->action.behavior = I40E_FDIR_ACCEPT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_DROP:
-		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+		filter->action.behavior = I40E_FDIR_REJECT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
-		filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+		filter->action.behavior = I40E_FDIR_PASSTHRU;
 		break;
 	default:
 		rte_flow_error_set(error, EINVAL,
@@ -2876,11 +2866,11 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_MARK:
 		mark_spec = (const struct rte_flow_action_mark *)act->conf;
-		filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+		filter->action.report_status = I40E_FDIR_REPORT_ID;
 		filter->soft_id = mark_spec->id;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
-		filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
 		break;
 	case RTE_FLOW_ACTION_TYPE_END:
 		return 0;
@@ -2911,7 +2901,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error,
 			    union i40e_filter_t *filter)
 {
-	struct rte_eth_fdir_filter *fdir_filter =
+	struct i40e_fdir_filter_conf *fdir_filter =
 		&filter->fdir_filter;
 	int ret;
 
@@ -3877,7 +3867,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_ethertype_filter_list);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 				       &cons_filter.fdir_filter, 1);
 		if (ret)
 			goto free_flow;
@@ -3927,7 +3917,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 			      (struct i40e_tunnel_filter *)flow->rule);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
 		break;
 	default:
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v5 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-28  8:13           ` [PATCH v5 0/8] GTP-C and GTP-U enabling Beilei Xing
                               ` (4 preceding siblings ...)
  2017-09-28  8:13             ` [PATCH v5 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
@ 2017-09-28  8:13             ` Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  8:13 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds FDIR support for GTP-C and GTP-U. The
input set for GTP-C and GTP-U is the TEID.
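
For illustration, a rule matching GTP-U traffic by TEID could be built as in
the sketch below. This is not part of the patch: the helper name, TEID and
queue index are made up. Note that, per the parser in this patch, only the
TEID may be masked; masks on the other GTP fields are rejected.

    #include <stdint.h>
    #include <rte_flow.h>
    #include <rte_byteorder.h>

    static struct rte_flow *
    gtpu_teid_to_queue(uint16_t port_id, uint16_t rx_queue,
                       struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_gtp gtp_spec = {
            .teid = rte_cpu_to_be_32(0x12345678),   /* TEID in big endian */
        };
        struct rte_flow_item_gtp gtp_mask = {
            .teid = UINT32_MAX,                     /* full TEID match only */
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_GTPU,
              .spec = &gtp_spec, .mask = &gtp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = rx_queue };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }

With the testpmd support added earlier in this series, the equivalent command
should look roughly like:

    flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions queue index 3 / end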

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  30 +++++
 drivers/net/i40e/i40e_fdir.c   | 200 ++++++++++++++++++++++---------
 drivers/net/i40e/i40e_flow.c   | 263 +++++++++++++++++++++++++++++++++++------
 3 files changed, 396 insertions(+), 97 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 4d690a1..502f6c6 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -460,6 +460,25 @@ struct i40e_vmdq_info {
 #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
+/* A structure used to define the input for GTP flow */
+struct i40e_gtp_flow {
+	struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */
+	uint8_t msg_type;              /* Message type. */
+	uint32_t teid;                 /* TEID in big endian. */
+};
+
+/* A structure used to define the input for GTP IPV4 flow */
+struct i40e_gtp_ipv4_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv4_flow ip4;
+};
+
+/* A structure used to define the input for GTP IPV6 flow */
+struct i40e_gtp_ipv6_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv6_flow ip6;
+};
+
 /*
  * A union that contains the inputs for all types of flows.
  * Items in a flow need to be in big endian.
@@ -474,6 +493,14 @@ union i40e_fdir_flow {
 	struct rte_eth_tcpv6_flow  tcp6_flow;
 	struct rte_eth_sctpv6_flow sctp6_flow;
 	struct rte_eth_ipv6_flow   ipv6_flow;
+	struct i40e_gtp_flow       gtp_flow;
+	struct i40e_gtp_ipv4_flow  gtp_ipv4_flow;
+	struct i40e_gtp_ipv6_flow  gtp_ipv6_flow;
+};
+
+enum i40e_fdir_ip_type {
+	I40E_FDIR_IPTYPE_IPV4,
+	I40E_FDIR_IPTYPE_IPV6,
 };
 
 /* A structure used to contain extend input of flow */
@@ -483,6 +510,9 @@ struct i40e_fdir_flow_ext {
 	/* It is filled by the flexible payload to match. */
 	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
 	uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+	bool inner_ip;   /* If there is inner ip */
+	enum i40e_fdir_ip_type iip_type; /* ip type for inner ip */
+	bool customized_pctype; /* If customized pctype is used */
 };
 
 /* A structure used to define the input for a flow director filter entry */
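
As a reading aid for the parser changes below, this is roughly what ends up
in the new fields for an ETH / IPV4 / UDP / GTPU / IPV4 pattern (a summary,
not part of the patch):

    /*
     * For an eth / ipv4 / udp / gtpu / ipv4 pattern the parser fills:
     *   filter->input.flow.gtp_flow.teid          = <TEID from the GTP item>
     *   filter->input.flow_ext.customized_pctype  = true
     *   filter->input.flow_ext.inner_ip           = true
     *   filter->input.flow_ext.iip_type           = I40E_FDIR_IPTYPE_IPV4
     * and i40e_flow_fdir_get_pctype_value() then maps this combination to
     * the GTPU_IPV4 customized pctype.
     */
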
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 1072a24..55c86ee 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -71,6 +71,9 @@
 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
 #define I40E_FDIR_UDP_DEFAULT_LEN           400
+#define I40E_FDIR_GTP_DEFAULT_LEN           384
+#define I40E_FDIR_INNER_IP_DEFAULT_LEN      384
+#define I40E_FDIR_INNER_IPv6_DEFAULT_LEN    344
 
 /* Wait time for fdir filter programming */
 #define I40E_FDIR_MAX_WAIT_US 10000
@@ -939,16 +942,34 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static struct i40e_customized_pctype *
+i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
+{
+	struct i40e_customized_pctype *cus_pctype;
+	enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
+
+	for (; i < I40E_CUSTOMIZED_MAX; i++) {
+		cus_pctype = &pf->customized_pctype[i];
+		if (pctype == cus_pctype->pctype)
+			return cus_pctype;
+	}
+	return NULL;
+}
+
 static inline int
-i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
+				const struct i40e_fdir_input *fdir_input,
 				unsigned char *raw_pkt,
 				bool vlan)
 {
+	struct i40e_customized_pctype *cus_pctype = NULL;
 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
 	uint16_t *ether_type;
 	uint8_t len = 2 * sizeof(struct ether_addr);
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
+	uint8_t pctype = fdir_input->pctype;
+	bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
 	static const uint8_t next_proto[] = {
 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
@@ -975,27 +996,30 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 	raw_pkt += sizeof(uint16_t);
 	len += sizeof(uint16_t);
 
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	if (is_customized_pctype) {
+		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+		if (!cus_pctype)
+			PMD_DRV_LOG(ERR, "unknown pctype %u.",
+				    fdir_input->pctype);
+	}
+
+	if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
 		*ether_type = fdir_input->flow.l2_flow.ether_type;
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
+		 is_customized_pctype) {
 		ip = (struct ipv4_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
 		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
 		/* set the total length to the default value */
 		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
-		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
-					fdir_input->flow.ip4_flow.proto :
-					next_proto[fdir_input->pctype];
 		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
-					fdir_input->flow.ip4_flow.ttl :
-					I40E_FDIR_IP_DEFAULT_TTL;
+			fdir_input->flow.ip4_flow.ttl :
+			I40E_FDIR_IP_DEFAULT_TTL;
 		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
 		/**
 		 * The source and destination fields in the transmitted packet
@@ -1004,13 +1028,22 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		 */
 		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
 		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+
+		if (!is_customized_pctype)
+			ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+				fdir_input->flow.ip4_flow.proto :
+				next_proto[fdir_input->pctype];
+		else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
+			ip->next_proto_id = IPPROTO_UDP;
 		len += sizeof(struct ipv4_hdr);
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		ip6 = (struct ipv6_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
@@ -1021,11 +1054,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		ip6->payload_len =
 			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
 		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
-					fdir_input->flow.ipv6_flow.proto :
-					next_proto[fdir_input->pctype];
+			fdir_input->flow.ipv6_flow.proto :
+			next_proto[fdir_input->pctype];
 		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
-					fdir_input->flow.ipv6_flow.hop_limits :
-					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+			fdir_input->flow.ipv6_flow.hop_limits :
+			I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
 		/**
 		 * The source and destination fields in the transmitted packet
 		 * need to be presented in a reversed order with respect
@@ -1038,12 +1071,12 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 			   &fdir_input->flow.ipv6_flow.src_ip,
 			   IPV6_ADDR_LEN);
 		len += sizeof(struct ipv6_hdr);
-		break;
-	default:
+	} else {
 		PMD_DRV_LOG(ERR, "unknown pctype %u.",
 			    fdir_input->pctype);
 		return -1;
 	}
+
 	return len;
 }
 
@@ -1058,23 +1091,28 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 			     const struct i40e_fdir_input *fdir_input,
 			     unsigned char *raw_pkt)
 {
-	unsigned char *payload, *ptr;
+	unsigned char *payload = NULL;
+	unsigned char *ptr;
 	struct udp_hdr *udp;
 	struct tcp_hdr *tcp;
 	struct sctp_hdr *sctp;
+	struct rte_flow_item_gtp *gtp;
+	struct ipv4_hdr *gtp_ipv4;
+	struct ipv6_hdr *gtp_ipv6;
 	uint8_t size, dst = 0;
 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
 	int len;
+	uint8_t pctype = fdir_input->pctype;
+	struct i40e_customized_pctype *cus_pctype;
 
 	/* fill the ethernet and IP head */
-	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
 					      !!fdir_input->flow_ext.vlan_tci);
 	if (len < 0)
 		return -EINVAL;
 
 	/* fill the L4 head */
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1085,9 +1123,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1098,9 +1134,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1111,15 +1145,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1130,9 +1160,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1143,9 +1171,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
 		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1156,14 +1182,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	} else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
 		payload = raw_pkt + len;
 		/**
 		 * ARP packet is a special case on which the payload
@@ -1173,10 +1196,69 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 				rte_cpu_to_be_16(ETHER_TYPE_ARP))
 			payload += sizeof(struct arp_hdr);
 		set_idx = I40E_FLXPLD_L2_IDX;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
-		return -EINVAL;
+	} else if (fdir_input->flow_ext.customized_pctype) {
+		/* If customized pctype is used */
+		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+		if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
+			udp = (struct udp_hdr *)(raw_pkt + len);
+			udp->dgram_len =
+				rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+			gtp = (struct rte_flow_item_gtp *)
+				((unsigned char *)udp + sizeof(struct udp_hdr));
+			gtp->v_pt_rsv_flags = 0x30;
+			gtp->msg_len =
+				rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+			gtp->teid = fdir_input->flow.gtp_flow.teid;
+			gtp->msg_type = 0x1;
+
+			if (cus_pctype->index == I40E_CUSTOMIZED_GTPC)
+				udp->dst_port = rte_cpu_to_be_16(2123);
+			else
+				udp->dst_port = rte_cpu_to_be_16(2152);
+
+			if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
+				gtp->msg_type = 0xFF;
+				gtp_ipv4 = (struct ipv4_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv4->version_ihl =
+					I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+				gtp_ipv4->next_proto_id = IPPROTO_IP;
+				gtp_ipv4->total_length =
+					rte_cpu_to_be_16(
+						I40E_FDIR_INNER_IP_DEFAULT_LEN);
+				payload = (unsigned char *)gtp_ipv4 +
+					sizeof(struct ipv4_hdr);
+			} else if (cus_pctype->index ==
+				   I40E_CUSTOMIZED_GTPU_IPV6) {
+				gtp->msg_type = 0xFF;
+				gtp_ipv6 = (struct ipv6_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv6->vtc_flow =
+					rte_cpu_to_be_32(
+					       I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					       (0 << I40E_FDIR_IPv6_TC_OFFSET));
+				gtp_ipv6->proto = IPPROTO_NONE;
+				gtp_ipv6->payload_len =
+					rte_cpu_to_be_16(
+					      I40E_FDIR_INNER_IPv6_DEFAULT_LEN);
+				gtp_ipv6->hop_limits =
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+				payload = (unsigned char *)gtp_ipv6 +
+					sizeof(struct ipv6_hdr);
+			} else
+				payload = (unsigned char *)gtp +
+					sizeof(struct rte_flow_item_gtp);
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
 	}
 
 	/* fill the flexbytes to payload */
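
For reference, the constants used above when building the dummy programming
packet map onto the GTPv1 header as follows (a reading aid, not new code):

    /*
     * UDP destination port 2123 -> GTP-C, 2152 -> GTP-U
     * v_pt_rsv_flags = 0x30     -> version 1, protocol type GTP, no options
     * msg_type = 0x01           -> Echo Request (plain GTP-C/GTP-U match)
     * msg_type = 0xFF           -> G-PDU, used when an inner IPv4/IPv6
     *                              header follows (GTPU_IPV4/GTPU_IPV6)
     * teid                      -> copied verbatim (big endian) from the rule
     */
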
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 73af7fd..ea81ecb 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -189,6 +189,40 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
@@ -216,6 +250,40 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_RAW,
@@ -1576,10 +1644,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	/* FDIR - support default flow type with flexible payload */
 	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -2302,6 +2378,42 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf,
 	return 0;
 }
 
+static uint8_t
+i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
+				enum rte_flow_item_type item_type,
+				struct i40e_fdir_filter_conf *filter)
+{
+	struct i40e_customized_pctype *cus_pctype = NULL;
+
+	switch (item_type) {
+	case RTE_FLOW_ITEM_TYPE_GTPC:
+		cus_pctype = i40e_find_customized_pctype(pf,
+							 I40E_CUSTOMIZED_GTPC);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GTPU:
+		if (!filter->input.flow_ext.inner_ip)
+			cus_pctype = i40e_find_customized_pctype(pf,
+							 I40E_CUSTOMIZED_GTPU);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV4)
+			cus_pctype = i40e_find_customized_pctype(pf,
+						 I40E_CUSTOMIZED_GTPU_IPV4);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV6)
+			cus_pctype = i40e_find_customized_pctype(pf,
+						 I40E_CUSTOMIZED_GTPU_IPV6);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported item type");
+		break;
+	}
+
+	if (cus_pctype)
+		return cus_pctype->pctype;
+
+	return I40E_FILTER_PCTYPE_INVALID;
+}
+
 /* 1. Last in item should be NULL as range is not supported.
  * 2. Supported patterns: refer to array i40e_supported_patterns.
  * 3. Supported flow type and input set: refer to array
@@ -2326,14 +2438,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	enum i40e_filter_pctype pctype = 0;
+	uint8_t pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
 	uint32_t i, j;
 	uint8_t  ipv6_addr_mask[16] = {
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -2351,12 +2465,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	uint16_t outer_tpid;
 	uint16_t ether_type;
 	uint32_t vtc_flow_cpu;
+	bool outer_ip = true;
 	int ret;
 
 	memset(off_arr, 0, sizeof(off_arr));
 	memset(len_arr, 0, sizeof(len_arr));
 	memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
 	outer_tpid = i40e_get_outer_vlan(dev);
+	filter->input.flow_ext.customized_pctype = false;
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
@@ -2430,7 +2546,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv4_mask =
 				(const struct rte_flow_item_ipv4 *)item->mask;
 
-			if (ipv4_spec && ipv4_mask) {
+			if (ipv4_spec && ipv4_mask && outer_ip) {
 				/* Check IPv4 mask and update input set */
 				if (ipv4_mask->hdr.version_ihl ||
 				    ipv4_mask->hdr.total_length ||
@@ -2475,9 +2591,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					ipv4_spec->hdr.src_addr;
 				filter->input.flow.ip4_flow.dst_ip =
 					ipv4_spec->hdr.dst_addr;
+
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV4;
+			} else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv4 mask.");
+				return -rte_errno;
 			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+			if (outer_ip)
+				outer_ip = false;
 
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -2487,7 +2616,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv6_mask =
 				(const struct rte_flow_item_ipv6 *)item->mask;
 
-			if (ipv6_spec && ipv6_mask) {
+			if (ipv6_spec && ipv6_mask && outer_ip) {
 				/* Check IPv6 mask and update input set */
 				if (ipv6_mask->hdr.payload_len) {
 					rte_flow_error_set(error, EINVAL,
@@ -2538,10 +2667,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				else
 					pctype =
 					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
-			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV6;
+			} else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv6 mask");
+				return -rte_errno;
+			}
 
+			if (outer_ip)
+				outer_ip = false;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
@@ -2636,6 +2777,37 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			layer_idx = I40E_FLXPLD_L4_IDX;
 
 			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			if (!pf->gtp_support) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported protocol");
+				return -rte_errno;
+			}
+
+			gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+				    gtp_mask->msg_type ||
+				    gtp_mask->msg_len ||
+				    gtp_mask->teid != UINT32_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				filter->input.flow.gtp_flow.teid =
+					gtp_spec->teid;
+				filter->input.flow_ext.customized_pctype = true;
+				cus_proto = item_type;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
 			sctp_spec =
 				(const struct rte_flow_item_sctp *)item->spec;
@@ -2774,43 +2946,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Conflict with the first rule's input set.");
-		return -rte_errno;
-	} else if (ret == -EINVAL) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Invalid pattern mask.");
-		return -rte_errno;
+	/* Get customized pctype value */
+	if (filter->input.flow_ext.customized_pctype) {
+		pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
+		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported pctype");
+			return -rte_errno;
+		}
 	}
 
-	filter->input.pctype = pctype;
+	/* If customized pctype is not used, set fdir configuration.*/
+	if (!filter->input.flow_ext.customized_pctype) {
+		ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Conflict with the first rule's input set.");
+			return -rte_errno;
+		} else if (ret == -EINVAL) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Invalid pattern mask.");
+			return -rte_errno;
+		}
 
-	/* Store flex mask to SW */
-	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Exceed maximal number of bitmasks");
-		return -rte_errno;
-	} else if (ret == -2) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Conflict with the first flexible rule");
-		return -rte_errno;
-	} else if (ret > 0)
-		cfg_flex_msk = false;
+		/* Store flex mask to SW */
+		ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Exceed maximal number of bitmasks");
+			return -rte_errno;
+		} else if (ret == -2) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Conflict with the first flexible rule");
+			return -rte_errno;
+		} else if (ret > 0)
+			cfg_flex_msk = false;
 
-	if (cfg_flex_pit)
-		i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+		if (cfg_flex_pit)
+			i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
 
-	if (cfg_flex_msk)
-		i40e_flow_set_fdir_flex_msk(pf, pctype);
+		if (cfg_flex_msk)
+			i40e_flow_set_fdir_flex_msk(pf, pctype);
+	}
+
+	filter->input.pctype = pctype;
 
 	return 0;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v5 7/8] net/i40e: add cloud filter parsing function for GTP
  2017-09-28  8:13           ` [PATCH v5 0/8] GPT-C and GTP-U enabling Beilei Xing
                               ` (5 preceding siblings ...)
  2017-09-28  8:13             ` [PATCH v5 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-09-28  8:13             ` Beilei Xing
  2017-09-28  8:13             ` [PATCH v5 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  8:13 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds the i40e_flow_parse_gtp_filter parsing
function for GTP-C and GTP-U to support cloud filters.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
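
For illustration, once the GTP-enabling DDP profile is loaded (so that
pf->gtp_support is set), a rule handled by this parser can be created
from testpmd; the port id, TEID value and queue index below are only
placeholders:

    testpmd> flow create 0 ingress pattern eth / ipv4 / udp / gtpc teid is 1234 / end actions pf / queue index 1 / end
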
---
 drivers/net/i40e/i40e_ethdev.h |   2 +
 drivers/net/i40e/i40e_flow.c   | 151 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 153 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 502f6c6..436ca2c 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -703,6 +703,8 @@ enum i40e_tunnel_type {
 	I40E_TUNNEL_TYPE_MPLSoUDP,
 	I40E_TUNNEL_TYPE_MPLSoGRE,
 	I40E_TUNNEL_TYPE_QINQ,
+	I40E_TUNNEL_TYPE_GTPC,
+	I40E_TUNNEL_TYPE_GTPU,
 	I40E_TUNNEL_TYPE_MAX,
 };
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index ea81ecb..2bf7098 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -125,6 +125,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 				       const struct rte_flow_action actions[],
 				       struct rte_flow_error *error,
 				       union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+				      const struct rte_flow_attr *attr,
+				      const struct rte_flow_item pattern[],
+				      const struct rte_flow_action actions[],
+				      struct rte_flow_error *error,
+				      union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
 				      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -1808,6 +1814,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+	/* GTP-C & GTP-U */
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
 	/* QINQ */
 	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
 };
@@ -3823,6 +3834,146 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 }
 
 /* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ *    filled with 0.
+ */
+static int
+i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
+			    const struct rte_flow_item *pattern,
+			    struct rte_flow_error *error,
+			    struct i40e_tunnel_filter_conf *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_gtp *gtp_spec;
+	const struct rte_flow_item_gtp *gtp_mask;
+	enum rte_flow_item_type item_type;
+
+	if (!pf->gtp_support) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   item,
+				   "GTP is not supported by default.");
+		return -rte_errno;
+	}
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+			/* IPv4 is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec =
+				(const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask =
+				(const struct rte_flow_item_gtp *)item->mask;
+
+			if (!gtp_spec || !gtp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP item");
+				return -rte_errno;
+			}
+
+			if (gtp_mask->v_pt_rsv_flags ||
+			    gtp_mask->msg_type ||
+			    gtp_mask->msg_len ||
+			    gtp_mask->teid != UINT32_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+				return -rte_errno;
+			}
+
+			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+
+			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+			   const struct rte_flow_attr *attr,
+			   const struct rte_flow_item pattern[],
+			   const struct rte_flow_action actions[],
+			   struct rte_flow_error *error,
+			   union i40e_filter_t *filter)
+{
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
+	int ret;
+
+	ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+					  error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+	return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
  * 2. Supported filter types: QINQ.
  * 3. Mask of fields which need to be matched should be
  *    filled with 1.
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v5 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U
  2017-09-28  8:13           ` [PATCH v5 0/8] GPT-C and GTP-U enabling Beilei Xing
                               ` (6 preceding siblings ...)
  2017-09-28  8:13             ` [PATCH v5 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
@ 2017-09-28  8:13             ` Beilei Xing
  7 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-28  8:13 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch sets the TEID of GTP-C and GTP-U as the cloud filter
type by replacing the existing filter types inner_mac and
TUNNEL_KEY. This configuration is applied when a GTP-C or GTP-U
filter rule is added, and it is only invalidated by a NIC core
reset.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
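
As a worked example of the programming done below: for a TEID of
0x12345678, teid_le on a little-endian host is also 0x12345678, so the
admin queue cloud filter entry is filled with WORD0 = 0x1234 (the upper
16 bits) and WORD1 = 0x5678 (the lower 16 bits).
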
---
 drivers/net/i40e/i40e_ethdev.c | 193 +++++++++++++++++++++++++++++++++++++----
 drivers/net/i40e/i40e_ethdev.h |  17 ++--
 drivers/net/i40e/i40e_flow.c   |  12 +--
 3 files changed, 191 insertions(+), 31 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index aba35a5..18aa376 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -7161,7 +7161,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
-	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 3 entries */
@@ -7209,12 +7209,12 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
@@ -7232,12 +7232,131 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* For GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* for GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 
@@ -7328,7 +7447,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
 			0x40;
 		big_buffer = 1;
-		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
 		break;
 	case I40E_TUNNEL_TYPE_MPLSoGRE:
 		if (!pf->mpls_replace_flag) {
@@ -7344,7 +7463,37 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
 			0x0;
 		big_buffer = 1;
-		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
+		break;
+	case I40E_TUNNEL_TYPE_GTPC:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
+	case I40E_TUNNEL_TYPE_GTPU:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+			0x0;
+		big_buffer = 1;
 		break;
 	case I40E_TUNNEL_TYPE_QINQ:
 		if (!pf->qinq_replace_flag) {
@@ -7372,13 +7521,19 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 
 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
 		pfilter->element.flags |=
-			I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+			I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	else {
 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
 						&pfilter->element.flags);
@@ -10899,14 +11054,14 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
 			   sizeof(f->input.general_fields));
 
 		if (((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
 			big_buffer = 1;
 
 		if (big_buffer)
@@ -11294,7 +11449,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 2 entries */
@@ -11325,13 +11480,13 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L2 filter, input for L2 filter will be L1 filter  */
 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 436ca2c..b223456 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -650,12 +650,16 @@ struct i40e_ethertype_rule {
 
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
-#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
-#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP	8
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE	9
+#define I40E_AQC_ADD_CLOUD_FILTER_0X10		0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_0X11		0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X12		0x12
+#define I40E_AQC_ADD_L1_FILTER_0X11		0x11
+#define I40E_AQC_ADD_L1_FILTER_0X12		0x12
+#define I40E_AQC_ADD_L1_FILTER_0X13		0x13
+#define I40E_AQC_NEW_TR_21			21
+#define I40E_AQC_NEW_TR_22			22
 
 enum i40e_tunnel_iptype {
 	I40E_TUNNEL_IPTYPE_IPV4,
@@ -905,6 +909,7 @@ struct i40e_pf {
 	bool floating_veb_list[I40E_MAX_VF];
 	struct i40e_flow_list flow_list;
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
+	bool gtp_replace_flag;   /* 1 - GTP-C/U filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 2bf7098..f4c8e63 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -4344,12 +4344,12 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 		vsi = vf->vsi;
 	}
 
-	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X10))
 		big_buffer = 1;
 
 	if (big_buffer)
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* Re: [PATCH v5 4/8] ethdev: add GTP items to support flow API
  2017-09-28  8:13             ` [PATCH v5 4/8] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-09-28 13:43               ` Sean Harte
  2017-09-29  2:12                 ` Xing, Beilei
  0 siblings, 1 reply; 116+ messages in thread
From: Sean Harte @ 2017-09-28 13:43 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, andrey.chilikin, dev

On 28 September 2017 at 09:13, Beilei Xing <beilei.xing@intel.com> wrote:
> This patch adds GTP, GTPC and GTPU items for
> generic flow API, and also exposes item fields
> through the flow command.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> ---
>  app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
>  app/test-pmd/config.c                       |  3 ++
>  doc/guides/prog_guide/rte_flow.rst          | 18 ++++++++++
>  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
>  lib/librte_ether/rte_flow.h                 | 52 +++++++++++++++++++++++++++++
>  5 files changed, 117 insertions(+)
<snip>
> --- a/doc/guides/prog_guide/rte_flow.rst
> +++ b/doc/guides/prog_guide/rte_flow.rst
> @@ -955,6 +955,24 @@ Usage example, fuzzy match a TCPv4 packets:
>     | 4     | END      |
>     +-------+----------+
>
> +Item: ``GTP``, ``GTPC``, ``GTPU``
> +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> +
> +Matches a GTP header.
> +
> +Note: GTP, GTPC and GTPU use the same structure. Since only UDP destination port
> +is used to distinguish GTP_C (port is 2123) and GTP_U packets (port is 2152),
> +GTPC and GTPU item are defined for a user-friendly API when creating GTP-C and
> +GTP-U flow.

In GTPv1-C, request messages are sent from any port to port 2123, and
in the response message the ports are reversed (in GTPv2-C, it's a
little more complicated). Is the intention to only match requests?
It's not clear.

Also, it should be mentioned that GTPv0 is not included (it uses port 3386)

> +
> +- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
> +  extension header flag (1b), sequence number flag (1b), N-PDU number
> +  flag (1b).
> +- ``msg_type``: message type.
> +- ``msg_len``: message length.
> +- ``teid``: tunnel endpoint identifier.
> +- Default ``mask`` matches teid only.
> +
>  Actions
>  ~~~~~~~
>
<snip>
>  /**
> + * RTE_FLOW_ITEM_TYPE_GTP.
> + *
> + * Matches a GTP header.
> + */
> +struct rte_flow_item_gtp {
> +       /**
> +        * Version (2b), protocol type (1b), reserved (1b),
> +        * Extension header flag (1b),
> +        * Sequence number flag (1b),
> +        * N-PDU number flag (1b).
> +        */
> +       uint8_t v_pt_rsv_flags;

The version field has 3 bits, not 2 (it was correct above). The
meaning of the 5 flags in this byte is different in GTPv2-C compared
to GTPv1-C. Is the intention to only support GTPv1? If so that should
be stated. If GTPv2 is supported, then the teid field below is not
present in a few cases and matching on it could cause some strange
behaviour.

> +       uint8_t msg_type; /**< Message type. */
> +       rte_be16_t msg_len; /**< Message length. */
> +       rte_be32_t teid; /**< Tunnel endpoint identifier. */
> +};
> +
> +/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
> +#ifndef __cplusplus
> +static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
> +       .teid = RTE_BE32(0xffffffff),
> +};
> +#endif
> +
> +/**
>   * Matching pattern item definition.
>   *
>   * A pattern is formed by stacking items starting from the lowest protocol
> --
> 2.5.5
>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v5 4/8] ethdev: add GTP items to support flow API
  2017-09-28 13:43               ` Sean Harte
@ 2017-09-29  2:12                 ` Xing, Beilei
  0 siblings, 0 replies; 116+ messages in thread
From: Xing, Beilei @ 2017-09-29  2:12 UTC (permalink / raw)
  To: Sean Harte; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

> -----Original Message-----
> From: Sean Harte [mailto:seanbh@gmail.com]
> Sent: Thursday, September 28, 2017 9:43 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v5 4/8] ethdev: add GTP items to support
> flow API
> 
> On 28 September 2017 at 09:13, Beilei Xing <beilei.xing@intel.com> wrote:
> > This patch adds GTP, GTPC and GTPU items for generic flow API, and
> > also exposes item fields through the flow command.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> > ---
> >  app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
> >  app/test-pmd/config.c                       |  3 ++
> >  doc/guides/prog_guide/rte_flow.rst          | 18 ++++++++++
> >  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
> >  lib/librte_ether/rte_flow.h                 | 52
> +++++++++++++++++++++++++++++
> >  5 files changed, 117 insertions(+)
> <snip>
> > --- a/doc/guides/prog_guide/rte_flow.rst
> > +++ b/doc/guides/prog_guide/rte_flow.rst
> > @@ -955,6 +955,24 @@ Usage example, fuzzy match a TCPv4 packets:
> >     | 4     | END      |
> >     +-------+----------+
> >
> > +Item: ``GTP``, ``GTPC``, ``GTPU``
> > +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> > +
> > +Matches a GTP header.
> > +
> > +Note: GTP, GTPC and GTPU use the same structure. Since only UDP
> > +destination port is used to distinguish GTP_C (port is 2123) and
> > +GTP_U packets (port is 2152), GTPC and GTPU item are defined for a
> > +user-friendly API when creating GTP-C and GTP-U flow.
> 
> In GTPv1-C, request messages are sent from any port to port 2123, and in the
> response message the ports are reversed (in GTPv2-C, it's a little more
> complicated). Is the intention to only match requests?
> It's not clear.
> 
> Also, it should be mentioned that GTPv0 is not included (it uses port 3386)

Thanks for the comments, will clarify them in next version.

> 
> > +
> > +- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved
> > +(1b),
> > +  extension header flag (1b), sequence number flag (1b), N-PDU number
> > +  flag (1b).
> > +- ``msg_type``: message type.
> > +- ``msg_len``: message length.
> > +- ``teid``: tunnel endpoint identifier.
> > +- Default ``mask`` matches teid only.
> > +
> >  Actions
> >  ~~~~~~~
> >
> <snip>
> >  /**
> > + * RTE_FLOW_ITEM_TYPE_GTP.
> > + *
> > + * Matches a GTP header.
> > + */
> > +struct rte_flow_item_gtp {
> > +       /**
> > +        * Version (2b), protocol type (1b), reserved (1b),
> > +        * Extension header flag (1b),
> > +        * Sequence number flag (1b),
> > +        * N-PDU number flag (1b).
> > +        */
> > +       uint8_t v_pt_rsv_flags;
> 
> The version field has 3 bits, not 2 (it was correct above). The meaning of the 5
> flags in this byte is different in GTPv2-C compared to GTPv1-C. Is the
> intention to only support GTPv1? If so that should be stated. If GTPv2 is
> supported, then the teid field below is not present in a few cases and
> matching on it could cause some strange behaviour.

Thanks for the correction, I will change the version field in the next version.
And yes, we support GTPv1 only, will clarify it.
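
(For reference: in a GTPv1 header with protocol type 1 and none of the
optional flags set, v_pt_rsv_flags is 0x30, i.e. version 001b, PT 1b
and the remaining five bits 0; with the sequence number flag set it
becomes 0x32.)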

> 
> > +       uint8_t msg_type; /**< Message type. */
> > +       rte_be16_t msg_len; /**< Message length. */
> > +       rte_be32_t teid; /**< Tunnel endpoint identifier. */ };
> > +
> > +/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */ #ifndef __cplusplus
> > +static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
> > +       .teid = RTE_BE32(0xffffffff),
> > +};
> > +#endif
> > +
> > +/**
> >   * Matching pattern item definition.
> >   *
> >   * A pattern is formed by stacking items starting from the lowest
> > protocol
> > --
> > 2.5.5
> >

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v6 0/8] GPT-C and GTP-U enabling
  2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
                             ` (8 preceding siblings ...)
  2017-09-28  8:13           ` [PATCH v5 0/8] GPT-C and GTP-U enabling Beilei Xing
@ 2017-09-29  5:18           ` Beilei Xing
  2017-09-29  5:18             ` [PATCH v6 1/8] mbuf: support GTP in software packet type parser Beilei Xing
                               ` (8 more replies)
  9 siblings, 9 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-29  5:18 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch set enables RSS/FDIR/cloud filter for GTP-C and GTP-U.
It depends on Kirill's patch:
http://www.dpdk.org/dev/patchwork/patch/29325/

v6 changes:
 - Reword description of GTP item and GTP structure: mainly GTPv1 is
   supported, GTPv0 and GTPv2 are not included.

v5 changes:
 - Fix code style.
 - Reword commit log.

v4 changes:
 - Refine fdir related code.
 - Rework profile metadata parsing function.
 - Fix code style.

v3 changes:
 - Rework implementation to support the new profile.
 - Add GTPC and GTPU tunnel type in software packet type parser.
 - Update ptype info when loading profile.
 - Fix bug of updating pctype info.


v2 changes:
 - Enable RSS/FDIR/cloud filter dynamically by checking the profile
 - Add GTPC and GTPU items to distinguish rules for GTP-C and GTP-U
 - Rework FDIR/cloud filter enabling function

Beilei Xing (8):
  mbuf: support GTP in software packet type parser
  net/i40e: update ptype and pctype info
  net/i40e: support RSS for new pctype
  ethdev: add GTP items to support flow API
  net/i40e: finish integration FDIR with generic flow API
  net/i40e: add FDIR support for GTP-C and GTP-U
  net/i40e: add cloud filter parsing function for GTP
  net/i40e: enable cloud filter for GTP-C and GTP-U

 app/test-pmd/cmdline_flow.c                 |  40 ++
 app/test-pmd/config.c                       |   3 +
 doc/guides/prog_guide/rte_flow.rst          |  17 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |   4 +
 drivers/net/i40e/i40e_ethdev.c              | 535 +++++++++++++++++++++++++-
 drivers/net/i40e/i40e_ethdev.h              | 156 +++++++-
 drivers/net/i40e/i40e_fdir.c                | 570 +++++++++++++++++++++++++++-
 drivers/net/i40e/i40e_flow.c                | 496 ++++++++++++++++++++----
 drivers/net/i40e/rte_pmd_i40e.c             |   6 +-
 lib/librte_ether/rte_flow.h                 |  52 +++
 lib/librte_mbuf/rte_mbuf_ptype.c            |   2 +
 lib/librte_mbuf/rte_mbuf_ptype.h            |  24 ++
 12 files changed, 1775 insertions(+), 130 deletions(-)

-- 
2.5.5

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v6 1/8] mbuf: support GTP in software packet type parser
  2017-09-29  5:18           ` [PATCH v6 0/8] GPT-C and GTP-U enabling Beilei Xing
@ 2017-09-29  5:18             ` Beilei Xing
  2017-09-29  8:15               ` Sean Harte
  2017-09-29  5:18             ` [PATCH v6 2/8] net/i40e: update ptype and pctype info Beilei Xing
                               ` (7 subsequent siblings)
  8 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29  5:18 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Add support of GTP-C and GTP-U tunnels in rte_net_get_ptype().

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
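
With these values in place, an application can recognise GTP traffic
from the packet type reported by the PMD. A minimal sketch, assuming
rte_mbuf.h is included and handle_gtpu()/handle_gtpc() are
application-defined:

	/* m is a struct rte_mbuf * returned by rte_eth_rx_burst() */
	switch (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
	case RTE_PTYPE_TUNNEL_GTPU:
		handle_gtpu(m); /* GTP-U (user plane) tunnel */
		break;
	case RTE_PTYPE_TUNNEL_GTPC:
		handle_gtpc(m); /* GTP-C (control plane) tunnel */
		break;
	default:
		break;
	}
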
---
 lib/librte_mbuf/rte_mbuf_ptype.c |  2 ++
 lib/librte_mbuf/rte_mbuf_ptype.h | 24 ++++++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/lib/librte_mbuf/rte_mbuf_ptype.c b/lib/librte_mbuf/rte_mbuf_ptype.c
index e5c4fae..a450814 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.c
+++ b/lib/librte_mbuf/rte_mbuf_ptype.c
@@ -89,6 +89,8 @@ const char *rte_get_ptype_tunnel_name(uint32_t ptype)
 	case RTE_PTYPE_TUNNEL_NVGRE: return "TUNNEL_NVGRE";
 	case RTE_PTYPE_TUNNEL_GENEVE: return "TUNNEL_GENEVE";
 	case RTE_PTYPE_TUNNEL_GRENAT: return "TUNNEL_GRENAT";
+	case RTE_PTYPE_TUNNEL_GTPC: return "TUNNEL_GTPC";
+	case RTE_PTYPE_TUNNEL_GTPU: return "TUNNEL_GTPU";
 	default: return "TUNNEL_UNKNOWN";
 	}
 }
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index acd70bb..eb7cd2c 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -383,6 +383,30 @@ extern "C" {
  */
 #define RTE_PTYPE_TUNNEL_GRENAT             0x00006000
 /**
+ * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2123>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2123>
+ */
+#define RTE_PTYPE_TUNNEL_GTPC               0x00007000
+/**
+ * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2152>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2152>
+ */
+#define RTE_PTYPE_TUNNEL_GTPU               0x00008000
+/**
  * Mask of tunneling packet types.
  */
 #define RTE_PTYPE_TUNNEL_MASK               0x0000f000
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v6 2/8] net/i40e: update ptype and pctype info
  2017-09-29  5:18           ` [PATCH v6 0/8] GPT-C and GTP-U enabling Beilei Xing
  2017-09-29  5:18             ` [PATCH v6 1/8] mbuf: support GTP in software packet type parser Beilei Xing
@ 2017-09-29  5:18             ` Beilei Xing
  2017-09-29 13:22               ` Wu, Jingjing
  2017-09-29  5:18             ` [PATCH v6 3/8] net/i40e: support RSS for new pctype Beilei Xing
                               ` (6 subsequent siblings)
  8 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29  5:18 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Update new packet type and new pctype info when downloading a
profile.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
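
These tables are only filled in when a profile that defines the new
protocols is processed through rte_pmd_i40e_process_ddp_package(),
e.g. from testpmd (the profile file name is just an example, and the
port is typically stopped first):

	testpmd> port stop 0
	testpmd> ddp add 0 ./gtp.pkgo
	testpmd> port start 0
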
---
 drivers/net/i40e/i40e_ethdev.c  | 313 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_ethdev.h  |  24 +++
 drivers/net/i40e/rte_pmd_i40e.c |   6 +-
 3 files changed, 342 insertions(+), 1 deletion(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index acdf0de..a1371dc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -65,6 +65,7 @@
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
 #include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
 
 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
@@ -1042,6 +1043,21 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static void
+i40e_init_customized_info(struct i40e_pf *pf)
+{
+	int i;
+
+	/* Initialize customized pctype */
+	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
+		pf->customized_pctype[i].index = i;
+		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
+		pf->customized_pctype[i].valid = false;
+	}
+
+	pf->gtp_support = false;
+}
+
 static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
@@ -1307,6 +1323,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	/* initialize Traffic Manager configuration */
 	i40e_tm_conf_init(dev);
 
+	/* Initialize customized information */
+	i40e_init_customized_info(pf);
+
 	ret = i40e_init_ethtype_filter_list(dev);
 	if (ret < 0)
 		goto err_init_ethtype_filter_list;
@@ -10913,6 +10932,300 @@ is_i40e_supported(struct rte_eth_dev *dev)
 	return is_device_supported(dev, &rte_i40e_pmd);
 }
 
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
+{
+	int i;
+
+	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+		if (pf->customized_pctype[i].index == index)
+			return &pf->customized_pctype[i];
+	}
+	return NULL;
+}
+
+static int
+i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size, uint32_t proto_num,
+			      struct rte_pmd_i40e_proto_info *proto)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t pctype_num;
+	struct rte_pmd_i40e_ptype_info *pctype;
+	uint32_t buff_size;
+	struct i40e_customized_pctype *new_pctype = NULL;
+	uint8_t proto_id;
+	uint8_t pctype_value;
+	char name[64];
+	uint32_t i, j, n;
+	int ret;
+
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&pctype_num, sizeof(pctype_num),
+				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype number");
+		return -1;
+	}
+	if (!pctype_num) {
+		PMD_DRV_LOG(INFO, "No new pctype added");
+		return -1;
+	}
+
+	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
+	pctype = rte_zmalloc("new_pctype", buff_size, 0);
+	if (!pctype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+	/* get information about new pctype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)pctype, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype list");
+		rte_free(pctype);
+		return -1;
+	}
+
+	/* Update customized pctype. */
+	for (i = 0; i < pctype_num; i++) {
+		pctype_value = pctype[i].ptype_id;
+		memset(name, 0, sizeof(name));
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = pctype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				strcat(name, proto[n].name);
+				strcat(name, "_");
+				break;
+			}
+		}
+		name[strlen(name) - 1] = '\0';
+		if (!strcmp(name, "GTPC"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						      I40E_CUSTOMIZED_GTPC);
+		else if (!strcmp(name, "GTPU_IPV4"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						   I40E_CUSTOMIZED_GTPU_IPV4);
+		else if (!strcmp(name, "GTPU_IPV6"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						   I40E_CUSTOMIZED_GTPU_IPV6);
+		else if (!strcmp(name, "GTPU"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						      I40E_CUSTOMIZED_GTPU);
+		if (new_pctype) {
+			new_pctype->pctype = pctype_value;
+			new_pctype->valid = true;
+		}
+	}
+
+	rte_free(pctype);
+	return 0;
+}
+
+static int
+i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
+			       uint32_t pkg_size, uint32_t proto_num,
+			       struct rte_pmd_i40e_proto_info *proto)
+{
+	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
+	uint8_t port_id = dev->data->port_id;
+	uint32_t ptype_num;
+	struct rte_pmd_i40e_ptype_info *ptype;
+	uint32_t buff_size;
+	uint8_t proto_id;
+	char name[16];
+	uint32_t i, j, n;
+	bool inner_ip;
+	int ret;
+
+	/* get information about new ptype num */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&ptype_num, sizeof(ptype_num),
+				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype number");
+		return -1;
+	}
+	if (!ptype_num) {
+		PMD_DRV_LOG(INFO, "No new ptype added");
+		return -1;
+	}
+
+	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+	ptype = rte_zmalloc("new_ptype", buff_size, 0);
+	if (!ptype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+
+	/* get information about new ptype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)ptype, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype list");
+		rte_free(ptype);
+		return -1;
+	}
+
+	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
+	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
+	if (!ptype_mapping) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		rte_free(ptype);
+		return -1;
+	}
+
+	/* Update ptype mapping table. */
+	for (i = 0; i < ptype_num; i++) {
+		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
+		ptype_mapping[i].sw_ptype = 0;
+		inner_ip = false;
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = ptype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				memset(name, 0, sizeof(name));
+				strcpy(name, proto[n].name);
+				if (!strcmp(name, "IPV4") && !inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+					inner_ip = true;
+				} else if (!strcmp(name, "IPV4") && inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+				} else if (!strcmp(name, "IPV6") && !inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+					inner_ip = true;
+				} else if (!strcmp(name, "IPV6") && inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+				} else if (!strcmp(name, "IPV4FRAG")) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!strcmp(name, "IPV6FRAG")) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!strcmp(name, "GTPC"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPC;
+				else if (!strcmp(name, "GTPU"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPU;
+				else if (!strcmp(name, "UDP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_UDP;
+				else if (!strcmp(name, "TCP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_TCP;
+				else if (!strcmp(name, "SCTP"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_SCTP;
+				else if (!strcmp(name, "ICMP") ||
+					 !strcmp(name, "ICMPV6"))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_ICMP;
+
+				break;
+			}
+		}
+	}
+
+	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
+						ptype_num, 0);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
+		rte_free(ptype_mapping);
+		rte_free(ptype);
+		return -1;
+	}
+
+	rte_free(ptype_mapping);
+	rte_free(ptype);
+	return 0;
+}
+
+void
+i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t proto_num;
+	struct rte_pmd_i40e_proto_info *proto;
+	uint32_t buff_size;
+	uint32_t i;
+	int ret;
+
+	/* get information about protocol number */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				       (uint8_t *)&proto_num, sizeof(proto_num),
+				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol number");
+		return;
+	}
+	if (!proto_num) {
+		PMD_DRV_LOG(INFO, "No new protocol added");
+		return;
+	}
+
+	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
+	proto = rte_zmalloc("new_proto", buff_size, 0);
+	if (!proto) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return;
+	}
+
+	/* get information about protocol list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)proto, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol list");
+		rte_free(proto);
+		return;
+	}
+
+	/* Check if GTP is supported. */
+	for (i = 0; i < proto_num; i++) {
+		if (!strncmp(proto[i].name, "GTP", 3)) {
+			pf->gtp_support = true;
+			break;
+		}
+	}
+
+	/* Update customized pctype info */
+	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
+					    proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No pctype is updated.");
+
+	/* Update customized ptype info */
+	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
+					   proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No ptype is updated.");
+
+	rte_free(proto);
+}
+
 /* Create a QinQ cloud filter
  *
  * The Fortville NIC has limited resources for tunnel filters,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ad80f0f..73fb5c3 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -722,6 +722,21 @@ struct i40e_tm_conf {
 	bool committed;
 };
 
+enum i40e_new_pctype {
+	I40E_CUSTOMIZED_GTPC = 0,
+	I40E_CUSTOMIZED_GTPU_IPV4,
+	I40E_CUSTOMIZED_GTPU_IPV6,
+	I40E_CUSTOMIZED_GTPU,
+	I40E_CUSTOMIZED_MAX,
+};
+
+#define I40E_FILTER_PCTYPE_INVALID     0
+struct i40e_customized_pctype {
+	enum i40e_new_pctype index;  /* Indicate which customized pctype */
+	uint8_t pctype;   /* New pctype value */
+	bool valid;   /* Check if it's valid */
+};
+
 /*
  * Structure to store private data specific for PF instance.
  */
@@ -786,6 +801,11 @@ struct i40e_pf {
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
+
+	/* Dynamic Device Personalization */
+	bool gtp_support; /* 1 - support GTP-C and GTP-U */
+	/* customer customized pctype */
+	struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
 };
 
 enum pending_msg {
@@ -1003,6 +1023,10 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
 int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void i40e_tm_conf_init(struct rte_eth_dev *dev);
 void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
+void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+				 uint32_t pkg_size);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index f57e59b..5aa9c69 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1608,6 +1608,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
 		return -EINVAL;
 	}
 
+	i40e_update_customized_info(dev, buff, size);
+
 	/* Find metadata segment */
 	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
 							pkg_hdr);
@@ -2109,7 +2111,9 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
 	    tnl != RTE_PTYPE_TUNNEL_VXLAN &&
 	    tnl != RTE_PTYPE_TUNNEL_NVGRE &&
 	    tnl != RTE_PTYPE_TUNNEL_GENEVE &&
-	    tnl != RTE_PTYPE_TUNNEL_GRENAT)
+	    tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPC &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPU)
 		return -1;
 
 	if (il2 &&
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v6 3/8] net/i40e: support RSS for new pctype
  2017-09-29  5:18           ` [PATCH v6 0/8] GPT-C and GTP-U enabling Beilei Xing
  2017-09-29  5:18             ` [PATCH v6 1/8] mbuf: support GTP in software packet type parser Beilei Xing
  2017-09-29  5:18             ` [PATCH v6 2/8] net/i40e: update ptype and pctype info Beilei Xing
@ 2017-09-29  5:18             ` Beilei Xing
  2017-09-29 13:24               ` Wu, Jingjing
  2017-09-29  5:18             ` [PATCH v6 4/8] ethdev: add GTP items to support flow API Beilei Xing
                               ` (5 subsequent siblings)
  8 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29  5:18 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Enable RSS for new pctypes after downloading a
new profile.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index a1371dc..57d9bb3 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1934,6 +1934,31 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	return i40e_phy_conf_link(hw, abilities, speed, true);
 }
 
+static void
+i40e_customized_pctype_hash_set(struct i40e_pf *pf, bool enable)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint64_t hena;
+	int i;
+
+	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+		if (pf->customized_pctype[i].valid) {
+			if (enable)
+				hena |= 1ULL << pf->customized_pctype[i].pctype;
+			else
+				hena &= ~(1ULL <<
+					  pf->customized_pctype[i].pctype);
+		}
+	}
+
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+	I40E_WRITE_FLUSH(hw);
+}
+
 static int
 i40e_dev_start(struct rte_eth_dev *dev)
 {
@@ -2081,6 +2106,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
 			    "please call hierarchy_commit() "
 			    "before starting the port");
 
+	i40e_customized_pctype_hash_set(pf, true);
+
 	return I40E_SUCCESS;
 
 err_up:
@@ -2155,6 +2182,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
 	int i;
 	int ret;
 
+	i40e_customized_pctype_hash_set(pf, false);
+
 	PMD_INIT_FUNC_TRACE();
 
 	i40e_dev_stop(dev);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v6 4/8] ethdev: add GTP items to support flow API
  2017-09-29  5:18           ` [PATCH v6 0/8] GPT-C and GTP-U enabling Beilei Xing
                               ` (2 preceding siblings ...)
  2017-09-29  5:18             ` [PATCH v6 3/8] net/i40e: support RSS for new pctype Beilei Xing
@ 2017-09-29  5:18             ` Beilei Xing
  2017-09-29  8:15               ` Sean Harte
  2017-09-29  5:18             ` [PATCH v6 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
                               ` (4 subsequent siblings)
  8 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29  5:18 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds GTP, GTPC and GTPU items for the
generic flow API, and also exposes the item fields
through the testpmd flow command.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
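
A minimal sketch of how an application could use the new item through
the flow API (the port id, TEID and queue index are placeholders, and
whether a given pattern/action combination is accepted depends on the
PMD and on the loaded profile):

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = {
		.teid = rte_cpu_to_be_32(0x12345678), /* example TEID */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec,
		  .mask = &rte_flow_item_gtp_mask }, /* default mask: TEID only */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow = rte_flow_create(0, &attr, pattern, actions, &err);
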
---
 app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
 app/test-pmd/config.c                       |  3 ++
 doc/guides/prog_guide/rte_flow.rst          | 17 ++++++++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
 lib/librte_ether/rte_flow.h                 | 52 +++++++++++++++++++++++++++++
 5 files changed, 116 insertions(+)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a17a004..26c3e4f 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -171,6 +171,10 @@ enum index {
 	ITEM_GRE_PROTO,
 	ITEM_FUZZY,
 	ITEM_FUZZY_THRESH,
+	ITEM_GTP,
+	ITEM_GTP_TEID,
+	ITEM_GTPC,
+	ITEM_GTPU,
 
 	/* Validate/create actions. */
 	ACTIONS,
@@ -451,6 +455,9 @@ static const enum index next_item[] = {
 	ITEM_MPLS,
 	ITEM_GRE,
 	ITEM_FUZZY,
+	ITEM_GTP,
+	ITEM_GTPC,
+	ITEM_GTPU,
 	ZERO,
 };
 
@@ -588,6 +595,12 @@ static const enum index item_gre[] = {
 	ZERO,
 };
 
+static const enum index item_gtp[] = {
+	ITEM_GTP_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
 static const enum index next_action[] = {
 	ACTION_END,
 	ACTION_VOID,
@@ -1421,6 +1434,33 @@ static const struct token token_list[] = {
 		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
 					thresh)),
 	},
+	[ITEM_GTP] = {
+		.name = "gtp",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTP_TEID] = {
+		.name = "teid",
+		.help = "tunnel endpoint identifier",
+		.next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
+		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
+	},
+	[ITEM_GTPC] = {
+		.name = "gtpc",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTPU] = {
+		.name = "gtpu",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
 
 	/* Validate/create actions. */
 	[ACTIONS] = {
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index e8e311c..9b09bbd 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -949,6 +949,9 @@ static const struct {
 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
+	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
 };
 
 /** Compute storage space needed by item specification. */
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 662a912..73f12ee 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -955,6 +955,23 @@ Usage example, fuzzy match a TCPv4 packets:
    | 4     | END      |
    +-------+----------+
 
+Item: ``GTP``, ``GTPC``, ``GTPU``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Matches a GTPv1 header.
+
+Note: GTP, GTPC and GTPU use the same structure. GTPC and GTPU item
+are defined for a user-friendly API when creating GTP-C and GTP-U
+flow rules.
+
+- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
+  extension header flag (1b), sequence number flag (1b), N-PDU number
+  flag (1b).
+- ``msg_type``: message type.
+- ``msg_len``: message length.
+- ``teid``: tunnel endpoint identifier.
+- Default ``mask`` matches teid only.
+
 Actions
 ~~~~~~~
 
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 2ed62f5..4c2facc 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2696,6 +2696,10 @@ This section lists supported pattern items and their attributes, if any.
 
   - ``thresh {unsigned}``: accuracy threshold.
 
+- ``gtp``, ``gtpc``, ``gtpu``: match GTPv1 header.
+
+  - ``teid {unsigned}``: tunnel endpoint identifier.
+
 Actions list
 ^^^^^^^^^^^^
 
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index bba6169..b1a1b97 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -309,6 +309,33 @@ enum rte_flow_item_type {
 	 * See struct rte_flow_item_fuzzy.
 	 */
 	RTE_FLOW_ITEM_TYPE_FUZZY,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTP,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-C packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPC,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-U packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPU,
 };
 
 /**
@@ -735,6 +762,31 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
 #endif
 
 /**
+ * RTE_FLOW_ITEM_TYPE_GTP.
+ *
+ * Matches a GTPv1 header.
+ */
+struct rte_flow_item_gtp {
+	/**
+	 * Version (3b), protocol type (1b), reserved (1b),
+	 * Extension header flag (1b),
+	 * Sequence number flag (1b),
+	 * N-PDU number flag (1b).
+	 */
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type; /**< Message type. */
+	rte_be16_t msg_len; /**< Message length. */
+	rte_be32_t teid; /**< Tunnel endpoint identifier. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
+	.teid = RTE_BE32(0xffffffff),
+};
+#endif
+
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread
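
For reference, a minimal sketch (not part of the patch set) of how an
application could use the new GTP item through the generic flow API,
assuming the GTP profile has been loaded on an i40e port; port_id, the
queue index and the TEID value below are placeholders:

    #include <stdio.h>
    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Match GTP-U packets with a given TEID and steer them to queue 3:
     * pattern ETH / IPV4 / UDP / GTPU / END.
     */
    static int
    create_gtpu_rule(uint16_t port_id)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_gtp gtp_spec = {
                    .teid = RTE_BE32(0x12345678),   /* placeholder TEID */
            };
            struct rte_flow_item_gtp gtp_mask = {
                    .teid = RTE_BE32(0xffffffff),   /* same as default mask */
            };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP },
                    { .type = RTE_FLOW_ITEM_TYPE_GTPU,
                      .spec = &gtp_spec, .mask = &gtp_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 3 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error err;
            struct rte_flow *flow;

            flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
            if (!flow) {
                    printf("flow create failed: %s\n",
                           err.message ? err.message : "(no message)");
                    return -1;
            }
            return 0;
    }

With the testpmd additions above, the corresponding rule would look roughly
like "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is
0x12345678 / end actions queue index 3 / end".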

* [PATCH v6 5/8] net/i40e: finish integration FDIR with generic flow API
  2017-09-29  5:18           ` [PATCH v6 0/8] GPT-C and GTP-U enabling Beilei Xing
                               ` (3 preceding siblings ...)
  2017-09-29  5:18             ` [PATCH v6 4/8] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-09-29  5:18             ` Beilei Xing
  2017-09-29 13:28               ` Wu, Jingjing
  2017-09-29  5:19             ` [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
                               ` (3 subsequent siblings)
  8 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29  5:18 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

rte_eth_fdir_* structures are still used in the FDIR functions.
This patch adds i40e private FDIR-related structures and
functions to finish integrating FDIR with the generic flow API.
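
For illustration only (the values below are placeholders and not part of
this patch), the new path roughly amounts to filling the private structure
and handing it to the new helper:

    /* Sketch: what the rte_flow FDIR parser produces and what
     * i40e_flow_create() now passes to the programming path.
     * 'dev' is the rte_eth_dev being configured.
     */
    struct i40e_fdir_filter_conf conf = {
            .soft_id = 1,                   /* placeholder FD ID */
            .input = {
                    .pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
                    /* .flow and .flow_ext are filled from the pattern */
            },
            .action = {
                    .rx_queue = 0,          /* placeholder queue */
                    .behavior = I40E_FDIR_ACCEPT,
                    .report_status = I40E_FDIR_REPORT_ID,
            },
    };
    int ret = i40e_flow_add_del_fdir_filter(dev, &conf, 1 /* add */);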

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  83 ++++++-
 drivers/net/i40e/i40e_fdir.c   | 488 +++++++++++++++++++++++++++++++++++++++--
 drivers/net/i40e/i40e_flow.c   |  76 +++----
 3 files changed, 584 insertions(+), 63 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 73fb5c3..4d690a1 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -461,6 +461,80 @@ struct i40e_vmdq_info {
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
 /*
+ * A union that contains the inputs for all types of flows.
+ * Items in flows need to be in big endian.
+ */
+union i40e_fdir_flow {
+	struct rte_eth_l2_flow     l2_flow;
+	struct rte_eth_udpv4_flow  udp4_flow;
+	struct rte_eth_tcpv4_flow  tcp4_flow;
+	struct rte_eth_sctpv4_flow sctp4_flow;
+	struct rte_eth_ipv4_flow   ip4_flow;
+	struct rte_eth_udpv6_flow  udp6_flow;
+	struct rte_eth_tcpv6_flow  tcp6_flow;
+	struct rte_eth_sctpv6_flow sctp6_flow;
+	struct rte_eth_ipv6_flow   ipv6_flow;
+};
+
+/* A structure used to contain the extended inputs of a flow */
+struct i40e_fdir_flow_ext {
+	uint16_t vlan_tci;
+	uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/* It is filled by the flexible payload to match. */
+	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
+	uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+};
+
+/* A structure used to define the input for a flow director filter entry */
+struct i40e_fdir_input {
+	enum i40e_filter_pctype pctype;
+	union i40e_fdir_flow flow;
+	/* Flow fields to match, dependent on flow_type */
+	struct i40e_fdir_flow_ext flow_ext;
+	/* Additional fields to match */
+};
+
+/* Behavior to be taken if an FDIR filter matches */
+enum i40e_fdir_behavior {
+	I40E_FDIR_ACCEPT = 0,
+	I40E_FDIR_REJECT,
+	I40E_FDIR_PASSTHRU,
+};
+
+/* Flow director report status
+ * It defines what will be reported if an FDIR entry is matched.
+ */
+enum i40e_fdir_status {
+	I40E_FDIR_NO_REPORT_STATUS = 0, /* Report nothing. */
+	I40E_FDIR_REPORT_ID,            /* Only report FD ID. */
+	I40E_FDIR_REPORT_ID_FLEX_4,     /* Report FD ID and 4 flex bytes. */
+	I40E_FDIR_REPORT_FLEX_8,        /* Report 8 flex bytes. */
+};
+
+/* A structure used to define the action taken when an FDIR filter matches. */
+struct i40e_fdir_action {
+	uint16_t rx_queue;        /* Queue to assign the packet to on match. */
+	enum i40e_fdir_behavior behavior;     /* Behavior to be taken */
+	enum i40e_fdir_status report_status;  /* Status report option */
+	/* If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or
+	 * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+	 * flex bytes start from in flexible payload.
+	 */
+	uint8_t flex_off;
+};
+
+/* A structure used to define the flow director filter entry by filter_ctrl API
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct i40e_fdir_filter_conf {
+	uint32_t soft_id;
+	/* ID, a unique value is required when dealing with an FDIR entry */
+	struct i40e_fdir_input input;    /* Input set */
+	struct i40e_fdir_action action;  /* Action taken when match */
+};
+
+/*
  * Structure to store flex pit for flow diretor.
  */
 struct i40e_fdir_flex_pit {
@@ -483,7 +557,7 @@ struct i40e_fdir_flex_mask {
 
 struct i40e_fdir_filter {
 	TAILQ_ENTRY(i40e_fdir_filter) rules;
-	struct rte_eth_fdir_filter fdir;
+	struct i40e_fdir_filter_conf fdir;
 };
 
 TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
@@ -907,7 +981,7 @@ extern const struct rte_flow_ops i40e_flow_ops;
 
 union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_fdir_filter fdir_filter;
+	struct i40e_fdir_filter_conf fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
@@ -981,7 +1055,7 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
 int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
 				 struct i40e_ethertype_filter_input *input);
 int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
-			    struct rte_eth_fdir_input *input);
+			    struct i40e_fdir_input *input);
 struct i40e_tunnel_filter *
 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
 			     const struct i40e_tunnel_filter_input *input);
@@ -994,6 +1068,9 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf,
 int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 			     const struct rte_eth_fdir_filter *filter,
 			     bool add);
+int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 84c0a1f..1072a24 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -100,13 +100,18 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
 			const struct rte_eth_fdir_filter *filter,
 			bool add);
-static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter);
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input);
+			const struct i40e_fdir_input *input);
 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
 				   struct i40e_fdir_filter *filter);
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 
 static int
 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -934,6 +939,263 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static inline int
+i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+				unsigned char *raw_pkt,
+				bool vlan)
+{
+	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+	uint16_t *ether_type;
+	uint8_t len = 2 * sizeof(struct ether_addr);
+	struct ipv4_hdr *ip;
+	struct ipv6_hdr *ip6;
+	static const uint8_t next_proto[] = {
+		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+	};
+
+	raw_pkt += 2 * sizeof(struct ether_addr);
+	if (vlan && fdir_input->flow_ext.vlan_tci) {
+		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+		rte_memcpy(raw_pkt + sizeof(uint16_t),
+			   &fdir_input->flow_ext.vlan_tci,
+			   sizeof(uint16_t));
+		raw_pkt += sizeof(vlan_frame);
+		len += sizeof(vlan_frame);
+	}
+	ether_type = (uint16_t *)raw_pkt;
+	raw_pkt += sizeof(uint16_t);
+	len += sizeof(uint16_t);
+
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		*ether_type = fdir_input->flow.l2_flow.ether_type;
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		ip = (struct ipv4_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+		/* set length to the default value */
+		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+					fdir_input->flow.ip4_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+					fdir_input->flow.ip4_flow.ttl :
+					I40E_FDIR_IP_DEFAULT_TTL;
+		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+		len += sizeof(struct ipv4_hdr);
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		ip6 = (struct ipv6_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		ip6->vtc_flow =
+			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					 (fdir_input->flow.ipv6_flow.tc <<
+					  I40E_FDIR_IPv6_TC_OFFSET));
+		ip6->payload_len =
+			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+					fdir_input->flow.ipv6_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+					fdir_input->flow.ipv6_flow.hop_limits :
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		rte_memcpy(&ip6->src_addr,
+			   &fdir_input->flow.ipv6_flow.dst_ip,
+			   IPV6_ADDR_LEN);
+		rte_memcpy(&ip6->dst_addr,
+			   &fdir_input->flow.ipv6_flow.src_ip,
+			   IPV6_ADDR_LEN);
+		len += sizeof(struct ipv6_hdr);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
+	}
+	return len;
+}
+
+/**
+ * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+			     const struct i40e_fdir_input *fdir_input,
+			     unsigned char *raw_pkt)
+{
+	unsigned char *payload, *ptr;
+	struct udp_hdr *udp;
+	struct tcp_hdr *tcp;
+	struct sctp_hdr *sctp;
+	uint8_t size, dst = 0;
+	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+	int len;
+
+	/* fill the ethernet and IP head */
+	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+					      !!fdir_input->flow_ext.vlan_tci);
+	if (len < 0)
+		return -EINVAL;
+
+	/* fill the L4 head */
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		payload = raw_pkt + len;
+		/**
+		 * ARP packet is a special case on which the payload
+		 * starts after the whole ARP header
+		 */
+		if (fdir_input->flow.l2_flow.ether_type ==
+				rte_cpu_to_be_16(ETHER_TYPE_ARP))
+			payload += sizeof(struct arp_hdr);
+		set_idx = I40E_FLXPLD_L2_IDX;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
+		return -EINVAL;
+	}
+
+	/* fill the flexbytes to payload */
+	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+		size = pf->fdir.flex_set[pit_idx].size;
+		if (size == 0)
+			continue;
+		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+		ptr = payload +
+		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+		(void)rte_memcpy(ptr,
+				 &fdir_input->flow_ext.flexbytes[dst],
+				 size * sizeof(uint16_t));
+	}
+
+	return 0;
+}
+
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -1007,17 +1269,17 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
 }
 
 static int
-i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter)
 {
-	rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
 	return 0;
 }
 
 /* Check if there exists the flow director filter */
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input)
+			const struct i40e_fdir_input *input)
 {
 	int ret;
 
@@ -1052,7 +1314,7 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
 
 /* Delete a flow director filter from the SW list */
 int
-i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
 {
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	struct i40e_fdir_filter *filter;
@@ -1082,16 +1344,13 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
  */
 int
 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
-			    const struct rte_eth_fdir_filter *filter,
-			    bool add)
+			 const struct rte_eth_fdir_filter *filter,
+			 bool add)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
 	enum i40e_filter_pctype pctype;
-	struct i40e_fdir_info *fdir_info = &pf->fdir;
-	struct i40e_fdir_filter *fdir_filter, *node;
-	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
 	int ret = 0;
 
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1114,6 +1373,68 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+		return ret;
+	}
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		/* get translated pctype value in fd pctype register */
+		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+			hw, I40E_GLQF_FD_PCTYPES(
+			(int)i40e_flowtype_to_pctype(
+			filter->input.flow_type)));
+	} else
+		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
+	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+			    pctype);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the Ethernet device structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct i40e_fdir_filter_conf *filter,
+			      bool add)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	enum i40e_filter_pctype pctype;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_fdir_filter *fdir_filter, *node;
+	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+	int ret = 0;
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
+		return -ENOTSUP;
+	}
+
+	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "Invalid queue ID");
+		return -EINVAL;
+	}
+	if (filter->input.flow_ext.is_vf &&
+	    filter->input.flow_ext.dst_id >= pf->vf_num) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID");
+		return -EINVAL;
+	}
+
 	/* Check if there is the filter in SW list */
 	memset(&check_filter, 0, sizeof(check_filter));
 	i40e_fdir_filter_convert(filter, &check_filter);
@@ -1132,7 +1453,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 
 	memset(pkt, 0, I40E_FDIR_PKT_LEN);
 
-	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
 		return ret;
@@ -1142,12 +1463,11 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		/* get translated pctype value in fd pctype register */
 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
 			hw, I40E_GLQF_FD_PCTYPES(
-			(int)i40e_flowtype_to_pctype(
-			filter->input.flow_type)));
+			(int)filter->input.pctype));
 	} else
-		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+		pctype = filter->input.pctype;
 
-	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
 			    pctype);
@@ -1302,6 +1622,140 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
 }
 
 /*
+ * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
+ * This is done with a Flow Director Programming Descriptor followed by a
+ * packet structure that contains the filter fields that need to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add)
+{
+	struct i40e_tx_queue *txq = pf->fdir.txq;
+	struct i40e_rx_queue *rxq = pf->fdir.rxq;
+	const struct i40e_fdir_action *fdir_action = &filter->action;
+	volatile struct i40e_tx_desc *txdp;
+	volatile struct i40e_filter_program_desc *fdirdp;
+	uint32_t td_cmd;
+	uint16_t vsi_id, i;
+	uint8_t dest;
+
+	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+	fdirdp = (volatile struct i40e_filter_program_desc *)
+				(&txq->tx_ring[txq->tx_tail]);
+
+	fdirdp->qindex_flex_ptype_vsi =
+			rte_cpu_to_le_32((fdir_action->rx_queue <<
+					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((fdir_action->flex_off <<
+					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((pctype <<
+					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+	if (filter->input.flow_ext.is_vf)
+		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+	else
+		/* Use LAN VSI Id by default */
+		vsi_id = pf->main_vsi->vsi_id;
+	fdirdp->qindex_flex_ptype_vsi |=
+		rte_cpu_to_le_32(((uint32_t)vsi_id <<
+				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+	fdirdp->dtype_cmd_cntindex =
+			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+	if (add)
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+	else
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	if (fdir_action->behavior == I40E_FDIR_REJECT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+	else {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
+		return -EINVAL;
+	}
+
+	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+				I40E_TXD_FLTR_QW1_DEST_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+		rte_cpu_to_le_32((fdir_action->report_status <<
+				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(
+			((uint32_t)pf->fdir.match_counter_index <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+	txdp = &txq->tx_ring[txq->tx_tail + 1];
+	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+	td_cmd = I40E_TX_DESC_CMD_EOP |
+		 I40E_TX_DESC_CMD_RS  |
+		 I40E_TX_DESC_CMD_DUMMY;
+
+	txdp->cmd_type_offset_bsz =
+		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+	/* Update the tx tail register */
+	rte_wmb();
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if ((txdp->cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+			break;
+		rte_delay_us(1);
+	}
+	if (i >= I40E_FDIR_MAX_WAIT_US) {
+		PMD_DRV_LOG(ERR,
+		    "Failed to program FDIR filter: timed out waiting for DD on the TX queue.");
+		return -ETIMEDOUT;
+	}
+	/* wait up to 10 ms in total for the programming status */
+	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if (i40e_check_fdir_programming_status(rxq) >= 0)
+			return 0;
+		rte_delay_us(1);
+	}
+	PMD_DRV_LOG(ERR,
+		 "Failed to program FDIR filter: programming status reported.");
+	return -ETIMEDOUT;
+}
+
+/*
  * i40e_fdir_flush - clear all filters of Flow Director table
  * @pf: board private structure
  */
@@ -1580,7 +2034,7 @@ i40e_fdir_filter_restore(struct i40e_pf *pf)
 	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
 
 	TAILQ_FOREACH(f, fdir_list, rules)
-		i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
 
 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
 	guarant_cnt =
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b92719a..73af7fd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -84,11 +84,11 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					const struct rte_flow_item *pattern,
 					struct rte_flow_error *error,
-					struct rte_eth_fdir_filter *filter);
+					struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 				       const struct rte_flow_action *actions,
 				       struct rte_flow_error *error,
-				       struct rte_eth_fdir_filter *filter);
+				       struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				 const struct rte_flow_action *actions,
 				 struct rte_flow_error *error,
@@ -2315,7 +2315,7 @@ static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			     const struct rte_flow_item *pattern,
 			     struct rte_flow_error *error,
-			     struct rte_eth_fdir_filter *filter)
+			     struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_item *item = pattern;
@@ -2329,8 +2329,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
-	enum i40e_filter_pctype pctype;
+	enum i40e_filter_pctype pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
@@ -2402,7 +2401,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2420,7 +2419,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2457,13 +2456,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					input_set |= I40E_INSET_IPV4_PROTO;
 
 				/* Get filter info */
-				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+				pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 				/* Check if it is fragment. */
 				frag_off = ipv4_spec->hdr.fragment_offset;
 				frag_off = rte_be_to_cpu_16(frag_off);
 				if (frag_off & IPV4_HDR_OFFSET_MASK ||
 				    frag_off & IPV4_HDR_MF_FLAG)
-					flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
 
 				/* Get the filter info */
 				filter->input.flow.ip4_flow.proto =
@@ -2535,11 +2534,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				/* Check if it is fragment. */
 				if (ipv6_spec->hdr.proto ==
 				    I40E_IPV6_FRAG_HEADER)
-					flow_type =
-						RTE_ETH_FLOW_FRAG_IPV6;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
 				else
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+					pctype =
+					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
 			}
 
 			layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2572,11 +2570,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.tcp4_flow.src_port =
@@ -2616,11 +2614,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.udp4_flow.src_port =
@@ -2663,11 +2661,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.sctp4_flow.src_port =
@@ -2776,14 +2774,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	pctype = i40e_flowtype_to_pctype(flow_type);
-	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Unsupported flow type");
-		return -rte_errno;
-	}
-
 	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
 	if (ret == -1) {
 		rte_flow_error_set(error, EINVAL,
@@ -2797,7 +2787,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->input.flow_type = flow_type;
+	filter->input.pctype = pctype;
 
 	/* Store flex mask to SW */
 	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
@@ -2832,7 +2822,7 @@ static int
 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 			    const struct rte_flow_action *actions,
 			    struct rte_flow_error *error,
-			    struct rte_eth_fdir_filter *filter)
+			    struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
@@ -2855,13 +2845,13 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 					   "Invalid queue ID for FDIR.");
 			return -rte_errno;
 		}
-		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		filter->action.behavior = I40E_FDIR_ACCEPT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_DROP:
-		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+		filter->action.behavior = I40E_FDIR_REJECT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
-		filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+		filter->action.behavior = I40E_FDIR_PASSTHRU;
 		break;
 	default:
 		rte_flow_error_set(error, EINVAL,
@@ -2876,11 +2866,11 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_MARK:
 		mark_spec = (const struct rte_flow_action_mark *)act->conf;
-		filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+		filter->action.report_status = I40E_FDIR_REPORT_ID;
 		filter->soft_id = mark_spec->id;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
-		filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
 		break;
 	case RTE_FLOW_ACTION_TYPE_END:
 		return 0;
@@ -2911,7 +2901,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error,
 			    union i40e_filter_t *filter)
 {
-	struct rte_eth_fdir_filter *fdir_filter =
+	struct i40e_fdir_filter_conf *fdir_filter =
 		&filter->fdir_filter;
 	int ret;
 
@@ -3877,7 +3867,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_ethertype_filter_list);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 				       &cons_filter.fdir_filter, 1);
 		if (ret)
 			goto free_flow;
@@ -3927,7 +3917,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 			      (struct i40e_tunnel_filter *)flow->rule);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
 		break;
 	default:
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-29  5:18           ` [PATCH v6 0/8] GPT-C and GTP-U enabling Beilei Xing
                               ` (4 preceding siblings ...)
  2017-09-29  5:18             ` [PATCH v6 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
@ 2017-09-29  5:19             ` Beilei Xing
  2017-09-29  8:15               ` Sean Harte
  2017-09-29  5:19             ` [PATCH v6 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
                               ` (2 subsequent siblings)
  8 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29  5:19 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds FDIR support for GTP-C and GTP-U. The
input set for both GTP-C and GTP-U is the TEID.
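
For illustration only (not part of this patch): one of the new patterns the
parser accepts is ETH / IPV4 / UDP / GTPU / IPV4, which selects the
customized GTP-U-with-inner-IPv4 pctype. Only the TEID is matched; the
other GTP fields must be left unmasked, and the inner IPv4 item carries no
spec/mask since it only selects the inner IP type. A sketch of such a
pattern (the TEID value is a placeholder):

    struct rte_flow_item_gtp gtp_spec = {
            .teid = RTE_BE32(0x1234),               /* placeholder TEID */
    };
    struct rte_flow_item_gtp gtp_mask = {
            .teid = RTE_BE32(0xffffffff),           /* TEID only */
    };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },    /* outer IPv4 */
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_GTPU,
              .spec = &gtp_spec, .mask = &gtp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },    /* inner IPv4, no spec/mask */
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };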

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  30 +++++
 drivers/net/i40e/i40e_fdir.c   | 200 ++++++++++++++++++++++---------
 drivers/net/i40e/i40e_flow.c   | 263 +++++++++++++++++++++++++++++++++++------
 3 files changed, 396 insertions(+), 97 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 4d690a1..502f6c6 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -460,6 +460,25 @@ struct i40e_vmdq_info {
 #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
+/* A structure used to define the input for GTP flow */
+struct i40e_gtp_flow {
+	struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */
+	uint8_t msg_type;              /* Message type. */
+	uint32_t teid;                 /* TEID in big endian. */
+};
+
+/* A structure used to define the input for GTP IPV4 flow */
+struct i40e_gtp_ipv4_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv4_flow ip4;
+};
+
+/* A structure used to define the input for GTP IPV6 flow */
+struct i40e_gtp_ipv6_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv6_flow ip6;
+};
+
 /*
  * A union that contains the inputs for all types of flows.
  * Items in flows need to be in big endian.
@@ -474,6 +493,14 @@ union i40e_fdir_flow {
 	struct rte_eth_tcpv6_flow  tcp6_flow;
 	struct rte_eth_sctpv6_flow sctp6_flow;
 	struct rte_eth_ipv6_flow   ipv6_flow;
+	struct i40e_gtp_flow       gtp_flow;
+	struct i40e_gtp_ipv4_flow  gtp_ipv4_flow;
+	struct i40e_gtp_ipv6_flow  gtp_ipv6_flow;
+};
+
+enum i40e_fdir_ip_type {
+	I40E_FDIR_IPTYPE_IPV4,
+	I40E_FDIR_IPTYPE_IPV6,
 };
 
 /* A structure used to contain the extended inputs of a flow */
@@ -483,6 +510,9 @@ struct i40e_fdir_flow_ext {
 	/* It is filled by the flexible payload to match. */
 	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
 	uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+	bool inner_ip;   /* Whether there is an inner IP header */
+	enum i40e_fdir_ip_type iip_type; /* IP type of the inner IP header */
+	bool customized_pctype; /* Whether a customized pctype is used */
 };
 
 /* A structure used to define the input for a flow director filter entry */
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 1072a24..55c86ee 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -71,6 +71,9 @@
 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
 #define I40E_FDIR_UDP_DEFAULT_LEN           400
+#define I40E_FDIR_GTP_DEFAULT_LEN           384
+#define I40E_FDIR_INNER_IP_DEFAULT_LEN      384
+#define I40E_FDIR_INNER_IPv6_DEFAULT_LEN    344
 
 /* Wait time for fdir filter programming */
 #define I40E_FDIR_MAX_WAIT_US 10000
@@ -939,16 +942,34 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static struct i40e_customized_pctype *
+i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
+{
+	struct i40e_customized_pctype *cus_pctype;
+	enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
+
+	for (; i < I40E_CUSTOMIZED_MAX; i++) {
+		cus_pctype = &pf->customized_pctype[i];
+		if (pctype == cus_pctype->pctype)
+			return cus_pctype;
+	}
+	return NULL;
+}
+
 static inline int
-i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
+				const struct i40e_fdir_input *fdir_input,
 				unsigned char *raw_pkt,
 				bool vlan)
 {
+	struct i40e_customized_pctype *cus_pctype = NULL;
 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
 	uint16_t *ether_type;
 	uint8_t len = 2 * sizeof(struct ether_addr);
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
+	uint8_t pctype = fdir_input->pctype;
+	bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
 	static const uint8_t next_proto[] = {
 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
@@ -975,27 +996,30 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 	raw_pkt += sizeof(uint16_t);
 	len += sizeof(uint16_t);
 
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	if (is_customized_pctype) {
+		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+		if (!cus_pctype)
+			PMD_DRV_LOG(ERR, "unknown pctype %u.",
+				    fdir_input->pctype);
+	}
+
+	if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
 		*ether_type = fdir_input->flow.l2_flow.ether_type;
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
+		 is_customized_pctype) {
 		ip = (struct ipv4_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
 		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
 		/* set length to the default value */
 		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
-		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
-					fdir_input->flow.ip4_flow.proto :
-					next_proto[fdir_input->pctype];
 		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
-					fdir_input->flow.ip4_flow.ttl :
-					I40E_FDIR_IP_DEFAULT_TTL;
+			fdir_input->flow.ip4_flow.ttl :
+			I40E_FDIR_IP_DEFAULT_TTL;
 		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
 		/**
 		 * The source and destination fields in the transmitted packet
@@ -1004,13 +1028,22 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		 */
 		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
 		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+
+		if (!is_customized_pctype)
+			ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+				fdir_input->flow.ip4_flow.proto :
+				next_proto[fdir_input->pctype];
+		else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
+			ip->next_proto_id = IPPROTO_UDP;
 		len += sizeof(struct ipv4_hdr);
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		ip6 = (struct ipv6_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
@@ -1021,11 +1054,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		ip6->payload_len =
 			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
 		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
-					fdir_input->flow.ipv6_flow.proto :
-					next_proto[fdir_input->pctype];
+			fdir_input->flow.ipv6_flow.proto :
+			next_proto[fdir_input->pctype];
 		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
-					fdir_input->flow.ipv6_flow.hop_limits :
-					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+			fdir_input->flow.ipv6_flow.hop_limits :
+			I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
 		/**
 		 * The source and destination fields in the transmitted packet
 		 * need to be presented in a reversed order with respect
@@ -1038,12 +1071,12 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 			   &fdir_input->flow.ipv6_flow.src_ip,
 			   IPV6_ADDR_LEN);
 		len += sizeof(struct ipv6_hdr);
-		break;
-	default:
+	} else {
 		PMD_DRV_LOG(ERR, "unknown pctype %u.",
 			    fdir_input->pctype);
 		return -1;
 	}
+
 	return len;
 }
 
@@ -1058,23 +1091,28 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 			     const struct i40e_fdir_input *fdir_input,
 			     unsigned char *raw_pkt)
 {
-	unsigned char *payload, *ptr;
+	unsigned char *payload = NULL;
+	unsigned char *ptr;
 	struct udp_hdr *udp;
 	struct tcp_hdr *tcp;
 	struct sctp_hdr *sctp;
+	struct rte_flow_item_gtp *gtp;
+	struct ipv4_hdr *gtp_ipv4;
+	struct ipv6_hdr *gtp_ipv6;
 	uint8_t size, dst = 0;
 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
 	int len;
+	uint8_t pctype = fdir_input->pctype;
+	struct i40e_customized_pctype *cus_pctype;
 
 	/* fill the ethernet and IP head */
-	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
 					      !!fdir_input->flow_ext.vlan_tci);
 	if (len < 0)
 		return -EINVAL;
 
 	/* fill the L4 head */
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1085,9 +1123,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1098,9 +1134,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1111,15 +1145,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1130,9 +1160,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1143,9 +1171,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
 		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1156,14 +1182,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	} else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
 		payload = raw_pkt + len;
 		/**
 		 * ARP packet is a special case on which the payload
@@ -1173,10 +1196,69 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 				rte_cpu_to_be_16(ETHER_TYPE_ARP))
 			payload += sizeof(struct arp_hdr);
 		set_idx = I40E_FLXPLD_L2_IDX;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
-		return -EINVAL;
+	} else if (fdir_input->flow_ext.customized_pctype) {
+		/* If customized pctype is used */
+		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+		if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
+			udp = (struct udp_hdr *)(raw_pkt + len);
+			udp->dgram_len =
+				rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+			gtp = (struct rte_flow_item_gtp *)
+				((unsigned char *)udp + sizeof(struct udp_hdr));
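+			/* GTPv1 header defaults for the dummy programming
+			 * packet: 0x30 = version 1, protocol type GTP (PT=1),
+			 * no optional fields; message type 0x01 is Echo
+			 * Request unless overridden below.
+			 */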
+			gtp->v_pt_rsv_flags = 0x30;
+			gtp->msg_len =
+				rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+			gtp->teid = fdir_input->flow.gtp_flow.teid;
+			gtp->msg_type = 0x1;
+
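+			/* GTP-C uses UDP destination port 2123,
+			 * GTP-U uses port 2152.
+			 */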
+			if (cus_pctype->index == I40E_CUSTOMIZED_GTPC)
+				udp->dst_port = rte_cpu_to_be_16(2123);
+			else
+				udp->dst_port = rte_cpu_to_be_16(2152);
+
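+			/* For GTP-U with an inner IP header, use message
+			 * type 0xFF (G-PDU) and append a default inner IP
+			 * header; the flexible payload then starts after
+			 * that header.
+			 */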
+			if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
+				gtp->msg_type = 0xFF;
+				gtp_ipv4 = (struct ipv4_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv4->version_ihl =
+					I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+				gtp_ipv4->next_proto_id = IPPROTO_IP;
+				gtp_ipv4->total_length =
+					rte_cpu_to_be_16(
+						I40E_FDIR_INNER_IP_DEFAULT_LEN);
+				payload = (unsigned char *)gtp_ipv4 +
+					sizeof(struct ipv4_hdr);
+			} else if (cus_pctype->index ==
+				   I40E_CUSTOMIZED_GTPU_IPV6) {
+				gtp->msg_type = 0xFF;
+				gtp_ipv6 = (struct ipv6_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv6->vtc_flow =
+					rte_cpu_to_be_32(
+					       I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					       (0 << I40E_FDIR_IPv6_TC_OFFSET));
+				gtp_ipv6->proto = IPPROTO_NONE;
+				gtp_ipv6->payload_len =
+					rte_cpu_to_be_16(
+					      I40E_FDIR_INNER_IPv6_DEFAULT_LEN);
+				gtp_ipv6->hop_limits =
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+				payload = (unsigned char *)gtp_ipv6 +
+					sizeof(struct ipv6_hdr);
+			} else
+				payload = (unsigned char *)gtp +
+					sizeof(struct rte_flow_item_gtp);
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
 	}
 
 	/* fill the flexbytes to payload */
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 73af7fd..ea81ecb 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -189,6 +189,40 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
@@ -216,6 +250,40 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_RAW,
@@ -1576,10 +1644,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	/* FDIR - support default flow type with flexible payload */
 	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -2302,6 +2378,42 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf,
 	return 0;
 }
 
+static uint8_t
+i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
+				enum rte_flow_item_type item_type,
+				struct i40e_fdir_filter_conf *filter)
+{
+	struct i40e_customized_pctype *cus_pctype = NULL;
+
+	switch (item_type) {
+	case RTE_FLOW_ITEM_TYPE_GTPC:
+		cus_pctype = i40e_find_customized_pctype(pf,
+							 I40E_CUSTOMIZED_GTPC);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GTPU:
+		if (!filter->input.flow_ext.inner_ip)
+			cus_pctype = i40e_find_customized_pctype(pf,
+							 I40E_CUSTOMIZED_GTPU);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV4)
+			cus_pctype = i40e_find_customized_pctype(pf,
+						 I40E_CUSTOMIZED_GTPU_IPV4);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV6)
+			cus_pctype = i40e_find_customized_pctype(pf,
+						 I40E_CUSTOMIZED_GTPU_IPV6);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported item type");
+		break;
+	}
+
+	if (cus_pctype)
+		return cus_pctype->pctype;
+
+	return I40E_FILTER_PCTYPE_INVALID;
+}
+
 /* 1. Last in item should be NULL as range is not supported.
  * 2. Supported patterns: refer to array i40e_supported_patterns.
  * 3. Supported flow type and input set: refer to array
@@ -2326,14 +2438,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	enum i40e_filter_pctype pctype = 0;
+	uint8_t pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
 	uint32_t i, j;
 	uint8_t  ipv6_addr_mask[16] = {
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -2351,12 +2465,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	uint16_t outer_tpid;
 	uint16_t ether_type;
 	uint32_t vtc_flow_cpu;
+	bool outer_ip = true;
 	int ret;
 
 	memset(off_arr, 0, sizeof(off_arr));
 	memset(len_arr, 0, sizeof(len_arr));
 	memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
 	outer_tpid = i40e_get_outer_vlan(dev);
+	filter->input.flow_ext.customized_pctype = false;
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
@@ -2430,7 +2546,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv4_mask =
 				(const struct rte_flow_item_ipv4 *)item->mask;
 
-			if (ipv4_spec && ipv4_mask) {
+			if (ipv4_spec && ipv4_mask && outer_ip) {
 				/* Check IPv4 mask and update input set */
 				if (ipv4_mask->hdr.version_ihl ||
 				    ipv4_mask->hdr.total_length ||
@@ -2475,9 +2591,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					ipv4_spec->hdr.src_addr;
 				filter->input.flow.ip4_flow.dst_ip =
 					ipv4_spec->hdr.dst_addr;
+
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV4;
+			} else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv4 mask.");
+				return -rte_errno;
 			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+			if (outer_ip)
+				outer_ip = false;
 
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -2487,7 +2616,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv6_mask =
 				(const struct rte_flow_item_ipv6 *)item->mask;
 
-			if (ipv6_spec && ipv6_mask) {
+			if (ipv6_spec && ipv6_mask && outer_ip) {
 				/* Check IPv6 mask and update input set */
 				if (ipv6_mask->hdr.payload_len) {
 					rte_flow_error_set(error, EINVAL,
@@ -2538,10 +2667,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				else
 					pctype =
 					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
-			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV6;
+			} else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv6 mask");
+				return -rte_errno;
+			}
 
+			if (outer_ip)
+				outer_ip = false;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
@@ -2636,6 +2777,37 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			layer_idx = I40E_FLXPLD_L4_IDX;
 
 			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			if (!pf->gtp_support) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported protocol");
+				return -rte_errno;
+			}
+
+			gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+				    gtp_mask->msg_type ||
+				    gtp_mask->msg_len ||
+				    gtp_mask->teid != UINT32_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				filter->input.flow.gtp_flow.teid =
+					gtp_spec->teid;
+				filter->input.flow_ext.customized_pctype = true;
+				cus_proto = item_type;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
 			sctp_spec =
 				(const struct rte_flow_item_sctp *)item->spec;
@@ -2774,43 +2946,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Conflict with the first rule's input set.");
-		return -rte_errno;
-	} else if (ret == -EINVAL) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Invalid pattern mask.");
-		return -rte_errno;
+	/* Get customized pctype value */
+	if (filter->input.flow_ext.customized_pctype) {
+		pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
+		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported pctype");
+			return -rte_errno;
+		}
 	}
 
-	filter->input.pctype = pctype;
+	/* If customized pctype is not used, set fdir configuration.*/
+	if (!filter->input.flow_ext.customized_pctype) {
+		ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Conflict with the first rule's input set.");
+			return -rte_errno;
+		} else if (ret == -EINVAL) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Invalid pattern mask.");
+			return -rte_errno;
+		}
 
-	/* Store flex mask to SW */
-	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Exceed maximal number of bitmasks");
-		return -rte_errno;
-	} else if (ret == -2) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Conflict with the first flexible rule");
-		return -rte_errno;
-	} else if (ret > 0)
-		cfg_flex_msk = false;
+		/* Store flex mask to SW */
+		ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Exceed maximal number of bitmasks");
+			return -rte_errno;
+		} else if (ret == -2) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Conflict with the first flexible rule");
+			return -rte_errno;
+		} else if (ret > 0)
+			cfg_flex_msk = false;
 
-	if (cfg_flex_pit)
-		i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+		if (cfg_flex_pit)
+			i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
 
-	if (cfg_flex_msk)
-		i40e_flow_set_fdir_flex_msk(pf, pctype);
+		if (cfg_flex_msk)
+			i40e_flow_set_fdir_flex_msk(pf, pctype);
+	}
+
+	filter->input.pctype = pctype;
 
 	return 0;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v6 7/8] net/i40e: add cloud filter parsing function for GTP
  2017-09-29  5:18           ` [PATCH v6 0/8] GTP-C and GTP-U enabling Beilei Xing
                               ` (5 preceding siblings ...)
  2017-09-29  5:19             ` [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-09-29  5:19             ` Beilei Xing
  2017-09-29  5:19             ` [PATCH v6 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GTP-C and GTP-U enabling Beilei Xing
  8 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-29  5:19 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds the i40e_flow_parse_gtp_filter parsing
function for GTP-C and GTP-U to support cloud filters.
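
For illustration, a rule using the new GTP items could be created through
the generic flow API roughly as follows. This is only a sketch: the port
id, TEID value and queue index are arbitrary placeholders, and whether the
rule is handled by the FDIR path or by this cloud filter path depends on
the pattern/action combination.

#include <rte_flow.h>

/* Sketch: steer GTP-U packets with TEID 0x1234 to queue 3. */
static struct rte_flow *
create_gtpu_teid_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x1234) };
	struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}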

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |   2 +
 drivers/net/i40e/i40e_flow.c   | 151 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 153 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 502f6c6..436ca2c 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -703,6 +703,8 @@ enum i40e_tunnel_type {
 	I40E_TUNNEL_TYPE_MPLSoUDP,
 	I40E_TUNNEL_TYPE_MPLSoGRE,
 	I40E_TUNNEL_TYPE_QINQ,
+	I40E_TUNNEL_TYPE_GTPC,
+	I40E_TUNNEL_TYPE_GTPU,
 	I40E_TUNNEL_TYPE_MAX,
 };
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index ea81ecb..2bf7098 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -125,6 +125,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 				       const struct rte_flow_action actions[],
 				       struct rte_flow_error *error,
 				       union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+				      const struct rte_flow_attr *attr,
+				      const struct rte_flow_item pattern[],
+				      const struct rte_flow_action actions[],
+				      struct rte_flow_error *error,
+				      union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
 				      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -1808,6 +1814,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+	/* GTP-C & GTP-U */
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
 	/* QINQ */
 	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
 };
@@ -3823,6 +3834,146 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 }
 
 /* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ *    filled with 0.
+ */
+static int
+i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
+			    const struct rte_flow_item *pattern,
+			    struct rte_flow_error *error,
+			    struct i40e_tunnel_filter_conf *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_gtp *gtp_spec;
+	const struct rte_flow_item_gtp *gtp_mask;
+	enum rte_flow_item_type item_type;
+
+	if (!pf->gtp_support) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   item,
+				   "GTP is not supported by default.");
+		return -rte_errno;
+	}
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+			/* IPv4 is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec =
+				(const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask =
+				(const struct rte_flow_item_gtp *)item->mask;
+
+			if (!gtp_spec || !gtp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP item");
+				return -rte_errno;
+			}
+
+			if (gtp_mask->v_pt_rsv_flags ||
+			    gtp_mask->msg_type ||
+			    gtp_mask->msg_len ||
+			    gtp_mask->teid != UINT32_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+				return -rte_errno;
+			}
+
+			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+
+			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+			   const struct rte_flow_attr *attr,
+			   const struct rte_flow_item pattern[],
+			   const struct rte_flow_action actions[],
+			   struct rte_flow_error *error,
+			   union i40e_filter_t *filter)
+{
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
+	int ret;
+
+	ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+					  error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+	return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
  * 2. Supported filter types: QINQ.
  * 3. Mask of fields which need to be matched should be
  *    filled with 1.
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v6 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U
  2017-09-29  5:18           ` [PATCH v6 0/8] GTP-C and GTP-U enabling Beilei Xing
                               ` (6 preceding siblings ...)
  2017-09-29  5:19             ` [PATCH v6 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
@ 2017-09-29  5:19             ` Beilei Xing
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GTP-C and GTP-U enabling Beilei Xing
  8 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-29  5:19 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch sets the TEID of GTP-C and GTP-U as the filter type
by replacing the existing filter types inner_mac and TUNNEL_KEY.
This configuration is applied when adding GTP-C or
GTP-U filter rules, and it is only invalidated by a
NIC core reset.
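
As a worked example of the packing used below, assume a little-endian host
(rte_cpu_to_le_32() is then a no-op) and a hypothetical TEID of 0x11223344;
the two 16-bit general-fields words then carry the upper and lower halves:

	uint32_t teid_le = rte_cpu_to_le_32(0x11223344);

	/* upper half -> I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 */
	uint16_t word0 = (teid_le >> 16) & 0xFFFF;	/* 0x1122 */
	/* lower half -> I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 */
	uint16_t word1 = teid_le & 0xFFFF;		/* 0x3344 */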

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 193 +++++++++++++++++++++++++++++++++++++----
 drivers/net/i40e/i40e_ethdev.h |  17 ++--
 drivers/net/i40e/i40e_flow.c   |  12 +--
 3 files changed, 191 insertions(+), 31 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 57d9bb3..ec0e23c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -7181,7 +7181,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
-	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 3 entries */
@@ -7229,12 +7229,12 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
@@ -7252,12 +7252,131 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* For GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* for GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 
@@ -7348,7 +7467,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
 			0x40;
 		big_buffer = 1;
-		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
 		break;
 	case I40E_TUNNEL_TYPE_MPLSoGRE:
 		if (!pf->mpls_replace_flag) {
@@ -7364,7 +7483,37 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
 			0x0;
 		big_buffer = 1;
-		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
+		break;
+	case I40E_TUNNEL_TYPE_GTPC:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
+	case I40E_TUNNEL_TYPE_GTPU:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+			0x0;
+		big_buffer = 1;
 		break;
 	case I40E_TUNNEL_TYPE_QINQ:
 		if (!pf->qinq_replace_flag) {
@@ -7392,13 +7541,19 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 
 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
 		pfilter->element.flags |=
-			I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+			I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	else {
 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
 						&pfilter->element.flags);
@@ -10919,14 +11074,14 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
 			   sizeof(f->input.general_fields));
 
 		if (((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
 			big_buffer = 1;
 
 		if (big_buffer)
@@ -11315,7 +11470,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 2 entries */
@@ -11346,13 +11501,13 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L2 filter, input for L2 filter will be L1 filter  */
 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 436ca2c..b223456 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -650,12 +650,16 @@ struct i40e_ethertype_rule {
 
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
-#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
-#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP	8
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE	9
+#define I40E_AQC_ADD_CLOUD_FILTER_0X10		0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_0X11		0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X12		0x12
+#define I40E_AQC_ADD_L1_FILTER_0X11		0x11
+#define I40E_AQC_ADD_L1_FILTER_0X12		0x12
+#define I40E_AQC_ADD_L1_FILTER_0X13		0x13
+#define I40E_AQC_NEW_TR_21			21
+#define I40E_AQC_NEW_TR_22			22
 
 enum i40e_tunnel_iptype {
 	I40E_TUNNEL_IPTYPE_IPV4,
@@ -905,6 +909,7 @@ struct i40e_pf {
 	bool floating_veb_list[I40E_MAX_VF];
 	struct i40e_flow_list flow_list;
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
+	bool gtp_replace_flag;   /* 1 - GTP-C/U filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 2bf7098..f4c8e63 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -4344,12 +4344,12 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 		vsi = vf->vsi;
 	}
 
-	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X10))
 		big_buffer = 1;
 
 	if (big_buffer)
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 1/8] mbuf: support GTP in software packet type parser
  2017-09-29  5:18             ` [PATCH v6 1/8] mbuf: support GTP in software packet type parser Beilei Xing
@ 2017-09-29  8:15               ` Sean Harte
  2017-09-29  8:41                 ` Xing, Beilei
  0 siblings, 1 reply; 116+ messages in thread
From: Sean Harte @ 2017-09-29  8:15 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, andrey.chilikin, dev

On 29 September 2017 at 06:18, Beilei Xing <beilei.xing@intel.com> wrote:
> Add support of GTP-C and GTP-U tunnels in rte_net_get_ptype().
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Acked-by: Olivier Matz <olivier.matz@6wind.com>
> ---
>  lib/librte_mbuf/rte_mbuf_ptype.c |  2 ++
>  lib/librte_mbuf/rte_mbuf_ptype.h | 24 ++++++++++++++++++++++++
>  2 files changed, 26 insertions(+)

<snip>

>  /**
> + * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type.
> + * Packet format:
> + * <'ether type'=0x0800
> + * | 'version'=4, 'protocol'=17
> + * | 'destination port'=2123>
> + * or,
> + * <'ether type'=0x86DD
> + * | 'version'=6, 'next header'=17
> + * | 'destination port'=2123>
> + */
> +#define RTE_PTYPE_TUNNEL_GTPC               0x00007000

This isn't a good description of GTP-C. GTP-C messages have a source
port of 2123, or a destination port of 2123.
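
A software classifier that wants to cover both directions therefore has to
test both ports, e.g. (only a sketch with an ad-hoc helper name, not the
actual rte_net_get_ptype() code):

/* Sketch: treat a UDP packet as GTP-C if either port is 2123. */
static inline int
udp_is_gtpc(const struct udp_hdr *udp)
{
	return udp->src_port == rte_cpu_to_be_16(2123) ||
	       udp->dst_port == rte_cpu_to_be_16(2123);
}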

> +/**
> + * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type.
> + * Packet format:
> + * <'ether type'=0x0800
> + * | 'version'=4, 'protocol'=17
> + * | 'destination port'=2152>
> + * or,
> + * <'ether type'=0x86DD
> + * | 'version'=6, 'next header'=17
> + * | 'destination port'=2152>
> + */
> +#define RTE_PTYPE_TUNNEL_GTPU               0x00008000
> +/**
>   * Mask of tunneling packet types.
>   */
>  #define RTE_PTYPE_TUNNEL_MASK               0x0000f000
> --
> 2.5.5
>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 4/8] ethdev: add GTP items to support flow API
  2017-09-29  5:18             ` [PATCH v6 4/8] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-09-29  8:15               ` Sean Harte
  2017-09-29  8:54                 ` Xing, Beilei
  0 siblings, 1 reply; 116+ messages in thread
From: Sean Harte @ 2017-09-29  8:15 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, andrey.chilikin, dev

On 29 September 2017 at 06:18, Beilei Xing <beilei.xing@intel.com> wrote:
> This patch adds GTP, GTPC and GTPU items for
> generic flow API, and also exposes item fields
> through the flow command.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> ---
>  app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
>  app/test-pmd/config.c                       |  3 ++
>  doc/guides/prog_guide/rte_flow.rst          | 17 ++++++++++
>  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
>  lib/librte_ether/rte_flow.h                 | 52 +++++++++++++++++++++++++++++
>  5 files changed, 116 insertions(+)

<snip>

>  /**
> + * RTE_FLOW_ITEM_TYPE_GTP.
> + *
> + * Matches a GTPv1 header.
> + */
> +struct rte_flow_item_gtp {
> +       /**
> +        * Version (3b), protocol type (1b), reserved (1b),
> +        * Extension header flag (1b),
> +        * Sequence number flag (1b),
> +        * N-PDU number flag (1b).
> +        */
> +       uint8_t v_pt_rsv_flags;
> +       uint8_t msg_type; /**< Message type. */
> +       rte_be16_t msg_len; /**< Message length. */
> +       rte_be32_t teid; /**< Tunnel endpoint identifier. */
> +};

In future, you might add support for GTPv2 (which is used since LTE).
Maybe this structure should have v1 in its name to avoid confusion?

> +
> +/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
> +#ifndef __cplusplus
> +static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
> +       .teid = RTE_BE32(0xffffffff),
> +};
> +#endif
> +
> +/**
>   * Matching pattern item definition.
>   *
>   * A pattern is formed by stacking items starting from the lowest protocol
> --
> 2.5.5
>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-29  5:19             ` [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-09-29  8:15               ` Sean Harte
  2017-09-29  9:33                 ` Xing, Beilei
  0 siblings, 1 reply; 116+ messages in thread
From: Sean Harte @ 2017-09-29  8:15 UTC (permalink / raw)
  To: Beilei Xing; +Cc: jingjing.wu, andrey.chilikin, dev

On 29 September 2017 at 06:19, Beilei Xing <beilei.xing@intel.com> wrote:
> This patch adds FDIR support for GTP-C and GTP-U. The
> input set of GTP-C and GTP-U is TEID.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
>  drivers/net/i40e/i40e_ethdev.h |  30 +++++
>  drivers/net/i40e/i40e_fdir.c   | 200 ++++++++++++++++++++++---------
>  drivers/net/i40e/i40e_flow.c   | 263 +++++++++++++++++++++++++++++++++++------
>  3 files changed, 396 insertions(+), 97 deletions(-)

<snip>

> @@ -1173,10 +1196,69 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
>                                 rte_cpu_to_be_16(ETHER_TYPE_ARP))
>                         payload += sizeof(struct arp_hdr);
>                 set_idx = I40E_FLXPLD_L2_IDX;
> -               break;
> -       default:
> -               PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
> -               return -EINVAL;
> +       } else if (fdir_input->flow_ext.customized_pctype) {
> +               /* If customized pctype is used */
> +               cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
> +               if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
> +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
> +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
> +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
> +                       udp = (struct udp_hdr *)(raw_pkt + len);
> +                       udp->dgram_len =
> +                               rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
> +
> +                       gtp = (struct rte_flow_item_gtp *)
> +                               ((unsigned char *)udp + sizeof(struct udp_hdr));
> +                       gtp->v_pt_rsv_flags = 0x30;

0x30 isn't valid for GTP-C, the sequence number must be present in
GTP-C so it will be 0x32 or more. Is this byte actually matched
against by the device using the GTP pctypes?
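
For reference, the GTPv1 flags byte is version (bits 7-5), PT (bit 4), a
reserved bit, then E, S and PN, so the two values discussed here decompose
as follows (ad-hoc macro names, layout per 3GPP TS 29.060):

#define GTP1_VER_1	(1 << 5)	/* version = 1 */
#define GTP1_PT_GTP	(1 << 4)	/* protocol type = GTP */
#define GTP1_S		(1 << 1)	/* sequence number present */

uint8_t gtpu_flags = GTP1_VER_1 | GTP1_PT_GTP;			/* 0x30 */
uint8_t gtpc_flags = GTP1_VER_1 | GTP1_PT_GTP | GTP1_S;		/* 0x32 */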

> +                       gtp->msg_len =
> +                               rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
> +                       gtp->teid = fdir_input->flow.gtp_flow.teid;
> +                       gtp->msg_type = 0x1;

Why use this value?

> +
> +                       if (cus_pctype->index == I40E_CUSTOMIZED_GTPC)
> +                               udp->dst_port = rte_cpu_to_be_16(2123);

This will only match half of GTP-C messages. GTP-C messages have a UDP
port destination of 2123, or a UDP source port of 2123. To match all
GTP-C packets you need to look at both.

> +                       else
> +                               udp->dst_port = rte_cpu_to_be_16(2152);
> +
> +                       if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
> +                               gtp->msg_type = 0xFF;
> +                               gtp_ipv4 = (struct ipv4_hdr *)
> +                                       ((unsigned char *)gtp +
> +                                        sizeof(struct rte_flow_item_gtp));

This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are
allowed to have a sequence number, which adds an extra 4 bytes to the
GTP header.
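
So the inner IP header does not sit at a fixed offset; a parser would have
to derive the header length from the flags, roughly (a sketch that ignores
any chained extension headers):

/* 8 mandatory bytes, plus 4 optional bytes (sequence number, N-PDU
 * number, next extension header type) when E, S or PN is set.
 */
static inline size_t
gtp1_hdr_len(const struct rte_flow_item_gtp *gtp)
{
	return (gtp->v_pt_rsv_flags & 0x07) ? 12 : 8;
}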

> +                               gtp_ipv4->version_ihl =
> +                                       I40E_FDIR_IP_DEFAULT_VERSION_IHL;
> +                               gtp_ipv4->next_proto_id = IPPROTO_IP;
> +                               gtp_ipv4->total_length =
> +                                       rte_cpu_to_be_16(
> +                                               I40E_FDIR_INNER_IP_DEFAULT_LEN);
> +                               payload = (unsigned char *)gtp_ipv4 +
> +                                       sizeof(struct ipv4_hdr);
> +                       } else if (cus_pctype->index ==
> +                                  I40E_CUSTOMIZED_GTPU_IPV6) {
> +                               gtp->msg_type = 0xFF;
> +                               gtp_ipv6 = (struct ipv6_hdr *)
> +                                       ((unsigned char *)gtp +
> +                                        sizeof(struct rte_flow_item_gtp));

This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are
allowed to have a sequence number, which adds an extra 4 bytes to the
GTP header.

> +                               gtp_ipv6->vtc_flow =
> +                                       rte_cpu_to_be_32(
> +                                              I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
> +                                              (0 << I40E_FDIR_IPv6_TC_OFFSET));
> +                               gtp_ipv6->proto = IPPROTO_NONE;
> +                               gtp_ipv6->payload_len =
> +                                       rte_cpu_to_be_16(
> +                                             I40E_FDIR_INNER_IPv6_DEFAULT_LEN);
> +                               gtp_ipv6->hop_limits =
> +                                       I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
> +                               payload = (unsigned char *)gtp_ipv6 +
> +                                       sizeof(struct ipv6_hdr);
> +                       } else
> +                               payload = (unsigned char *)gtp +
> +                                       sizeof(struct rte_flow_item_gtp);
> +               }
> +       } else {
> +               PMD_DRV_LOG(ERR, "unknown pctype %u.",
> +                           fdir_input->pctype);
> +               return -1;
>         }
>
>         /* fill the flexbytes to payload */

<snip>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 1/8] mbuf: support GTP in software packet type parser
  2017-09-29  8:15               ` Sean Harte
@ 2017-09-29  8:41                 ` Xing, Beilei
  0 siblings, 0 replies; 116+ messages in thread
From: Xing, Beilei @ 2017-09-29  8:41 UTC (permalink / raw)
  To: Sean Harte; +Cc: Wu, Jingjing, Chilikin, Andrey, dev



> -----Original Message-----
> From: Sean Harte [mailto:seanbh@gmail.com]
> Sent: Friday, September 29, 2017 4:15 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v6 1/8] mbuf: support GTP in software
> packet type parser
> 
> On 29 September 2017 at 06:18, Beilei Xing <beilei.xing@intel.com> wrote:
> > Add support of GTP-C and GTP-U tunnels in rte_net_get_ptype().
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > Acked-by: Olivier Matz <olivier.matz@6wind.com>
> > ---
> >  lib/librte_mbuf/rte_mbuf_ptype.c |  2 ++
> > lib/librte_mbuf/rte_mbuf_ptype.h | 24 ++++++++++++++++++++++++
> >  2 files changed, 26 insertions(+)
> 
> <snip>
> 
> >  /**
> > + * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type.
> > + * Packet format:
> > + * <'ether type'=0x0800
> > + * | 'version'=4, 'protocol'=17
> > + * | 'destination port'=2123>
> > + * or,
> > + * <'ether type'=0x86DD
> > + * | 'version'=6, 'next header'=17
> > + * | 'destination port'=2123>
> > + */
> > +#define RTE_PTYPE_TUNNEL_GTPC               0x00007000
> 
> This isn't a good description of GTP-C. GTP-C messages have a source port of
> 2123, or a destination port of 2123.

Yes, will distinguish between request and response messages.

> 
> > +/**
> > + * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type.
> > + * Packet format:
> > + * <'ether type'=0x0800
> > + * | 'version'=4, 'protocol'=17
> > + * | 'destination port'=2152>
> > + * or,
> > + * <'ether type'=0x86DD
> > + * | 'version'=6, 'next header'=17
> > + * | 'destination port'=2152>
> > + */
> > +#define RTE_PTYPE_TUNNEL_GTPU               0x00008000
> > +/**
> >   * Mask of tunneling packet types.
> >   */
> >  #define RTE_PTYPE_TUNNEL_MASK               0x0000f000
> > --
> > 2.5.5
> >

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 4/8] ethdev: add GTP items to support flow API
  2017-09-29  8:15               ` Sean Harte
@ 2017-09-29  8:54                 ` Xing, Beilei
  2017-09-29  9:29                   ` Sean Harte
  0 siblings, 1 reply; 116+ messages in thread
From: Xing, Beilei @ 2017-09-29  8:54 UTC (permalink / raw)
  To: Sean Harte; +Cc: Wu, Jingjing, Chilikin, Andrey, dev



> -----Original Message-----
> From: Sean Harte [mailto:seanbh@gmail.com]
> Sent: Friday, September 29, 2017 4:15 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v6 4/8] ethdev: add GTP items to support
> flow API
> 
> On 29 September 2017 at 06:18, Beilei Xing <beilei.xing@intel.com> wrote:
> > This patch adds GTP, GTPC and GTPU items for generic flow API, and
> > also exposes item fields through the flow command.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> > ---
> >  app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
> >  app/test-pmd/config.c                       |  3 ++
> >  doc/guides/prog_guide/rte_flow.rst          | 17 ++++++++++
> >  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
> >  lib/librte_ether/rte_flow.h                 | 52
> +++++++++++++++++++++++++++++
> >  5 files changed, 116 insertions(+)
> 
> <snip>
> 
> >  /**
> > + * RTE_FLOW_ITEM_TYPE_GTP.
> > + *
> > + * Matches a GTPv1 header.
> > + */
> > +struct rte_flow_item_gtp {
> > +       /**
> > +        * Version (3b), protocol type (1b), reserved (1b),
> > +        * Extension header flag (1b),
> > +        * Sequence number flag (1b),
> > +        * N-PDU number flag (1b).
> > +        */
> > +       uint8_t v_pt_rsv_flags;
> > +       uint8_t msg_type; /**< Message type. */
> > +       rte_be16_t msg_len; /**< Message length. */
> > +       rte_be32_t teid; /**< Tunnel endpoint identifier. */ };
> 
> In future, you might add support for GTPv2 (which is used since LTE).
> Maybe this structure should have v1 in its name to avoid confusion?

I considered it before. But I think we can modify it when we support GTPv2 in the future, and keep the concise 'GTP' name for now :) since I have described that it matches the v1 header.

> 
> > +
> > +/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */ #ifndef __cplusplus
> > +static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
> > +       .teid = RTE_BE32(0xffffffff),
> > +};
> > +#endif
> > +
> > +/**
> >   * Matching pattern item definition.
> >   *
> >   * A pattern is formed by stacking items starting from the lowest
> > protocol
> > --
> > 2.5.5
> >

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 4/8] ethdev: add GTP items to support flow API
  2017-09-29  8:54                 ` Xing, Beilei
@ 2017-09-29  9:29                   ` Sean Harte
  2017-09-29  9:37                     ` Xing, Beilei
  2017-10-02 12:27                     ` Adrien Mazarguil
  0 siblings, 2 replies; 116+ messages in thread
From: Sean Harte @ 2017-09-29  9:29 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

On 29 September 2017 at 09:54, Xing, Beilei <beilei.xing@intel.com> wrote:
>
>
>> -----Original Message-----
>> From: Sean Harte [mailto:seanbh@gmail.com]
>> Sent: Friday, September 29, 2017 4:15 PM
>> To: Xing, Beilei <beilei.xing@intel.com>
>> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
>> <andrey.chilikin@intel.com>; dev@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH v6 4/8] ethdev: add GTP items to support
>> flow API
>>
>> On 29 September 2017 at 06:18, Beilei Xing <beilei.xing@intel.com> wrote:
>> > This patch adds GTP, GTPC and GTPU items for generic flow API, and
>> > also exposes item fields through the flow command.
>> >
>> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
>> > Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
>> > ---
>> >  app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
>> >  app/test-pmd/config.c                       |  3 ++
>> >  doc/guides/prog_guide/rte_flow.rst          | 17 ++++++++++
>> >  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
>> >  lib/librte_ether/rte_flow.h                 | 52
>> +++++++++++++++++++++++++++++
>> >  5 files changed, 116 insertions(+)
>>
>> <snip>
>>
>> >  /**
>> > + * RTE_FLOW_ITEM_TYPE_GTP.
>> > + *
>> > + * Matches a GTPv1 header.
>> > + */
>> > +struct rte_flow_item_gtp {
>> > +       /**
>> > +        * Version (3b), protocol type (1b), reserved (1b),
>> > +        * Extension header flag (1b),
>> > +        * Sequence number flag (1b),
>> > +        * N-PDU number flag (1b).
>> > +        */
>> > +       uint8_t v_pt_rsv_flags;
>> > +       uint8_t msg_type; /**< Message type. */
>> > +       rte_be16_t msg_len; /**< Message length. */
>> > +       rte_be32_t teid; /**< Tunnel endpoint identifier. */ };
>>
>> In future, you might add support for GTPv2 (which is used since LTE).
>> Maybe this structure should have v1 in its name to avoid confusion?
>
> I considered it before. But I think we can modify it when we support GTPv2 in the future, and keep the concise 'GTP' name for now :) since I have described that it matches the v1 header.
>

You could rename v_pt_rsv_flags to version_flags to avoid some future
code changes to support GTPv2. There's still the issue that not all
GTPv2 messages have a TEID though.
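
For comparison, a rough GTPv2-C header layout looks like this (not a
proposed API, just to show why one fixed-layout item is awkward across
versions):

struct gtpv2c_hdr {
	uint8_t version_flags;	/* version (3b) = 2, P (1b), T (1b), spare (3b) */
	uint8_t msg_type;	/* message type */
	rte_be16_t msg_len;	/* message length */
	/* Only when T == 1: a 4-byte TEID, then a 3-byte sequence number
	 * and a spare byte; when T == 0 the TEID is absent.
	 */
};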

>>
>> > +
>> > +/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */ #ifndef __cplusplus
>> > +static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
>> > +       .teid = RTE_BE32(0xffffffff),
>> > +};
>> > +#endif
>> > +
>> > +/**
>> >   * Matching pattern item definition.
>> >   *
>> >   * A pattern is formed by stacking items starting from the lowest
>> > protocol
>> > --
>> > 2.5.5
>> >

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-29  8:15               ` Sean Harte
@ 2017-09-29  9:33                 ` Xing, Beilei
  2017-09-29 10:09                   ` Sean Harte
  0 siblings, 1 reply; 116+ messages in thread
From: Xing, Beilei @ 2017-09-29  9:33 UTC (permalink / raw)
  To: Sean Harte; +Cc: Wu, Jingjing, Chilikin, Andrey, dev



> -----Original Message-----
> From: Sean Harte [mailto:seanbh@gmail.com]
> Sent: Friday, September 29, 2017 4:15 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C
> and GTP-U
> 
> On 29 September 2017 at 06:19, Beilei Xing <beilei.xing@intel.com> wrote:
> > This patch adds FDIR support for GTP-C and GTP-U. The input set of
> > GTP-C and GTP-U is TEID.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> >  drivers/net/i40e/i40e_ethdev.h |  30 +++++
> >  drivers/net/i40e/i40e_fdir.c   | 200 ++++++++++++++++++++++---------
> >  drivers/net/i40e/i40e_flow.c   | 263
> +++++++++++++++++++++++++++++++++++------
> >  3 files changed, 396 insertions(+), 97 deletions(-)
> 
> <snip>
> 
> > @@ -1173,10 +1196,69 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf
> *pf,
> >                                 rte_cpu_to_be_16(ETHER_TYPE_ARP))
> >                         payload += sizeof(struct arp_hdr);
> >                 set_idx = I40E_FLXPLD_L2_IDX;
> > -               break;
> > -       default:
> > -               PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
> > -               return -EINVAL;
> > +       } else if (fdir_input->flow_ext.customized_pctype) {
> > +               /* If customized pctype is used */
> > +               cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
> > +               if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
> > +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
> > +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
> > +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
> > +                       udp = (struct udp_hdr *)(raw_pkt + len);
> > +                       udp->dgram_len =
> > +
> > + rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
> > +
> > +                       gtp = (struct rte_flow_item_gtp *)
> > +                               ((unsigned char *)udp + sizeof(struct udp_hdr));
> > +                       gtp->v_pt_rsv_flags = 0x30;
> 
> 0x30 isn't valid for GTP-C, the sequence number must be present in GTP-C so
> it will be 0x32 or more. Is this byte actually matched against by the device
> using the GTP pctypes?

We construct packets and send them to the HW to create the flow director rules for GTP-C and
GTP-U. Actually I didn't get any error info with 0x30, and in my test, GTP-C packets can hit the
GTP-C pctype rule. Will try 0x32 later.

> 
> > +                       gtp->msg_len =
> > +                               rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
> > +                       gtp->teid = fdir_input->flow.gtp_flow.teid;
> > +                       gtp->msg_type = 0x1;
> 
> Why use this value?

It is just for constructing a GTP packet to create an FDIR rule for one pctype; any value other than 0xFF can be used.

> 
> > +
> > +                       if (cus_pctype->index == I40E_CUSTOMIZED_GTPC)
> > +                               udp->dst_port =
> > + rte_cpu_to_be_16(2123);
> 
> This will only match half of GTP-C messages. GTP-C messages have a UDP
> port destination of 2123, or a UDP source port of 2123. To match all GTP-C
> packets you need to look at both.

Yes. But the GTP profile for i40e doesn't support response messages.

> 
> > +                       else
> > +                               udp->dst_port =
> > + rte_cpu_to_be_16(2152);
> > +
> > +                       if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
> > +                               gtp->msg_type = 0xFF;
> > +                               gtp_ipv4 = (struct ipv4_hdr *)
> > +                                       ((unsigned char *)gtp +
> > +                                        sizeof(struct
> > + rte_flow_item_gtp));
> 
> This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are allowed to have
> a sequence number, which adds an extra 4 bytes to the GTP header.

For the GTP profile, there are 4 pctypes for GTP packets: GTPC, GTPU, GTPIPV4, and GTPIPV6.
The HW parses which pctype a GTP packet belongs to.
We construct a packet to create an FDIR rule for one pctype; after that, all packets whose
pctype matches the rule's pctype will hit the rule.
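
For example, a rule targeting the GTPU inner-IPv4 pctype only adds an empty
inner IPv4 item after the GTP-U item; a sketch (the TEID value is a
placeholder):

	struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x1234) };
	struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* outer IP */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* inner IP, no spec/mask */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};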

> 
> > +                               gtp_ipv4->version_ihl =
> > +                                       I40E_FDIR_IP_DEFAULT_VERSION_IHL;
> > +                               gtp_ipv4->next_proto_id = IPPROTO_IP;
> > +                               gtp_ipv4->total_length =
> > +                                       rte_cpu_to_be_16(
> > +                                               I40E_FDIR_INNER_IP_DEFAULT_LEN);
> > +                               payload = (unsigned char *)gtp_ipv4 +
> > +                                       sizeof(struct ipv4_hdr);
> > +                       } else if (cus_pctype->index ==
> > +                                  I40E_CUSTOMIZED_GTPU_IPV6) {
> > +                               gtp->msg_type = 0xFF;
> > +                               gtp_ipv6 = (struct ipv6_hdr *)
> > +                                       ((unsigned char *)gtp +
> > +                                        sizeof(struct
> > + rte_flow_item_gtp));
> 
> This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are allowed to have
> a sequence number, which adds an extra 4 bytes to the GTP header.

Same as above.

> 
> > +                               gtp_ipv6->vtc_flow =
> > +                                       rte_cpu_to_be_32(
> > +                                              I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
> > +                                              (0 << I40E_FDIR_IPv6_TC_OFFSET));
> > +                               gtp_ipv6->proto = IPPROTO_NONE;
> > +                               gtp_ipv6->payload_len =
> > +                                       rte_cpu_to_be_16(
> > +                                             I40E_FDIR_INNER_IPv6_DEFAULT_LEN);
> > +                               gtp_ipv6->hop_limits =
> > +                                       I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
> > +                               payload = (unsigned char *)gtp_ipv6 +
> > +                                       sizeof(struct ipv6_hdr);
> > +                       } else
> > +                               payload = (unsigned char *)gtp +
> > +                                       sizeof(struct rte_flow_item_gtp);
> > +               }
> > +       } else {
> > +               PMD_DRV_LOG(ERR, "unknown pctype %u.",
> > +                           fdir_input->pctype);
> > +               return -1;
> >         }
> >
> >         /* fill the flexbytes to payload */
> 
> <snip>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 4/8] ethdev: add GTP items to support flow API
  2017-09-29  9:29                   ` Sean Harte
@ 2017-09-29  9:37                     ` Xing, Beilei
  2017-10-02 12:27                     ` Adrien Mazarguil
  1 sibling, 0 replies; 116+ messages in thread
From: Xing, Beilei @ 2017-09-29  9:37 UTC (permalink / raw)
  To: Sean Harte; +Cc: Wu, Jingjing, Chilikin, Andrey, dev



> -----Original Message-----
> From: Sean Harte [mailto:seanbh@gmail.com]
> Sent: Friday, September 29, 2017 5:30 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v6 4/8] ethdev: add GTP items to support
> flow API
> 
> On 29 September 2017 at 09:54, Xing, Beilei <beilei.xing@intel.com> wrote:
> >
> >
> >> -----Original Message-----
> >> From: Sean Harte [mailto:seanbh@gmail.com]
> >> Sent: Friday, September 29, 2017 4:15 PM
> >> To: Xing, Beilei <beilei.xing@intel.com>
> >> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> >> <andrey.chilikin@intel.com>; dev@dpdk.org
> >> Subject: Re: [dpdk-dev] [PATCH v6 4/8] ethdev: add GTP items to
> >> support flow API
> >>
> >> On 29 September 2017 at 06:18, Beilei Xing <beilei.xing@intel.com> wrote:
> >> > This patch adds GTP, GTPC and GTPU items for generic flow API, and
> >> > also exposes item fields through the flow command.
> >> >
> >> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> >> > Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> >> > ---
> >> >  app/test-pmd/cmdline_flow.c                 | 40
> ++++++++++++++++++++++
> >> >  app/test-pmd/config.c                       |  3 ++
> >> >  doc/guides/prog_guide/rte_flow.rst          | 17 ++++++++++
> >> >  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
> >> >  lib/librte_ether/rte_flow.h                 | 52
> >> +++++++++++++++++++++++++++++
> >> >  5 files changed, 116 insertions(+)
> >>
> >> <snip>
> >>
> >> >  /**
> >> > + * RTE_FLOW_ITEM_TYPE_GTP.
> >> > + *
> >> > + * Matches a GTPv1 header.
> >> > + */
> >> > +struct rte_flow_item_gtp {
> >> > +       /**
> >> > +        * Version (3b), protocol type (1b), reserved (1b),
> >> > +        * Extension header flag (1b),
> >> > +        * Sequence number flag (1b),
> >> > +        * N-PDU number flag (1b).
> >> > +        */
> >> > +       uint8_t v_pt_rsv_flags;
> >> > +       uint8_t msg_type; /**< Message type. */
> >> > +       rte_be16_t msg_len; /**< Message length. */
> >> > +       rte_be32_t teid; /**< Tunnel endpoint identifier. */ };
> >>
> >> In future, you might add support for GTPv2 (which is used since LTE).
> >> Maybe this structure should have v1 in its name to avoid confusion?
> >
> > I considered it before. But I think we can modify it when we support GTPv2
> in the future, and keep the concise 'GTP' name for now :) since I have
> described that it matches the v1 header.
> >
> 
> You could rename v_pt_rsv_flags to version_flags to avoid some future code
> changes to support GTPv2. There's still the issue that not all
> GTPv2 messages have a TEID though.
> 

Yes, actually I don't have a good idea for compatibility between GTPv1 and GTPv2 currently...
Maybe we can consider it in the future.

> >>
> >> > +
> >> > +/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */ #ifndef
> >> > +__cplusplus static const struct rte_flow_item_gtp
> rte_flow_item_gtp_mask = {
> >> > +       .teid = RTE_BE32(0xffffffff), }; #endif
> >> > +
> >> > +/**
> >> >   * Matching pattern item definition.
> >> >   *
> >> >   * A pattern is formed by stacking items starting from the lowest
> >> > protocol
> >> > --
> >> > 2.5.5
> >> >
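
For context, a minimal sketch of how the GTP item and its default mask quoted above would be used to match a specific TEID. This is illustrative only and not part of the patch; the TEID value and variable names are made up.

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Illustrative only: the spec carries the TEID to match, and the default
 * mask rte_flow_item_gtp_mask covers just the teid field.
 */
static const struct rte_flow_item_gtp gtp_spec = {
        .teid = RTE_BE32(0x12345678), /* made-up TEID */
};

static const struct rte_flow_item gtp_item = {
        .type = RTE_FLOW_ITEM_TYPE_GTP,
        .spec = &gtp_spec,
        .mask = &rte_flow_item_gtp_mask,
        /* would follow eth / ipv4 / udp items in a full pattern */
};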

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-29  9:33                 ` Xing, Beilei
@ 2017-09-29 10:09                   ` Sean Harte
  2017-09-29 11:30                     ` Xing, Beilei
                                       ` (2 more replies)
  0 siblings, 3 replies; 116+ messages in thread
From: Sean Harte @ 2017-09-29 10:09 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

On 29 September 2017 at 10:33, Xing, Beilei <beilei.xing@intel.com> wrote:
>
>
>> -----Original Message-----
>> From: Sean Harte [mailto:seanbh@gmail.com]
>> Sent: Friday, September 29, 2017 4:15 PM
>> To: Xing, Beilei <beilei.xing@intel.com>
>> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
>> <andrey.chilikin@intel.com>; dev@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C
>> and GTP-U
>>
>> On 29 September 2017 at 06:19, Beilei Xing <beilei.xing@intel.com> wrote:
>> > This patch adds FDIR support for GTP-C and GTP-U. The input set of
>> > GTP-C and GTP-U is TEID.
>> >
>> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
>> > ---
>> >  drivers/net/i40e/i40e_ethdev.h |  30 +++++
>> >  drivers/net/i40e/i40e_fdir.c   | 200 ++++++++++++++++++++++---------
>> >  drivers/net/i40e/i40e_flow.c   | 263
>> +++++++++++++++++++++++++++++++++++------
>> >  3 files changed, 396 insertions(+), 97 deletions(-)
>>
>> <snip>
>>
>> > @@ -1173,10 +1196,69 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf
>> *pf,
>> >                                 rte_cpu_to_be_16(ETHER_TYPE_ARP))
>> >                         payload += sizeof(struct arp_hdr);
>> >                 set_idx = I40E_FLXPLD_L2_IDX;
>> > -               break;
>> > -       default:
>> > -               PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
>> > -               return -EINVAL;
>> > +       } else if (fdir_input->flow_ext.customized_pctype) {
>> > +               /* If customized pctype is used */
>> > +               cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
>> > +               if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
>> > +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
>> > +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
>> > +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
>> > +                       udp = (struct udp_hdr *)(raw_pkt + len);
>> > +                       udp->dgram_len =
>> > +
>> > + rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
>> > +
>> > +                       gtp = (struct rte_flow_item_gtp *)
>> > +                               ((unsigned char *)udp + sizeof(struct udp_hdr));
>> > +                       gtp->v_pt_rsv_flags = 0x30;
>>
>> 0x30 isn't valid for GTP-C, the sequence number must be present in GTP-C so
>> it will be 0x32 or more. Is this byte actually matched against by the device
>> using the GTP pctypes?
>
> We construct packets and send the packet to HW  to create flow director rule for GTP-C and
> GTP-U. Actually I didn’t get error info with 0x30. And in my test, GTP-C packets can hit  GTP-C
> pctype rule. Will try 0x32 later.
>
>>
>> > +                       gtp->msg_len =
>> > +                               rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
>> > +                       gtp->teid = fdir_input->flow.gtp_flow.teid;
>> > +                       gtp->msg_type = 0x1;
>>
>> Why use this value?
>
> Just for constructing a GTP packet to create a fdir rule for one pctype, can use other values except 0xFF.
>
>>
>> > +
>> > +                       if (cus_pctype->index == I40E_CUSTOMIZED_GTPC)
>> > +                               udp->dst_port =
>> > + rte_cpu_to_be_16(2123);
>>
>> This will only match half of GTP-C messages. GTP-C messages have a UDP
>> port destination of 2123, or a UDP source port of 2123. To match all GTP-C
>> packets you need to look at both.
>
> Yes. But the GTP profile for i40e didn't support response message.

That's not clear to a user of the rte_flow API.

>
>>
>> > +                       else
>> > +                               udp->dst_port =
>> > + rte_cpu_to_be_16(2152);
>> > +
>> > +                       if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
>> > +                               gtp->msg_type = 0xFF;
>> > +                               gtp_ipv4 = (struct ipv4_hdr *)
>> > +                                       ((unsigned char *)gtp +
>> > +                                        sizeof(struct
>> > + rte_flow_item_gtp));
>>
>> This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are allowed to have
>> a sequence number, which adds an extra 4 bytes to the GTP header.
>
> For the GTP profile, there're 4 pctypes for GTP packets: GTPC, GTPU, GTPIPV4, and GTPIPV6.
> HW parse which pctype the GTP packets belonge to.
> We construct packet to create a fdir rule for one pctype, after that, all packets whose
> pctype matches the rule's pctype will hit the rule.

My point is that you can only assume the inner IP header starts at an
offset of sizeof(struct rte_flow_item_gtp) if v_pt_rsv_flags is
exactly 0x30. If you match only those packets then some GTP-U packets
will not be matched. That should be clear to a user of the rte_flow
API.
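
For reference, a minimal sketch of the header-length calculation behind this point (the helper is made up, not part of the patch). The GTPv1 flags byte is version (3b) | PT (1b) | reserved (1b) | E (1b) | S (1b) | PN (1b); setting any of E/S/PN appends a 4-byte optional field to the mandatory 8 bytes, so a fixed offset of sizeof(struct rte_flow_item_gtp) only holds for 0x30.

#include <stddef.h>
#include <stdint.h>

/* Illustrative helper, not part of the patch. */
static inline size_t
gtpv1_hdr_len(uint8_t v_pt_rsv_flags)
{
        /* E (0x04), S (0x02) or PN (0x01) set: the 4-byte optional field
         * (sequence number, N-PDU number, next extension header type)
         * is present; extension headers, if any, add further length.
         */
        if (v_pt_rsv_flags & 0x07)
                return 12;
        return 8; /* e.g. 0x30: version 1, PT=1, no optional fields */
}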

>
>>
>> > +                               gtp_ipv4->version_ihl =
>> > +                                       I40E_FDIR_IP_DEFAULT_VERSION_IHL;
>> > +                               gtp_ipv4->next_proto_id = IPPROTO_IP;
>> > +                               gtp_ipv4->total_length =
>> > +                                       rte_cpu_to_be_16(
>> > +                                               I40E_FDIR_INNER_IP_DEFAULT_LEN);
>> > +                               payload = (unsigned char *)gtp_ipv4 +
>> > +                                       sizeof(struct ipv4_hdr);
>> > +                       } else if (cus_pctype->index ==
>> > +                                  I40E_CUSTOMIZED_GTPU_IPV6) {
>> > +                               gtp->msg_type = 0xFF;
>> > +                               gtp_ipv6 = (struct ipv6_hdr *)
>> > +                                       ((unsigned char *)gtp +
>> > +                                        sizeof(struct
>> > + rte_flow_item_gtp));
>>
>> This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are allowed to have
>> a sequence number, which adds an extra 4 bytes to the GTP header.
>
> Same with above.
>
>>
>> > +                               gtp_ipv6->vtc_flow =
>> > +                                       rte_cpu_to_be_32(
>> > +                                              I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
>> > +                                              (0 << I40E_FDIR_IPv6_TC_OFFSET));
>> > +                               gtp_ipv6->proto = IPPROTO_NONE;
>> > +                               gtp_ipv6->payload_len =
>> > +                                       rte_cpu_to_be_16(
>> > +                                             I40E_FDIR_INNER_IPv6_DEFAULT_LEN);
>> > +                               gtp_ipv6->hop_limits =
>> > +                                       I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
>> > +                               payload = (unsigned char *)gtp_ipv6 +
>> > +                                       sizeof(struct ipv6_hdr);
>> > +                       } else
>> > +                               payload = (unsigned char *)gtp +
>> > +                                       sizeof(struct rte_flow_item_gtp);
>> > +               }
>> > +       } else {
>> > +               PMD_DRV_LOG(ERR, "unknown pctype %u.",
>> > +                           fdir_input->pctype);
>> > +               return -1;
>> >         }
>> >
>> >         /* fill the flexbytes to payload */
>>
>> <snip>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-29 10:09                   ` Sean Harte
@ 2017-09-29 11:30                     ` Xing, Beilei
  2017-09-29 11:39                       ` Xing, Beilei
  2017-09-29 13:14                     ` Xing, Beilei
  2017-09-29 15:15                     ` Xing, Beilei
  2 siblings, 1 reply; 116+ messages in thread
From: Xing, Beilei @ 2017-09-29 11:30 UTC (permalink / raw)
  To: Sean Harte; +Cc: Wu, Jingjing, Chilikin, Andrey, dev



> -----Original Message-----
> From: Sean Harte [mailto:seanbh@gmail.com]
> Sent: Friday, September 29, 2017 6:10 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C
> and GTP-U
> 
> On 29 September 2017 at 10:33, Xing, Beilei <beilei.xing@intel.com> wrote:
> >
> >
> >> -----Original Message-----
> >> From: Sean Harte [mailto:seanbh@gmail.com]
> >> Sent: Friday, September 29, 2017 4:15 PM
> >> To: Xing, Beilei <beilei.xing@intel.com>
> >> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> >> <andrey.chilikin@intel.com>; dev@dpdk.org
> >> Subject: Re: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support for
> >> GTP-C and GTP-U
> >>
> >> On 29 September 2017 at 06:19, Beilei Xing <beilei.xing@intel.com> wrote:
> >> > This patch adds FDIR support for GTP-C and GTP-U. The input set of
> >> > GTP-C and GTP-U is TEID.
> >> >
> >> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> >> > ---
> >> >  drivers/net/i40e/i40e_ethdev.h |  30 +++++
> >> >  drivers/net/i40e/i40e_fdir.c   | 200 ++++++++++++++++++++++---------
> >> >  drivers/net/i40e/i40e_flow.c   | 263
> >> +++++++++++++++++++++++++++++++++++------
> >> >  3 files changed, 396 insertions(+), 97 deletions(-)
> >>
> >> <snip>
> >>
> >> > @@ -1173,10 +1196,69 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf
> >> *pf,
> >> >                                 rte_cpu_to_be_16(ETHER_TYPE_ARP))
> >> >                         payload += sizeof(struct arp_hdr);
> >> >                 set_idx = I40E_FLXPLD_L2_IDX;
> >> > -               break;
> >> > -       default:
> >> > -               PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input-
> >pctype);
> >> > -               return -EINVAL;
> >> > +       } else if (fdir_input->flow_ext.customized_pctype) {
> >> > +               /* If customized pctype is used */
> >> > +               cus_pctype = i40e_flow_fdir_find_customized_pctype(pf,
> pctype);
> >> > +               if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
> >> > +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
> >> > +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
> >> > +                   cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
> >> > +                       udp = (struct udp_hdr *)(raw_pkt + len);
> >> > +                       udp->dgram_len =
> >> > +
> >> > + rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
> >> > +
> >> > +                       gtp = (struct rte_flow_item_gtp *)
> >> > +                               ((unsigned char *)udp + sizeof(struct udp_hdr));
> >> > +                       gtp->v_pt_rsv_flags = 0x30;
> >>
> >> 0x30 isn't valid for GTP-C, the sequence number must be present in
> >> GTP-C so it will be 0x32 or more. Is this byte actually matched
> >> against by the device using the GTP pctypes?
> >
> > We construct packets and send the packet to HW  to create flow
> > director rule for GTP-C and GTP-U. Actually I didn’t get error info
> > with 0x30. And in my test, GTP-C packets can hit  GTP-C pctype rule. Will try
> 0x32 later.
> >
> >>
> >> > +                       gtp->msg_len =
> >> > +                               rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
> >> > +                       gtp->teid = fdir_input->flow.gtp_flow.teid;
> >> > +                       gtp->msg_type = 0x1;
> >>
> >> Why use this value?
> >
> > Just for constructing a GTP packet to create a fdir rule for one pctype, can
> use other values except 0xFF.
> >
> >>
> >> > +
> >> > +                       if (cus_pctype->index == I40E_CUSTOMIZED_GTPC)
> >> > +                               udp->dst_port =
> >> > + rte_cpu_to_be_16(2123);
> >>
> >> This will only match half of GTP-C messages. GTP-C messages have a
> >> UDP port destination of 2123, or a UDP source port of 2123. To match
> >> all GTP-C packets you need to look at both.
> >
> > Yes. But the GTP profile for i40e didn't support response message.
> 
> That's not clear to a user of the rte_flow API

Rte_flow is a generic API, so I think it should allow users to create a rule for the response message.
But the i40e PMD does not support the response message; if a user wants to create a rule for i40e like below:
Flow create 0 ingress pattern eth / ipv4 / udp src is 2123 / gtpc / end / actions queue index 4 / end
it will fail. But maybe other PMDs can support it.
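
For clarity, a minimal C sketch of the request-direction rule the i40e PMD is expected to accept, i.e. the equivalent of "flow create 0 ingress pattern eth / ipv4 / udp dst is 2123 / gtpc / end / actions queue index 4 / end". This is illustrative only (the wrapper function is made up; the GTPC item is the one added by this series); swapping dst_port for src_port gives the response-direction rule that would be rejected.

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Illustrative only. */
static struct rte_flow *
create_gtpc_request_rule(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_udp udp_spec = {
                .hdr = { .dst_port = RTE_BE16(2123) },
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr = { .dst_port = RTE_BE16(0xffff) },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_GTPC },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 4 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
}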

> 
> >
> >>
> >> > +                       else
> >> > +                               udp->dst_port =
> >> > + rte_cpu_to_be_16(2152);
> >> > +
> >> > +                       if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
> >> > +                               gtp->msg_type = 0xFF;
> >> > +                               gtp_ipv4 = (struct ipv4_hdr *)
> >> > +                                       ((unsigned char *)gtp +
> >> > +                                        sizeof(struct
> >> > + rte_flow_item_gtp));
> >>
> >> This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are
> >> allowed to have a sequence number, which adds an extra 4 bytes to the
> GTP header.
> >
> > For the GTP profile, there're 4 pctypes for GTP packets: GTPC, GTPU,
> GTPIPV4, and GTPIPV6.
> > HW parse which pctype the GTP packets belonge to.
> > We construct packet to create a fdir rule for one pctype, after that,
> > all packets whose pctype matches the rule's pctype will hit the rule.
> 
> My point is that you can only assume the inner IP header starts at an offset
> of sizeof(struct rte_flow_item_gtp) if v_pt_rsv_flags is exactly 0x30. If you
> match only those packets then some GTP-U packets will not be matched.
> That should be clear to a user of the rte_flow API.
> 

No matter whether a GTP-U packet has a sequence number, once the message type is 0xFF the pctype
of the packet parsed by i40e HW will be GTPIPV4 or GTPIPV6.
So if I create a flow rule for GTP-U packets with v_pt_rsv_flags 0x30 and message_type 0xFF,
it means I create a rule for pctype GTPIPV4 or GTPIPV6.
Then GTP-U packets with v_pt_rsv_flags 0x32 can also hit the rule because their pctype is likewise
GTPIPV4 or GTPIPV6.
It's just i40e HW's behavior; it doesn't mean it holds for other NICs.
Hope I explained it clearly :)

> >
> >>
> >> > +                               gtp_ipv4->version_ihl =
> >> > +                                       I40E_FDIR_IP_DEFAULT_VERSION_IHL;
> >> > +                               gtp_ipv4->next_proto_id = IPPROTO_IP;
> >> > +                               gtp_ipv4->total_length =
> >> > +                                       rte_cpu_to_be_16(
> >> > +                                               I40E_FDIR_INNER_IP_DEFAULT_LEN);
> >> > +                               payload = (unsigned char *)gtp_ipv4 +
> >> > +                                       sizeof(struct ipv4_hdr);
> >> > +                       } else if (cus_pctype->index ==
> >> > +                                  I40E_CUSTOMIZED_GTPU_IPV6) {
> >> > +                               gtp->msg_type = 0xFF;
> >> > +                               gtp_ipv6 = (struct ipv6_hdr *)
> >> > +                                       ((unsigned char *)gtp +
> >> > +                                        sizeof(struct
> >> > + rte_flow_item_gtp));
> >>
> >> This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are
> >> allowed to have a sequence number, which adds an extra 4 bytes to the
> GTP header.
> >
> > Same with above.
> >
> >>
> >> > +                               gtp_ipv6->vtc_flow =
> >> > +                                       rte_cpu_to_be_32(
> >> > +                                              I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
> >> > +                                              (0 << I40E_FDIR_IPv6_TC_OFFSET));
> >> > +                               gtp_ipv6->proto = IPPROTO_NONE;
> >> > +                               gtp_ipv6->payload_len =
> >> > +                                       rte_cpu_to_be_16(
> >> > +                                             I40E_FDIR_INNER_IPv6_DEFAULT_LEN);
> >> > +                               gtp_ipv6->hop_limits =
> >> > +                                       I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
> >> > +                               payload = (unsigned char *)gtp_ipv6 +
> >> > +                                       sizeof(struct ipv6_hdr);
> >> > +                       } else
> >> > +                               payload = (unsigned char *)gtp +
> >> > +                                       sizeof(struct rte_flow_item_gtp);
> >> > +               }
> >> > +       } else {
> >> > +               PMD_DRV_LOG(ERR, "unknown pctype %u.",
> >> > +                           fdir_input->pctype);
> >> > +               return -1;
> >> >         }
> >> >
> >> >         /* fill the flexbytes to payload */
> >>
> >> <snip>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-29 11:30                     ` Xing, Beilei
@ 2017-09-29 11:39                       ` Xing, Beilei
  0 siblings, 0 replies; 116+ messages in thread
From: Xing, Beilei @ 2017-09-29 11:39 UTC (permalink / raw)
  To: 'Sean Harte'
  Cc: Wu, Jingjing, Chilikin, Andrey, 'dev@dpdk.org'

> -----Original Message-----
> From: Xing, Beilei
> Sent: Friday, September 29, 2017 7:30 PM
> To: Sean Harte <seanbh@gmail.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C
> and GTP-U
> 
> 
> 
> > -----Original Message-----
> > From: Sean Harte [mailto:seanbh@gmail.com]
> > Sent: Friday, September 29, 2017 6:10 PM
> > To: Xing, Beilei <beilei.xing@intel.com>
> > Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> > <andrey.chilikin@intel.com>; dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support for
> > GTP-C and GTP-U
> >
> > On 29 September 2017 at 10:33, Xing, Beilei <beilei.xing@intel.com>
> wrote:
> > >
> > >
> > >> -----Original Message-----
> > >> From: Sean Harte [mailto:seanbh@gmail.com]
> > >> Sent: Friday, September 29, 2017 4:15 PM
> > >> To: Xing, Beilei <beilei.xing@intel.com>
> > >> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> > >> <andrey.chilikin@intel.com>; dev@dpdk.org
> > >> Subject: Re: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support
> > >> for GTP-C and GTP-U
> > >>
> > >> On 29 September 2017 at 06:19, Beilei Xing <beilei.xing@intel.com>
> wrote:
> > >> > This patch adds FDIR support for GTP-C and GTP-U. The input set
> > >> > of GTP-C and GTP-U is TEID.
> > >> >
> > >> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > >> > ---
> > >> >  drivers/net/i40e/i40e_ethdev.h |  30 +++++
> > >> >  drivers/net/i40e/i40e_fdir.c   | 200
> ++++++++++++++++++++++---------
> > >> >  drivers/net/i40e/i40e_flow.c   | 263
> > >> +++++++++++++++++++++++++++++++++++------
> > >> >  3 files changed, 396 insertions(+), 97 deletions(-)
> > >>
> > >> <snip>
> > >>
> > >> > @@ -1173,10 +1196,69 @@ i40e_flow_fdir_construct_pkt(struct
> > >> > i40e_pf
> > >> *pf,
> > >> >
> rte_cpu_to_be_16(ETHER_TYPE_ARP))
> > >> >                         payload += sizeof(struct arp_hdr);
> > >> >                 set_idx = I40E_FLXPLD_L2_IDX;
> > >> > -               break;
> > >> > -       default:
> > >> > -               PMD_DRV_LOG(ERR, "unknown pctype %u.",
> fdir_input-
> > >pctype);
> > >> > -               return -EINVAL;
> > >> > +       } else if (fdir_input->flow_ext.customized_pctype) {
> > >> > +               /* If customized pctype is used */
> > >> > +               cus_pctype =
> > >> > + i40e_flow_fdir_find_customized_pctype(pf,
> > pctype);
> > >> > +               if (cus_pctype->index == I40E_CUSTOMIZED_GTPC
> ||
> > >> > +                   cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU_IPV4 ||
> > >> > +                   cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU_IPV6 ||
> > >> > +                   cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU) {
> > >> > +                       udp = (struct udp_hdr *)(raw_pkt + len);
> > >> > +                       udp->dgram_len =
> > >> > +
> > >> > + rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
> > >> > +
> > >> > +                       gtp = (struct rte_flow_item_gtp *)
> > >> > +                               ((unsigned char *)udp +
> sizeof(struct udp_hdr));
> > >> > +                       gtp->v_pt_rsv_flags = 0x30;
> > >>
> > >> 0x30 isn't valid for GTP-C, the sequence number must be present in
> > >> GTP-C so it will be 0x32 or more. Is this byte actually matched
> > >> against by the device using the GTP pctypes?
> > >
> > > We construct packets and send the packet to HW  to create flow
> > > director rule for GTP-C and GTP-U. Actually I didn’t get error info
> > > with 0x30. And in my test, GTP-C packets can hit  GTP-C pctype rule.
> > > Will try
> > 0x32 later.
> > >
> > >>
> > >> > +                       gtp->msg_len =
> > >> > +
> rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
> > >> > +                       gtp->teid =
> fdir_input->flow.gtp_flow.teid;
> > >> > +                       gtp->msg_type = 0x1;
> > >>
> > >> Why use this value?
> > >
> > > Just for constructing a GTP packet to create a fdir rule for one
> > > pctype, can
> > use other values except 0xFF.
> > >
> > >>
> > >> > +
> > >> > +                       if (cus_pctype->index ==
> I40E_CUSTOMIZED_GTPC)
> > >> > +                               udp->dst_port =
> > >> > + rte_cpu_to_be_16(2123);
> > >>
> > >> This will only match half of GTP-C messages. GTP-C messages have a
> > >> UDP port destination of 2123, or a UDP source port of 2123. To
> > >> match all GTP-C packets you need to look at both.
> > >
> > > Yes. But the GTP profile for i40e didn't support response message.
> >
> > That's not clear to a user of the rte_flow API
> 
> Rte_flow is a generic API, I think it should allow users to create rule for
> response message.
> But i40e PMD does not the support response message, if user want to create
> a rule for i40e like below:
> Flow create 0 ingress pattern eth / ipv4 / udp src is 2123 / gtpc / end /
> actions queue index 4 / end It will fail. But maybe other PMD can support it.
> 
> >
> > >
> > >>
> > >> > +                       else
> > >> > +                               udp->dst_port =
> > >> > + rte_cpu_to_be_16(2152);
> > >> > +
> > >> > +                       if (cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU_IPV4) {
> > >> > +                               gtp->msg_type = 0xFF;
> > >> > +                               gtp_ipv4 = (struct ipv4_hdr *)
> > >> > +                                       ((unsigned char *)gtp
> +
> > >> > +                                        sizeof(struct
> > >> > + rte_flow_item_gtp));
> > >>
> > >> This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are
> > >> allowed to have a sequence number, which adds an extra 4 bytes to
> > >> the
> > GTP header.
> > >
> > > For the GTP profile, there're 4 pctypes for GTP packets: GTPC, GTPU,
> > GTPIPV4, and GTPIPV6.
> > > HW parse which pctype the GTP packets belonge to.
> > > We construct packet to create a fdir rule for one pctype, after
> > > that, all packets whose pctype matches the rule's pctype will hit the rule.
> >
> > My point is that you can only assume the inner IP header starts at an
> > offset of sizeof(struct rte_flow_item_gtp) if v_pt_rsv_flags is
> > exactly 0x30. If you match only those packets then some GTP-U packets
> will not be matched.
> > That should be clear to a user of the rte_flow API.
> >
> 
> No matter if  GTP-U packets has a sequence number, once message-type is
> 0xFF, the pctype of the packet parsed by i40e HW will be GTPIPV4 or
> GTPIPV6.
> So If I create a flow rule for GTP-U packets with v_pt_rsv_flags 0x30 and
> message_type is 0xFF, it means I create a rule for pctype GTPIPV4 or
> GTPIPV6.
> Then GTP-U packets with v_pt_rsv_flags 0x32 can hit the rule because its
> pctype is also GTPIPV4 or GTPIPV6.
> It's just i40e HW's behavior, doesn't mean it's effective for other NICs.
> Hope I explain it clearly:)
> 

The key point is: for i40e HW, different packet types can use the same pctype.

> > >
> > >>
> > >> > +                               gtp_ipv4->version_ihl =
> > >> > +
> I40E_FDIR_IP_DEFAULT_VERSION_IHL;
> > >> > +                               gtp_ipv4->next_proto_id =
> IPPROTO_IP;
> > >> > +                               gtp_ipv4->total_length =
> > >> > +                                       rte_cpu_to_be_16(
> > >> > +
> I40E_FDIR_INNER_IP_DEFAULT_LEN);
> > >> > +                               payload = (unsigned char
> *)gtp_ipv4 +
> > >> > +                                       sizeof(struct
> ipv4_hdr);
> > >> > +                       } else if (cus_pctype->index ==
> > >> > +
> I40E_CUSTOMIZED_GTPU_IPV6) {
> > >> > +                               gtp->msg_type = 0xFF;
> > >> > +                               gtp_ipv6 = (struct ipv6_hdr *)
> > >> > +                                       ((unsigned char *)gtp
> +
> > >> > +                                        sizeof(struct
> > >> > + rte_flow_item_gtp));
> > >>
> > >> This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are
> > >> allowed to have a sequence number, which adds an extra 4 bytes to
> > >> the
> > GTP header.
> > >
> > > Same with above.
> > >
> > >>
> > >> > +                               gtp_ipv6->vtc_flow =
> > >> > +                                       rte_cpu_to_be_32(
> > >> > +
> I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
> > >> > +                                              (0 <<
> I40E_FDIR_IPv6_TC_OFFSET));
> > >> > +                               gtp_ipv6->proto =
> IPPROTO_NONE;
> > >> > +                               gtp_ipv6->payload_len =
> > >> > +                                       rte_cpu_to_be_16(
> > >> > +
> I40E_FDIR_INNER_IPv6_DEFAULT_LEN);
> > >> > +                               gtp_ipv6->hop_limits =
> > >> > +
> I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
> > >> > +                               payload = (unsigned char
> *)gtp_ipv6 +
> > >> > +                                       sizeof(struct
> ipv6_hdr);
> > >> > +                       } else
> > >> > +                               payload = (unsigned char *)gtp
> +
> > >> > +                                       sizeof(struct
> rte_flow_item_gtp);
> > >> > +               }
> > >> > +       } else {
> > >> > +               PMD_DRV_LOG(ERR, "unknown pctype %u.",
> > >> > +                           fdir_input->pctype);
> > >> > +               return -1;
> > >> >         }
> > >> >
> > >> >         /* fill the flexbytes to payload */
> > >>
> > >> <snip>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-29 10:09                   ` Sean Harte
  2017-09-29 11:30                     ` Xing, Beilei
@ 2017-09-29 13:14                     ` Xing, Beilei
  2017-09-29 15:15                     ` Xing, Beilei
  2 siblings, 0 replies; 116+ messages in thread
From: Xing, Beilei @ 2017-09-29 13:14 UTC (permalink / raw)
  To: Sean Harte; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

> -----Original Message-----
> From: Sean Harte [mailto:seanbh@gmail.com]
> Sent: Friday, September 29, 2017 6:10 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C
> and GTP-U
> 
> On 29 September 2017 at 10:33, Xing, Beilei <beilei.xing@intel.com> wrote:
> >
> >
> >> -----Original Message-----
> >> From: Sean Harte [mailto:seanbh@gmail.com]
> >> Sent: Friday, September 29, 2017 4:15 PM
> >> To: Xing, Beilei <beilei.xing@intel.com>
> >> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> >> <andrey.chilikin@intel.com>; dev@dpdk.org
> >> Subject: Re: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support for
> >> GTP-C and GTP-U
> >>
> >> On 29 September 2017 at 06:19, Beilei Xing <beilei.xing@intel.com>
> wrote:
> >> > This patch adds FDIR support for GTP-C and GTP-U. The input set of
> >> > GTP-C and GTP-U is TEID.
> >> >
> >> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> >> > ---
> >> >  drivers/net/i40e/i40e_ethdev.h |  30 +++++
> >> >  drivers/net/i40e/i40e_fdir.c   | 200
> ++++++++++++++++++++++---------
> >> >  drivers/net/i40e/i40e_flow.c   | 263
> >> +++++++++++++++++++++++++++++++++++------
> >> >  3 files changed, 396 insertions(+), 97 deletions(-)
> >>
> >> <snip>
> >>
> >> > @@ -1173,10 +1196,69 @@ i40e_flow_fdir_construct_pkt(struct
> i40e_pf
> >> *pf,
> >> >
> rte_cpu_to_be_16(ETHER_TYPE_ARP))
> >> >                         payload += sizeof(struct arp_hdr);
> >> >                 set_idx = I40E_FLXPLD_L2_IDX;
> >> > -               break;
> >> > -       default:
> >> > -               PMD_DRV_LOG(ERR, "unknown pctype %u.",
> fdir_input->pctype);
> >> > -               return -EINVAL;
> >> > +       } else if (fdir_input->flow_ext.customized_pctype) {
> >> > +               /* If customized pctype is used */
> >> > +               cus_pctype =
> i40e_flow_fdir_find_customized_pctype(pf, pctype);
> >> > +               if (cus_pctype->index == I40E_CUSTOMIZED_GTPC
> ||
> >> > +                   cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU_IPV4 ||
> >> > +                   cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU_IPV6 ||
> >> > +                   cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU) {
> >> > +                       udp = (struct udp_hdr *)(raw_pkt + len);
> >> > +                       udp->dgram_len =
> >> > +
> >> > + rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
> >> > +
> >> > +                       gtp = (struct rte_flow_item_gtp *)
> >> > +                               ((unsigned char *)udp +
> sizeof(struct udp_hdr));
> >> > +                       gtp->v_pt_rsv_flags = 0x30;
> >>
> >> 0x30 isn't valid for GTP-C, the sequence number must be present in
> >> GTP-C so it will be 0x32 or more. Is this byte actually matched
> >> against by the device using the GTP pctypes?
> >
> > We construct packets and send the packet to HW  to create flow
> > director rule for GTP-C and GTP-U. Actually I didn’t get error info
> > with 0x30. And in my test, GTP-C packets can hit  GTP-C pctype rule. Will
> try 0x32 later.
> >
> >>
> >> > +                       gtp->msg_len =
> >> > +
> rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
> >> > +                       gtp->teid =
> fdir_input->flow.gtp_flow.teid;
> >> > +                       gtp->msg_type = 0x1;
> >>
> >> Why use this value?
> >
> > Just for constructing a GTP packet to create a fdir rule for one pctype, can
> use other values except 0xFF.
> >
> >>
> >> > +
> >> > +                       if (cus_pctype->index ==
> I40E_CUSTOMIZED_GTPC)
> >> > +                               udp->dst_port =
> >> > + rte_cpu_to_be_16(2123);
> >>
> >> This will only match half of GTP-C messages. GTP-C messages have a
> >> UDP port destination of 2123, or a UDP source port of 2123. To match
> >> all GTP-C packets you need to look at both.
> >
> > Yes. But the GTP profile for i40e didn't support response message.
> 
> That's not clear to a user of the rte_flow API

I will clarify the limitations in the parsing function description.

> 
> >
> >>
> >> > +                       else
> >> > +                               udp->dst_port =
> >> > + rte_cpu_to_be_16(2152);
> >> > +
> >> > +                       if (cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU_IPV4) {
> >> > +                               gtp->msg_type = 0xFF;
> >> > +                               gtp_ipv4 = (struct ipv4_hdr *)
> >> > +                                       ((unsigned char *)gtp +
> >> > +                                        sizeof(struct
> >> > + rte_flow_item_gtp));
> >>
> >> This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are
> >> allowed to have a sequence number, which adds an extra 4 bytes to the
> GTP header.
> >
> > For the GTP profile, there're 4 pctypes for GTP packets: GTPC, GTPU,
> GTPIPV4, and GTPIPV6.
> > HW parse which pctype the GTP packets belonge to.
> > We construct packet to create a fdir rule for one pctype, after that,
> > all packets whose pctype matches the rule's pctype will hit the rule.
> 
> My point is that you can only assume the inner IP header starts at an offset
> of sizeof(struct rte_flow_item_gtp) if v_pt_rsv_flags is exactly 0x30. If you
> match only those packets then some GTP-U packets will not be matched.
> That should be clear to a user of the rte_flow API.
> 
> >
> >>
> >> > +                               gtp_ipv4->version_ihl =
> >> > +
> I40E_FDIR_IP_DEFAULT_VERSION_IHL;
> >> > +                               gtp_ipv4->next_proto_id =
> IPPROTO_IP;
> >> > +                               gtp_ipv4->total_length =
> >> > +                                       rte_cpu_to_be_16(
> >> > +
> I40E_FDIR_INNER_IP_DEFAULT_LEN);
> >> > +                               payload = (unsigned char
> *)gtp_ipv4 +
> >> > +                                       sizeof(struct
> ipv4_hdr);
> >> > +                       } else if (cus_pctype->index ==
> >> > +
> I40E_CUSTOMIZED_GTPU_IPV6) {
> >> > +                               gtp->msg_type = 0xFF;
> >> > +                               gtp_ipv6 = (struct ipv6_hdr *)
> >> > +                                       ((unsigned char *)gtp +
> >> > +                                        sizeof(struct
> >> > + rte_flow_item_gtp));
> >>
> >> This is only valid if v_pt_rsv_flags is 0x30. GTP-U packets are
> >> allowed to have a sequence number, which adds an extra 4 bytes to the
> GTP header.
> >
> > Same with above.
> >
> >>
> >> > +                               gtp_ipv6->vtc_flow =
> >> > +                                       rte_cpu_to_be_32(
> >> > +
> I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
> >> > +                                              (0 <<
> I40E_FDIR_IPv6_TC_OFFSET));
> >> > +                               gtp_ipv6->proto =
> IPPROTO_NONE;
> >> > +                               gtp_ipv6->payload_len =
> >> > +                                       rte_cpu_to_be_16(
> >> > +
> I40E_FDIR_INNER_IPv6_DEFAULT_LEN);
> >> > +                               gtp_ipv6->hop_limits =
> >> > +
> I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
> >> > +                               payload = (unsigned char
> *)gtp_ipv6 +
> >> > +                                       sizeof(struct
> ipv6_hdr);
> >> > +                       } else
> >> > +                               payload = (unsigned char *)gtp +
> >> > +                                       sizeof(struct
> rte_flow_item_gtp);
> >> > +               }
> >> > +       } else {
> >> > +               PMD_DRV_LOG(ERR, "unknown pctype %u.",
> >> > +                           fdir_input->pctype);
> >> > +               return -1;
> >> >         }
> >> >
> >> >         /* fill the flexbytes to payload */
> >>
> >> <snip>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 2/8] net/i40e: update ptype and pctype info
  2017-09-29  5:18             ` [PATCH v6 2/8] net/i40e: update ptype and pctype info Beilei Xing
@ 2017-09-29 13:22               ` Wu, Jingjing
  2017-09-29 13:24                 ` Xing, Beilei
  0 siblings, 1 reply; 116+ messages in thread
From: Wu, Jingjing @ 2017-09-29 13:22 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev



> -----Original Message-----
> From: Xing, Beilei
> Sent: Friday, September 29, 2017 1:19 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: [PATCH v6 2/8] net/i40e: update ptype and pctype info
> 
> Update new packet type and new pctype info when downloading
> profile.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>

[......] 


> +	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
> +						ptype_num, 0);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
> +		rte_free(ptype_mapping);
> +		rte_free(ptype);
> +		return -1;
> +	}
> +
> +	rte_free(ptype_mapping);
> +	rte_free(ptype);
> +	return 0;

Minor comments, how about:
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to update mapping table.");

	rte_free(ptype_mapping);
	rte_free(ptype);
	return ret;


Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 3/8] net/i40e: support RSS for new pctype
  2017-09-29  5:18             ` [PATCH v6 3/8] net/i40e: support RSS for new pctype Beilei Xing
@ 2017-09-29 13:24               ` Wu, Jingjing
  0 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-09-29 13:24 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev



> -----Original Message-----
> From: Xing, Beilei
> Sent: Friday, September 29, 2017 1:19 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: [PATCH v6 3/8] net/i40e: support RSS for new pctype
> 
> Enable RSS for new pctypes after downloading
> new profile.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 2/8] net/i40e: update ptype and pctype info
  2017-09-29 13:22               ` Wu, Jingjing
@ 2017-09-29 13:24                 ` Xing, Beilei
  0 siblings, 0 replies; 116+ messages in thread
From: Xing, Beilei @ 2017-09-29 13:24 UTC (permalink / raw)
  To: Wu, Jingjing; +Cc: Chilikin, Andrey, dev


> -----Original Message-----
> From: Wu, Jingjing
> Sent: Friday, September 29, 2017 9:23 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: RE: [PATCH v6 2/8] net/i40e: update ptype and pctype info
> 
> 
> 
> > -----Original Message-----
> > From: Xing, Beilei
> > Sent: Friday, September 29, 2017 1:19 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> > Subject: [PATCH v6 2/8] net/i40e: update ptype and pctype info
> >
> > Update new packet type and new pctype info when downloading profile.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> 
> [......]
> 
> 
> > +	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
> > +						ptype_num, 0);
> > +	if (ret) {
> > +		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
> > +		rte_free(ptype_mapping);
> > +		rte_free(ptype);
> > +		return -1;
> > +	}
> > +
> > +	rte_free(ptype_mapping);
> > +	rte_free(ptype);
> > +	return 0;
> 
> Minor comments, how about:
> 	if (ret)
> 		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
> 
> 	rte_free(ptype_mapping);
> 	rte_free(ptype);
> 	return ret;
> 

Will update in next version.

> 
> Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 5/8] net/i40e: finish integration FDIR with generic flow API
  2017-09-29  5:18             ` [PATCH v6 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
@ 2017-09-29 13:28               ` Wu, Jingjing
  0 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-09-29 13:28 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev

> +
> +	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
> +			    pctype);
> +		return ret;
> +	}
> +
The i40e_check_fdir_programming_status only reports errors; it cannot
report success. Please double check.

Thanks
Jingjing

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-29 10:09                   ` Sean Harte
  2017-09-29 11:30                     ` Xing, Beilei
  2017-09-29 13:14                     ` Xing, Beilei
@ 2017-09-29 15:15                     ` Xing, Beilei
  2 siblings, 0 replies; 116+ messages in thread
From: Xing, Beilei @ 2017-09-29 15:15 UTC (permalink / raw)
  To: Sean Harte; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

> -----Original Message-----
> From: Sean Harte [mailto:seanbh@gmail.com]
> Sent: Friday, September 29, 2017 6:10 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C
> and GTP-U
> 
> On 29 September 2017 at 10:33, Xing, Beilei <beilei.xing@intel.com> wrote:
> >
> >
> >> -----Original Message-----
> >> From: Sean Harte [mailto:seanbh@gmail.com]
> >> Sent: Friday, September 29, 2017 4:15 PM
> >> To: Xing, Beilei <beilei.xing@intel.com>
> >> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Chilikin, Andrey
> >> <andrey.chilikin@intel.com>; dev@dpdk.org
> >> Subject: Re: [dpdk-dev] [PATCH v6 6/8] net/i40e: add FDIR support for
> >> GTP-C and GTP-U
> >>
> >> On 29 September 2017 at 06:19, Beilei Xing <beilei.xing@intel.com>
> wrote:
> >> > This patch adds FDIR support for GTP-C and GTP-U. The input set of
> >> > GTP-C and GTP-U is TEID.
> >> >
> >> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> >> > ---
> >> >  drivers/net/i40e/i40e_ethdev.h |  30 +++++
> >> >  drivers/net/i40e/i40e_fdir.c   | 200
> ++++++++++++++++++++++---------
> >> >  drivers/net/i40e/i40e_flow.c   | 263
> >> +++++++++++++++++++++++++++++++++++------
> >> >  3 files changed, 396 insertions(+), 97 deletions(-)
> >>
> >> <snip>
> >>
> >> > @@ -1173,10 +1196,69 @@ i40e_flow_fdir_construct_pkt(struct
> i40e_pf
> >> *pf,
> >> >
> rte_cpu_to_be_16(ETHER_TYPE_ARP))
> >> >                         payload += sizeof(struct arp_hdr);
> >> >                 set_idx = I40E_FLXPLD_L2_IDX;
> >> > -               break;
> >> > -       default:
> >> > -               PMD_DRV_LOG(ERR, "unknown pctype %u.",
> fdir_input->pctype);
> >> > -               return -EINVAL;
> >> > +       } else if (fdir_input->flow_ext.customized_pctype) {
> >> > +               /* If customized pctype is used */
> >> > +               cus_pctype =
> i40e_flow_fdir_find_customized_pctype(pf, pctype);
> >> > +               if (cus_pctype->index == I40E_CUSTOMIZED_GTPC
> ||
> >> > +                   cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU_IPV4 ||
> >> > +                   cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU_IPV6 ||
> >> > +                   cus_pctype->index ==
> I40E_CUSTOMIZED_GTPU) {
> >> > +                       udp = (struct udp_hdr *)(raw_pkt + len);
> >> > +                       udp->dgram_len =
> >> > +
> >> > + rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
> >> > +
> >> > +                       gtp = (struct rte_flow_item_gtp *)
> >> > +                               ((unsigned char *)udp +
> sizeof(struct udp_hdr));
> >> > +                       gtp->v_pt_rsv_flags = 0x30;
> >>
> >> 0x30 isn't valid for GTP-C, the sequence number must be present in
> >> GTP-C so it will be 0x32 or more. Is this byte actually matched
> >> against by the device using the GTP pctypes?
> >
> > We construct packets and send the packet to HW  to create flow
> > director rule for GTP-C and GTP-U. Actually I didn’t get error info
> > with 0x30. And in my test, GTP-C packets can hit  GTP-C pctype rule. Will
> try 0x32 later.

I checked with 0x32 for GTP-C, and it works well. I will change to 0x32 for GTP-C.
For GTP-U on i40e, I will keep 0x30 since different GTP-U packet types
can match the same pctype.
Thanks for all your comments.

Beilei

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling
  2017-09-29  5:18           ` [PATCH v6 0/8] GPT-C and GTP-U enabling Beilei Xing
                               ` (7 preceding siblings ...)
  2017-09-29  5:19             ` [PATCH v6 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
@ 2017-09-29 15:50             ` Beilei Xing
  2017-09-29 15:50               ` [PATCH v7 1/8] mbuf: support GTP in software packet type parser Beilei Xing
                                 ` (9 more replies)
  8 siblings, 10 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-29 15:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch set enables RSS/FDIR/cloud filter for GTP-C and GTP-U.
It depends on Kirill's patch:
http://www.dpdk.org/dev/patchwork/patch/29325/

v7 changes:
 - Distinguish GTP-C request and response message in mbuf description.
 - Clarify GTP-C response message is not supported.
 - Version_type 0x30 is invalid for GTP-C, replace with 0x32.
 - Refine metadata parsing function.
 - Rework the check of fdir programming status.

v6 changes:
 - Reword description of GTP item and GTP structure: mainly support
   GTPv1, not GTPv0 or GTPv2.

v5 changes:
 - Fix code style.
 - Reword commit log.

v4 changes:
 - Refine fdir related code.
 - Rework profile metadata parsing function.
 - Fix code style.

v3 changes:
 - Rework implementation to support the new profile.
 - Add GTPC and GTPU tunnel type in software packet type parser.
 - Update ptype info when loading profile.
 - Fix bug of updating pctype info.


v2 changes:
 - Enable RSS/FDIR/cloud filter dynamically by checking profile
 - Add GTPC and GTPU items to distinguish rules for GTP-C and GTP-U
 - Rework FDIR/cloud filter enabling function

Beilei Xing (8):
  mbuf: support GTP in software packet type parser
  net/i40e: update ptype and pctype info
  net/i40e: support RSS for new pctype
  ethdev: add GTP items to support flow API
  net/i40e: finish integration FDIR with generic flow API
  net/i40e: add FDIR support for GTP-C and GTP-U
  net/i40e: add cloud filter parsing function for GTP
  net/i40e: enable cloud filter for GTP-C and GTP-U

 app/test-pmd/cmdline_flow.c                 |  40 ++
 app/test-pmd/config.c                       |   3 +
 doc/guides/prog_guide/rte_flow.rst          |  17 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |   4 +
 drivers/net/i40e/i40e_ethdev.c              | 531 ++++++++++++++++++++++++-
 drivers/net/i40e/i40e_ethdev.h              | 156 +++++++-
 drivers/net/i40e/i40e_fdir.c                | 587 +++++++++++++++++++++++++++-
 drivers/net/i40e/i40e_flow.c                | 502 ++++++++++++++++++++----
 drivers/net/i40e/rte_pmd_i40e.c             |   6 +-
 lib/librte_ether/rte_flow.h                 |  52 +++
 lib/librte_mbuf/rte_mbuf_ptype.c            |   2 +
 lib/librte_mbuf/rte_mbuf_ptype.h            |  32 ++
 12 files changed, 1800 insertions(+), 132 deletions(-)

-- 
2.5.5

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v7 1/8] mbuf: support GTP in software packet type parser
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Beilei Xing
@ 2017-09-29 15:50               ` Beilei Xing
  2017-09-29 15:50               ` [PATCH v7 2/8] net/i40e: update ptype and pctype info Beilei Xing
                                 ` (8 subsequent siblings)
  9 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-29 15:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Add support of GTP-C and GTP-U tunnels in rte_net_get_ptype().

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
---
 lib/librte_mbuf/rte_mbuf_ptype.c |  2 ++
 lib/librte_mbuf/rte_mbuf_ptype.h | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/lib/librte_mbuf/rte_mbuf_ptype.c b/lib/librte_mbuf/rte_mbuf_ptype.c
index e5c4fae..a450814 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.c
+++ b/lib/librte_mbuf/rte_mbuf_ptype.c
@@ -89,6 +89,8 @@ const char *rte_get_ptype_tunnel_name(uint32_t ptype)
 	case RTE_PTYPE_TUNNEL_NVGRE: return "TUNNEL_NVGRE";
 	case RTE_PTYPE_TUNNEL_GENEVE: return "TUNNEL_GENEVE";
 	case RTE_PTYPE_TUNNEL_GRENAT: return "TUNNEL_GRENAT";
+	case RTE_PTYPE_TUNNEL_GTPC: return "TUNNEL_GTPC";
+	case RTE_PTYPE_TUNNEL_GTPU: return "TUNNEL_GTPU";
 	default: return "TUNNEL_UNKNOWN";
 	}
 }
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index acd70bb..978c4a2 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -383,6 +383,38 @@ extern "C" {
  */
 #define RTE_PTYPE_TUNNEL_GRENAT             0x00006000
 /**
+ * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2123>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2123>
+ * or,
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'source port'=2123>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'source port'=2123>
+ */
+#define RTE_PTYPE_TUNNEL_GTPC               0x00007000
+/**
+ * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2152>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2152>
+ */
+#define RTE_PTYPE_TUNNEL_GTPU               0x00008000
+/**
  * Mask of tunneling packet types.
  */
 #define RTE_PTYPE_TUNNEL_MASK               0x0000f000
-- 
2.5.5
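
As a usage note (not part of the patch): once a driver reports these values in mbuf->packet_type, an application can branch on the tunnel classification with the existing mask. A minimal sketch, with a made-up function name:

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

/* Illustrative only. */
static void
handle_gtp_ptype(const struct rte_mbuf *m)
{
        switch (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
        case RTE_PTYPE_TUNNEL_GTPC:
                /* GTP-C control traffic (UDP port 2123) */
                break;
        case RTE_PTYPE_TUNNEL_GTPU:
                /* GTP-U user plane traffic (UDP destination port 2152) */
                break;
        default:
                break;
        }
}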

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v7 2/8] net/i40e: update ptype and pctype info
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Beilei Xing
  2017-09-29 15:50               ` [PATCH v7 1/8] mbuf: support GTP in software packet type parser Beilei Xing
@ 2017-09-29 15:50               ` Beilei Xing
  2017-10-05  2:51                 ` Wu, Jingjing
  2017-09-29 15:50               ` [PATCH v7 3/8] net/i40e: support RSS for new pctype Beilei Xing
                                 ` (7 subsequent siblings)
  9 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29 15:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Update new packet type and new pctype info when downloading
profile.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c  | 312 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_ethdev.h  |  24 ++++
 drivers/net/i40e/rte_pmd_i40e.c |   6 +-
 3 files changed, 341 insertions(+), 1 deletion(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index acdf0de..1da18e4 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -65,6 +65,7 @@
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
 #include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
 
 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
@@ -1042,6 +1043,21 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static void
+i40e_init_customized_info(struct i40e_pf *pf)
+{
+	int i;
+
+	/* Initialize customized pctype */
+	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
+		pf->customized_pctype[i].index = i;
+		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
+		pf->customized_pctype[i].valid = false;
+	}
+
+	pf->gtp_support = false;
+}
+
 static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
@@ -1307,6 +1323,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	/* initialize Traffic Manager configuration */
 	i40e_tm_conf_init(dev);
 
+	/* Initialize customized information */
+	i40e_init_customized_info(pf);
+
 	ret = i40e_init_ethtype_filter_list(dev);
 	if (ret < 0)
 		goto err_init_ethtype_filter_list;
@@ -10913,6 +10932,299 @@ is_i40e_supported(struct rte_eth_dev *dev)
 	return is_device_supported(dev, &rte_i40e_pmd);
 }
 
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
+{
+	int i;
+
+	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+		if (pf->customized_pctype[i].index == index)
+			return &pf->customized_pctype[i];
+	}
+	return NULL;
+}
+
+static int
+i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size, uint32_t proto_num,
+			      struct rte_pmd_i40e_proto_info *proto)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t pctype_num;
+	struct rte_pmd_i40e_ptype_info *pctype;
+	uint32_t buff_size;
+	struct i40e_customized_pctype *new_pctype = NULL;
+	uint8_t proto_id;
+	uint8_t pctype_value;
+	char name[64];
+	uint32_t i, j, n;
+	int ret;
+
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&pctype_num, sizeof(pctype_num),
+				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype number");
+		return -1;
+	}
+	if (!pctype_num) {
+		PMD_DRV_LOG(INFO, "No new pctype added");
+		return -1;
+	}
+
+	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
+	pctype = rte_zmalloc("new_pctype", buff_size, 0);
+	if (!pctype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+	/* get information about new pctype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)pctype, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype list");
+		rte_free(pctype);
+		return -1;
+	}
+
+	/* Update customized pctype. */
+	for (i = 0; i < pctype_num; i++) {
+		pctype_value = pctype[i].ptype_id;
+		memset(name, 0, sizeof(name));
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = pctype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				strcat(name, proto[n].name);
+				strcat(name, "_");
+				break;
+			}
+		}
+		name[strlen(name) - 1] = '\0';
+		if (!strcmp(name, "GTPC"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						      I40E_CUSTOMIZED_GTPC);
+		else if (!strcmp(name, "GTPU_IPV4"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						   I40E_CUSTOMIZED_GTPU_IPV4);
+		else if (!strcmp(name, "GTPU_IPV6"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						   I40E_CUSTOMIZED_GTPU_IPV6);
+		else if (!strcmp(name, "GTPU"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						      I40E_CUSTOMIZED_GTPU);
+		if (new_pctype) {
+			new_pctype->pctype = pctype_value;
+			new_pctype->valid = true;
+		}
+	}
+
+	rte_free(pctype);
+	return 0;
+}
+
+static int
+i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
+			       uint32_t pkg_size, uint32_t proto_num,
+			       struct rte_pmd_i40e_proto_info *proto)
+{
+	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
+	uint8_t port_id = dev->data->port_id;
+	uint32_t ptype_num;
+	struct rte_pmd_i40e_ptype_info *ptype;
+	uint32_t buff_size;
+	uint8_t proto_id;
+	char name[16];
+	uint32_t i, j, n;
+	bool inner_ip;
+	int ret;
+
+	/* get information about new ptype num */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&ptype_num, sizeof(ptype_num),
+				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype number");
+		return ret;
+	}
+	if (!ptype_num) {
+		PMD_DRV_LOG(INFO, "No new ptype added");
+		return -1;
+	}
+
+	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+	ptype = rte_zmalloc("new_ptype", buff_size, 0);
+	if (!ptype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+
+	/* get information about new ptype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)ptype, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype list");
+		rte_free(ptype);
+		return ret;
+	}
+
+	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
+	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
+	if (!ptype_mapping) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		rte_free(ptype);
+		return -1;
+	}
+
+	/* Update ptype mapping table. */
+	for (i = 0; i < ptype_num; i++) {
+		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
+		ptype_mapping[i].sw_ptype = 0;
+		inner_ip = false;
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = ptype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				memset(name, 0, sizeof(name));
+				strcpy(name, proto[n].name);
+				if (!strncmp(name, "IPV4", 4) && !inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+					inner_ip = true;
+				} else if (!strncmp(name, "IPV4", 4) &&
+					   inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+				} else if (!strncmp(name, "IPV6", 4) &&
+					   !inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+					inner_ip = true;
+				} else if (!strncmp(name, "IPV6", 4) &&
+					   inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+				} else if (!strncmp(name, "IPV4FRAG", 8)) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!strncmp(name, "IPV6FRAG", 8)) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!strncmp(name, "GTPC", 4))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPC;
+				else if (!strncmp(name, "GTPU", 4))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPU;
+				else if (!strncmp(name, "UDP", 3))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_UDP;
+				else if (!strncmp(name, "TCP", 3))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_TCP;
+				else if (!strncmp(name, "SCTP", 4))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_SCTP;
+				else if (!strncmp(name, "ICMP", 4) ||
+					 !strncmp(name, "ICMPV6", 6))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_ICMP;
+
+				break;
+			}
+		}
+	}
+
+	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
+						ptype_num, 0);
+	if (ret)
+		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
+
+	rte_free(ptype_mapping);
+	rte_free(ptype);
+	return ret;
+}
+
+void
+i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t proto_num;
+	struct rte_pmd_i40e_proto_info *proto;
+	uint32_t buff_size;
+	uint32_t i;
+	int ret;
+
+	/* get information about protocol number */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				       (uint8_t *)&proto_num, sizeof(proto_num),
+				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol number");
+		return;
+	}
+	if (!proto_num) {
+		PMD_DRV_LOG(INFO, "No new protocol added");
+		return;
+	}
+
+	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
+	proto = rte_zmalloc("new_proto", buff_size, 0);
+	if (!proto) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return;
+	}
+
+	/* get information about protocol list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)proto, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol list");
+		rte_free(proto);
+		return;
+	}
+
+	/* Check if GTP is supported. */
+	for (i = 0; i < proto_num; i++) {
+		if (!strncmp(proto[i].name, "GTP", 3)) {
+			pf->gtp_support = true;
+			break;
+		}
+	}
+
+	/* Update customized pctype info */
+	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
+					    proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No pctype is updated.");
+
+	/* Update customized ptype info */
+	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
+					   proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No ptype is updated.");
+
+	rte_free(proto);
+}
+
 /* Create a QinQ cloud filter
  *
  * The Fortville NIC has limited resources for tunnel filters,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ad80f0f..73fb5c3 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -722,6 +722,21 @@ struct i40e_tm_conf {
 	bool committed;
 };
 
+enum i40e_new_pctype {
+	I40E_CUSTOMIZED_GTPC = 0,
+	I40E_CUSTOMIZED_GTPU_IPV4,
+	I40E_CUSTOMIZED_GTPU_IPV6,
+	I40E_CUSTOMIZED_GTPU,
+	I40E_CUSTOMIZED_MAX,
+};
+
+#define I40E_FILTER_PCTYPE_INVALID     0
+struct i40e_customized_pctype {
+	enum i40e_new_pctype index;  /* Indicate which customized pctype */
+	uint8_t pctype;   /* New pctype value */
+	bool valid;   /* Check if it's valid */
+};
+
 /*
  * Structure to store private data specific for PF instance.
  */
@@ -786,6 +801,11 @@ struct i40e_pf {
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
+
+	/* Dynamic Device Personalization */
+	bool gtp_support; /* 1 - support GTP-C and GTP-U */
+	/* customer customized pctype */
+	struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
 };
 
 enum pending_msg {
@@ -1003,6 +1023,10 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
 int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void i40e_tm_conf_init(struct rte_eth_dev *dev);
 void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
+void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+				 uint32_t pkg_size);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index f57e59b..5aa9c69 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1608,6 +1608,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
 		return -EINVAL;
 	}
 
+	i40e_update_customized_info(dev, buff, size);
+
 	/* Find metadata segment */
 	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
 							pkg_hdr);
@@ -2109,7 +2111,9 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
 	    tnl != RTE_PTYPE_TUNNEL_VXLAN &&
 	    tnl != RTE_PTYPE_TUNNEL_NVGRE &&
 	    tnl != RTE_PTYPE_TUNNEL_GENEVE &&
-	    tnl != RTE_PTYPE_TUNNEL_GRENAT)
+	    tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPC &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPU)
 		return -1;
 
 	if (il2 &&
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v7 3/8] net/i40e: support RSS for new pctype
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Beilei Xing
  2017-09-29 15:50               ` [PATCH v7 1/8] mbuf: support GTP in software packet type parser Beilei Xing
  2017-09-29 15:50               ` [PATCH v7 2/8] net/i40e: update ptype and pctype info Beilei Xing
@ 2017-09-29 15:50               ` Beilei Xing
  2017-09-29 15:50               ` [PATCH v7 4/8] ethdev: add GTP items to support flow API Beilei Xing
                                 ` (6 subsequent siblings)
  9 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-09-29 15:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Enable RSS for new pctypes after downloading a
new profile.
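
For reference, a minimal usage sketch from the application side, assuming the
GTP profile image was already read into buf/size; the package op value
(RTE_PMD_I40E_PKG_OP_WR_ADD) and the trimmed error handling are assumptions
made for the example only:

#include <rte_ethdev.h>
#include <rte_pmd_i40e.h>

/* Sketch: write the DDP profile so the new GTP pctypes become known to the
 * PMD, then start the port; dev_start() turns on the RSS hash enable bits
 * for every valid customized pctype, as done in this patch. */
static int
setup_gtp_rss(uint8_t port_id, uint8_t *buf, uint32_t size)
{
	int ret;

	ret = rte_pmd_i40e_process_ddp_package(port_id, buf, size,
					       RTE_PMD_I40E_PKG_OP_WR_ADD);
	if (ret)
		return ret;

	return rte_eth_dev_start(port_id);
}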

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 1da18e4..87e451a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1934,6 +1934,31 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	return i40e_phy_conf_link(hw, abilities, speed, true);
 }
 
+static void
+i40e_customized_pctype_hash_set(struct i40e_pf *pf, bool enable)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint64_t hena;
+	int i;
+
+	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+		if (pf->customized_pctype[i].valid) {
+			if (enable)
+				hena |= 1ULL << pf->customized_pctype[i].pctype;
+			else
+				hena &= ~(1ULL <<
+					  pf->customized_pctype[i].pctype);
+		}
+	}
+
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+	I40E_WRITE_FLUSH(hw);
+}
+
 static int
 i40e_dev_start(struct rte_eth_dev *dev)
 {
@@ -2081,6 +2106,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
 			    "please call hierarchy_commit() "
 			    "before starting the port");
 
+	i40e_customized_pctype_hash_set(pf, true);
+
 	return I40E_SUCCESS;
 
 err_up:
@@ -2155,6 +2182,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
 	int i;
 	int ret;
 
+	i40e_customized_pctype_hash_set(pf, false);
+
 	PMD_INIT_FUNC_TRACE();
 
 	i40e_dev_stop(dev);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v7 4/8] ethdev: add GTP items to support flow API
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Beilei Xing
                                 ` (2 preceding siblings ...)
  2017-09-29 15:50               ` [PATCH v7 3/8] net/i40e: support RSS for new pctype Beilei Xing
@ 2017-09-29 15:50               ` Beilei Xing
  2017-10-05  8:01                 ` Wu, Jingjing
  2017-09-29 15:50               ` [PATCH v7 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
                                 ` (5 subsequent siblings)
  9 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29 15:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds GTP, GTPC and GTPU items for
the generic flow API, and also exposes item fields
through the flow command.
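
For illustration only (not part of the patch), a minimal sketch of how an
application could use the new GTPU item to steer GTP-U traffic by TEID; the
port, queue and TEID values are placeholders and error handling is omitted:

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch: match GTP-U packets carrying TEID 0x1234 and send them to queue 3.
 * Only the TEID is matched, which is also what the default GTP mask does. */
static struct rte_flow *
gtpu_teid_to_queue(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp spec = { .teid = RTE_BE32(0x1234) };
	struct rte_flow_item_gtp mask = { .teid = RTE_BE32(0xffffffff) };
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU, .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}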

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
 app/test-pmd/config.c                       |  3 ++
 doc/guides/prog_guide/rte_flow.rst          | 17 ++++++++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
 lib/librte_ether/rte_flow.h                 | 52 +++++++++++++++++++++++++++++
 5 files changed, 116 insertions(+)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a17a004..26c3e4f 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -171,6 +171,10 @@ enum index {
 	ITEM_GRE_PROTO,
 	ITEM_FUZZY,
 	ITEM_FUZZY_THRESH,
+	ITEM_GTP,
+	ITEM_GTP_TEID,
+	ITEM_GTPC,
+	ITEM_GTPU,
 
 	/* Validate/create actions. */
 	ACTIONS,
@@ -451,6 +455,9 @@ static const enum index next_item[] = {
 	ITEM_MPLS,
 	ITEM_GRE,
 	ITEM_FUZZY,
+	ITEM_GTP,
+	ITEM_GTPC,
+	ITEM_GTPU,
 	ZERO,
 };
 
@@ -588,6 +595,12 @@ static const enum index item_gre[] = {
 	ZERO,
 };
 
+static const enum index item_gtp[] = {
+	ITEM_GTP_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
 static const enum index next_action[] = {
 	ACTION_END,
 	ACTION_VOID,
@@ -1421,6 +1434,33 @@ static const struct token token_list[] = {
 		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
 					thresh)),
 	},
+	[ITEM_GTP] = {
+		.name = "gtp",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTP_TEID] = {
+		.name = "teid",
+		.help = "tunnel endpoint identifier",
+		.next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
+		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
+	},
+	[ITEM_GTPC] = {
+		.name = "gtpc",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTPU] = {
+		.name = "gtpu",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
 
 	/* Validate/create actions. */
 	[ACTIONS] = {
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index e8e311c..9b09bbd 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -949,6 +949,9 @@ static const struct {
 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
+	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
 };
 
 /** Compute storage space needed by item specification. */
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 662a912..73f12ee 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -955,6 +955,23 @@ Usage example, fuzzy match a TCPv4 packets:
    | 4     | END      |
    +-------+----------+
 
+Item: ``GTP``, ``GTPC``, ``GTPU``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Matches a GTPv1 header.
+
+Note: GTP, GTPC and GTPU use the same structure. GTPC and GTPU items
+are defined for a user-friendly API when creating GTP-C and GTP-U
+flow rules.
+
+- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
+  extension header flag (1b), sequence number flag (1b), N-PDU number
+  flag (1b).
+- ``msg_type``: message type.
+- ``msg_len``: message length.
+- ``teid``: tunnel endpoint identifier.
+- Default ``mask`` matches teid only.
+
 Actions
 ~~~~~~~
 
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 2ed62f5..4c2facc 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2696,6 +2696,10 @@ This section lists supported pattern items and their attributes, if any.
 
   - ``thresh {unsigned}``: accuracy threshold.
 
+- ``gtp``, ``gtpc``, ``gtpu``: match GTPv1 header.
+
+  - ``teid {unsigned}``: tunnel endpoint identifier.
+
 Actions list
 ^^^^^^^^^^^^
 
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index bba6169..b1a1b97 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -309,6 +309,33 @@ enum rte_flow_item_type {
 	 * See struct rte_flow_item_fuzzy.
 	 */
 	RTE_FLOW_ITEM_TYPE_FUZZY,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTP,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-C packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPC,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-U packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPU,
 };
 
 /**
@@ -735,6 +762,31 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
 #endif
 
 /**
+ * RTE_FLOW_ITEM_TYPE_GTP.
+ *
+ * Matches a GTPv1 header.
+ */
+struct rte_flow_item_gtp {
+	/**
+	 * Version (3b), protocol type (1b), reserved (1b),
+	 * Extension header flag (1b),
+	 * Sequence number flag (1b),
+	 * N-PDU number flag (1b).
+	 */
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type; /**< Message type. */
+	rte_be16_t msg_len; /**< Message length. */
+	rte_be32_t teid; /**< Tunnel endpoint identifier. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
+	.teid = RTE_BE32(0xffffffff),
+};
+#endif
+
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v7 5/8] net/i40e: finish integration FDIR with generic flow API
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Beilei Xing
                                 ` (3 preceding siblings ...)
  2017-09-29 15:50               ` [PATCH v7 4/8] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-09-29 15:50               ` Beilei Xing
  2017-10-05  2:52                 ` Wu, Jingjing
  2017-09-29 15:50               ` [PATCH v7 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
                                 ` (4 subsequent siblings)
  9 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29 15:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

rte_eth_fdir_* structures are still used in FDIR functions.
This patch adds i40e private FDIR-related structures and
functions to finish integrating FDIR with the generic flow API.
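
Roughly, instead of filling a struct rte_eth_fdir_filter the flow parser now
fills the private entry defined below. A hand-written sketch of such an entry
for an IPv4/UDP rule (driver-internal view, types from i40e_ethdev.h; the
addresses, ports and queue are placeholders):

/* Sketch: what i40e_flow_parse_fdir_pattern()/_action() would produce for an
 * IPv4/UDP rule steered to queue 4 with an FDIR ID report. */
struct i40e_fdir_filter_conf conf = {
	.soft_id = 1,
	.input = {
		.pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
		.flow.udp4_flow = {
			.ip = { .src_ip = RTE_BE32(0xc0a80001) }, /* 192.168.0.1 */
			.dst_port = RTE_BE16(2152),
		},
	},
	.action = {
		.rx_queue = 4,
		.behavior = I40E_FDIR_ACCEPT,
		.report_status = I40E_FDIR_REPORT_ID,
	},
};

/* i40e_flow_add_del_fdir_filter(dev, &conf, 1) then programs the rule and
 * i40e_flow_add_del_fdir_filter(dev, &conf, 0) removes it again. */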

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  83 ++++++-
 drivers/net/i40e/i40e_fdir.c   | 489 +++++++++++++++++++++++++++++++++++++++--
 drivers/net/i40e/i40e_flow.c   |  76 +++----
 3 files changed, 584 insertions(+), 64 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 73fb5c3..4d690a1 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -461,6 +461,80 @@ struct i40e_vmdq_info {
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
 /*
+ * A union that contains the inputs for all types of flows.
+ * Items in flows need to be in big endian.
+ */
+union i40e_fdir_flow {
+	struct rte_eth_l2_flow     l2_flow;
+	struct rte_eth_udpv4_flow  udp4_flow;
+	struct rte_eth_tcpv4_flow  tcp4_flow;
+	struct rte_eth_sctpv4_flow sctp4_flow;
+	struct rte_eth_ipv4_flow   ip4_flow;
+	struct rte_eth_udpv6_flow  udp6_flow;
+	struct rte_eth_tcpv6_flow  tcp6_flow;
+	struct rte_eth_sctpv6_flow sctp6_flow;
+	struct rte_eth_ipv6_flow   ipv6_flow;
+};
+
+/* A structure used to contain the extended input of a flow */
+struct i40e_fdir_flow_ext {
+	uint16_t vlan_tci;
+	uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/* It is filled by the flexible payload to match. */
+	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
+	uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+};
+
+/* A structure used to define the input for a flow director filter entry */
+struct i40e_fdir_input {
+	enum i40e_filter_pctype pctype;
+	union i40e_fdir_flow flow;
+	/* Flow fields to match, dependent on flow_type */
+	struct i40e_fdir_flow_ext flow_ext;
+	/* Additional fields to match */
+};
+
+/* Behavior to be taken if an FDIR filter matches */
+enum i40e_fdir_behavior {
+	I40E_FDIR_ACCEPT = 0,
+	I40E_FDIR_REJECT,
+	I40E_FDIR_PASSTHRU,
+};
+
+/* Flow director report status
+ * It defines what will be reported if FDIR entry is matched.
+ */
+enum i40e_fdir_status {
+	I40E_FDIR_NO_REPORT_STATUS = 0, /* Report nothing. */
+	I40E_FDIR_REPORT_ID,            /* Only report FD ID. */
+	I40E_FDIR_REPORT_ID_FLEX_4,     /* Report FD ID and 4 flex bytes. */
+	I40E_FDIR_REPORT_FLEX_8,        /* Report 8 flex bytes. */
+};
+
+/* A structure used to define the action taken when an FDIR filter matches. */
+struct i40e_fdir_action {
+	uint16_t rx_queue;        /* Queue assigned to when the filter matches. */
+	enum i40e_fdir_behavior behavior;     /* Behavior to be taken */
+	enum i40e_fdir_status report_status;  /* Status report option */
+	/* If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or
+	 * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+	 * flex bytes start from in flexible payload.
+	 */
+	uint8_t flex_off;
+};
+
+/* A structure used to define the flow director filter entry by filter_ctrl API
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct i40e_fdir_filter_conf {
+	uint32_t soft_id;
+	/* ID, a unique value is required when dealing with an FDIR entry */
+	struct i40e_fdir_input input;    /* Input set */
+	struct i40e_fdir_action action;  /* Action taken when the filter matches */
+};
+
+/*
  * Structure to store flex pit for flow director.
  */
 struct i40e_fdir_flex_pit {
@@ -483,7 +557,7 @@ struct i40e_fdir_flex_mask {
 
 struct i40e_fdir_filter {
 	TAILQ_ENTRY(i40e_fdir_filter) rules;
-	struct rte_eth_fdir_filter fdir;
+	struct i40e_fdir_filter_conf fdir;
 };
 
 TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
@@ -907,7 +981,7 @@ extern const struct rte_flow_ops i40e_flow_ops;
 
 union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_fdir_filter fdir_filter;
+	struct i40e_fdir_filter_conf fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
@@ -981,7 +1055,7 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
 int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
 				 struct i40e_ethertype_filter_input *input);
 int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
-			    struct rte_eth_fdir_input *input);
+			    struct i40e_fdir_input *input);
 struct i40e_tunnel_filter *
 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
 			     const struct i40e_tunnel_filter_input *input);
@@ -994,6 +1068,9 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf,
 int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 			     const struct rte_eth_fdir_filter *filter,
 			     bool add);
+int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 84c0a1f..e9e2f44 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -100,13 +100,18 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
 			const struct rte_eth_fdir_filter *filter,
 			bool add);
-static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter);
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input);
+			const struct i40e_fdir_input *input);
 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
 				   struct i40e_fdir_filter *filter);
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 
 static int
 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -934,6 +939,262 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static inline int
+i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+				unsigned char *raw_pkt,
+				bool vlan)
+{
+	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+	uint16_t *ether_type;
+	uint8_t len = 2 * sizeof(struct ether_addr);
+	struct ipv4_hdr *ip;
+	struct ipv6_hdr *ip6;
+	static const uint8_t next_proto[] = {
+		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+	};
+
+	raw_pkt += 2 * sizeof(struct ether_addr);
+	if (vlan && fdir_input->flow_ext.vlan_tci) {
+		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+		rte_memcpy(raw_pkt + sizeof(uint16_t),
+			   &fdir_input->flow_ext.vlan_tci,
+			   sizeof(uint16_t));
+		raw_pkt += sizeof(vlan_frame);
+		len += sizeof(vlan_frame);
+	}
+	ether_type = (uint16_t *)raw_pkt;
+	raw_pkt += sizeof(uint16_t);
+	len += sizeof(uint16_t);
+
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		*ether_type = fdir_input->flow.l2_flow.ether_type;
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		ip = (struct ipv4_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+		/* set the total length to the default value */
+		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+					fdir_input->flow.ip4_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+					fdir_input->flow.ip4_flow.ttl :
+					I40E_FDIR_IP_DEFAULT_TTL;
+		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+		len += sizeof(struct ipv4_hdr);
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		ip6 = (struct ipv6_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		ip6->vtc_flow =
+			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					 (fdir_input->flow.ipv6_flow.tc <<
+					  I40E_FDIR_IPv6_TC_OFFSET));
+		ip6->payload_len =
+			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+					fdir_input->flow.ipv6_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+					fdir_input->flow.ipv6_flow.hop_limits :
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		rte_memcpy(&ip6->src_addr,
+			   &fdir_input->flow.ipv6_flow.dst_ip,
+			   IPV6_ADDR_LEN);
+		rte_memcpy(&ip6->dst_addr,
+			   &fdir_input->flow.ipv6_flow.src_ip,
+			   IPV6_ADDR_LEN);
+		len += sizeof(struct ipv6_hdr);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
+	}
+	return len;
+}
+
+/* i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+			     const struct i40e_fdir_input *fdir_input,
+			     unsigned char *raw_pkt)
+{
+	unsigned char *payload, *ptr;
+	struct udp_hdr *udp;
+	struct tcp_hdr *tcp;
+	struct sctp_hdr *sctp;
+	uint8_t size, dst = 0;
+	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+	int len;
+
+	/* fill the ethernet and IP head */
+	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+					      !!fdir_input->flow_ext.vlan_tci);
+	if (len < 0)
+		return -EINVAL;
+
+	/* fill the L4 head */
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		tcp->src_port = fdir_input->flow.tcp6_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.tcp6_flow.src_port;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		payload = raw_pkt + len;
+		/**
+		 * An ARP packet is a special case in which the payload
+		 * starts after the whole ARP header
+		 */
+		if (fdir_input->flow.l2_flow.ether_type ==
+				rte_cpu_to_be_16(ETHER_TYPE_ARP))
+			payload += sizeof(struct arp_hdr);
+		set_idx = I40E_FLXPLD_L2_IDX;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
+		return -EINVAL;
+	}
+
+	/* fill the flexbytes to payload */
+	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+		size = pf->fdir.flex_set[pit_idx].size;
+		if (size == 0)
+			continue;
+		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+		ptr = payload +
+		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+		(void)rte_memcpy(ptr,
+				 &fdir_input->flow_ext.flexbytes[dst],
+				 size * sizeof(uint16_t));
+	}
+
+	return 0;
+}
+
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -1007,17 +1268,17 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
 }
 
 static int
-i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter)
 {
-	rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
 	return 0;
 }
 
 /* Check if there exists the flow director filter */
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input)
+			const struct i40e_fdir_input *input)
 {
 	int ret;
 
@@ -1052,7 +1313,7 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
 
 /* Delete a flow director filter from the SW list */
 int
-i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
 {
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	struct i40e_fdir_filter *filter;
@@ -1082,16 +1343,13 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
  */
 int
 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
-			    const struct rte_eth_fdir_filter *filter,
-			    bool add)
+			 const struct rte_eth_fdir_filter *filter,
+			 bool add)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
 	enum i40e_filter_pctype pctype;
-	struct i40e_fdir_info *fdir_info = &pf->fdir;
-	struct i40e_fdir_filter *fdir_filter, *node;
-	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
 	int ret = 0;
 
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1114,6 +1372,68 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+		return ret;
+	}
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		/* get translated pctype value in fd pctype register */
+		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+			hw, I40E_GLQF_FD_PCTYPES(
+			(int)i40e_flowtype_to_pctype(
+			filter->input.flow_type)));
+	} else
+		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
+	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+			    pctype);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the rte_eth_dev structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct i40e_fdir_filter_conf *filter,
+			      bool add)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	enum i40e_filter_pctype pctype;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_fdir_filter *fdir_filter, *node;
+	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+	int ret = 0;
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
+		return -ENOTSUP;
+	}
+
+	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "Invalid queue ID");
+		return -EINVAL;
+	}
+	if (filter->input.flow_ext.is_vf &&
+	    filter->input.flow_ext.dst_id >= pf->vf_num) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID");
+		return -EINVAL;
+	}
+
 	/* Check if there is the filter in SW list */
 	memset(&check_filter, 0, sizeof(check_filter));
 	i40e_fdir_filter_convert(filter, &check_filter);
@@ -1132,7 +1452,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 
 	memset(pkt, 0, I40E_FDIR_PKT_LEN);
 
-	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
 		return ret;
@@ -1142,12 +1462,11 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		/* get translated pctype value in fd pctype register */
 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
 			hw, I40E_GLQF_FD_PCTYPES(
-			(int)i40e_flowtype_to_pctype(
-			filter->input.flow_type)));
+			(int)filter->input.pctype));
 	} else
-		pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+		pctype = filter->input.pctype;
 
-	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
 			    pctype);
@@ -1291,13 +1610,147 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
 		return -ETIMEDOUT;
 	}
 	/* totally delay 10 ms to check programming status*/
+	rte_delay_us(I40E_FDIR_MAX_WAIT_US);
+	if (i40e_check_fdir_programming_status(rxq) < 0) {
+		PMD_DRV_LOG(ERR,
+		    "Failed to program FDIR filter: programming status reported.");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
+ * This is done with a Flow Director Programming Descriptor followed by a
+ * packet structure that contains the filter fields that need to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add)
+{
+	struct i40e_tx_queue *txq = pf->fdir.txq;
+	struct i40e_rx_queue *rxq = pf->fdir.rxq;
+	const struct i40e_fdir_action *fdir_action = &filter->action;
+	volatile struct i40e_tx_desc *txdp;
+	volatile struct i40e_filter_program_desc *fdirdp;
+	uint32_t td_cmd;
+	uint16_t vsi_id, i;
+	uint8_t dest;
+
+	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+	fdirdp = (volatile struct i40e_filter_program_desc *)
+				(&txq->tx_ring[txq->tx_tail]);
+
+	fdirdp->qindex_flex_ptype_vsi =
+			rte_cpu_to_le_32((fdir_action->rx_queue <<
+					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((fdir_action->flex_off <<
+					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((pctype <<
+					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+	if (filter->input.flow_ext.is_vf)
+		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+	else
+		/* Use LAN VSI Id by default */
+		vsi_id = pf->main_vsi->vsi_id;
+	fdirdp->qindex_flex_ptype_vsi |=
+		rte_cpu_to_le_32(((uint32_t)vsi_id <<
+				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+	fdirdp->dtype_cmd_cntindex =
+			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+	if (add)
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+	else
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	if (fdir_action->behavior == I40E_FDIR_REJECT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+	else {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
+		return -EINVAL;
+	}
+
+	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+				I40E_TXD_FLTR_QW1_DEST_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+		rte_cpu_to_le_32((fdir_action->report_status <<
+				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(
+			((uint32_t)pf->fdir.match_counter_index <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+	txdp = &txq->tx_ring[txq->tx_tail + 1];
+	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+	td_cmd = I40E_TX_DESC_CMD_EOP |
+		 I40E_TX_DESC_CMD_RS  |
+		 I40E_TX_DESC_CMD_DUMMY;
+
+	txdp->cmd_type_offset_bsz =
+		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+	/* Update the tx tail register */
+	rte_wmb();
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if ((txdp->cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+			break;
+		rte_delay_us(1);
+	}
+	if (i >= I40E_FDIR_MAX_WAIT_US) {
+		PMD_DRV_LOG(ERR,
+		    "Failed to program FDIR filter: time out to get DD on tx queue.");
+		return -ETIMEDOUT;
+	}
+	/* delay up to 10 ms in total while checking the programming status */
 	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
 		if (i40e_check_fdir_programming_status(rxq) >= 0)
 			return 0;
 		rte_delay_us(1);
 	}
 	PMD_DRV_LOG(ERR,
-		"Failed to program FDIR filter: programming status reported.");
+		 "Failed to program FDIR filter: programming status reported.");
 	return -ETIMEDOUT;
 }
 
@@ -1580,7 +2033,7 @@ i40e_fdir_filter_restore(struct i40e_pf *pf)
 	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
 
 	TAILQ_FOREACH(f, fdir_list, rules)
-		i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
 
 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
 	guarant_cnt =
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b92719a..73af7fd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -84,11 +84,11 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					const struct rte_flow_item *pattern,
 					struct rte_flow_error *error,
-					struct rte_eth_fdir_filter *filter);
+					struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 				       const struct rte_flow_action *actions,
 				       struct rte_flow_error *error,
-				       struct rte_eth_fdir_filter *filter);
+				       struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				 const struct rte_flow_action *actions,
 				 struct rte_flow_error *error,
@@ -2315,7 +2315,7 @@ static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			     const struct rte_flow_item *pattern,
 			     struct rte_flow_error *error,
-			     struct rte_eth_fdir_filter *filter)
+			     struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_item *item = pattern;
@@ -2329,8 +2329,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
-	enum i40e_filter_pctype pctype;
+	enum i40e_filter_pctype pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
@@ -2402,7 +2401,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2420,7 +2419,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2457,13 +2456,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					input_set |= I40E_INSET_IPV4_PROTO;
 
 				/* Get filter info */
-				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+				pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 				/* Check if it is fragment. */
 				frag_off = ipv4_spec->hdr.fragment_offset;
 				frag_off = rte_be_to_cpu_16(frag_off);
 				if (frag_off & IPV4_HDR_OFFSET_MASK ||
 				    frag_off & IPV4_HDR_MF_FLAG)
-					flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
 
 				/* Get the filter info */
 				filter->input.flow.ip4_flow.proto =
@@ -2535,11 +2534,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				/* Check if it is fragment. */
 				if (ipv6_spec->hdr.proto ==
 				    I40E_IPV6_FRAG_HEADER)
-					flow_type =
-						RTE_ETH_FLOW_FRAG_IPV6;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
 				else
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+					pctype =
+					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
 			}
 
 			layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2572,11 +2570,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.tcp4_flow.src_port =
@@ -2616,11 +2614,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.udp4_flow.src_port =
@@ -2663,11 +2661,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.sctp4_flow.src_port =
@@ -2776,14 +2774,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	pctype = i40e_flowtype_to_pctype(flow_type);
-	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Unsupported flow type");
-		return -rte_errno;
-	}
-
 	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
 	if (ret == -1) {
 		rte_flow_error_set(error, EINVAL,
@@ -2797,7 +2787,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->input.flow_type = flow_type;
+	filter->input.pctype = pctype;
 
 	/* Store flex mask to SW */
 	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
@@ -2832,7 +2822,7 @@ static int
 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 			    const struct rte_flow_action *actions,
 			    struct rte_flow_error *error,
-			    struct rte_eth_fdir_filter *filter)
+			    struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
@@ -2855,13 +2845,13 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 					   "Invalid queue ID for FDIR.");
 			return -rte_errno;
 		}
-		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		filter->action.behavior = I40E_FDIR_ACCEPT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_DROP:
-		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+		filter->action.behavior = I40E_FDIR_REJECT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
-		filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+		filter->action.behavior = I40E_FDIR_PASSTHRU;
 		break;
 	default:
 		rte_flow_error_set(error, EINVAL,
@@ -2876,11 +2866,11 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_MARK:
 		mark_spec = (const struct rte_flow_action_mark *)act->conf;
-		filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+		filter->action.report_status = I40E_FDIR_REPORT_ID;
 		filter->soft_id = mark_spec->id;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
-		filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
 		break;
 	case RTE_FLOW_ACTION_TYPE_END:
 		return 0;
@@ -2911,7 +2901,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error,
 			    union i40e_filter_t *filter)
 {
-	struct rte_eth_fdir_filter *fdir_filter =
+	struct i40e_fdir_filter_conf *fdir_filter =
 		&filter->fdir_filter;
 	int ret;
 
@@ -3877,7 +3867,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_ethertype_filter_list);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 				       &cons_filter.fdir_filter, 1);
 		if (ret)
 			goto free_flow;
@@ -3927,7 +3917,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 			      (struct i40e_tunnel_filter *)flow->rule);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
 		break;
 	default:
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v7 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Beilei Xing
                                 ` (4 preceding siblings ...)
  2017-09-29 15:50               ` [PATCH v7 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
@ 2017-09-29 15:50               ` Beilei Xing
  2017-10-05  3:09                 ` Wu, Jingjing
  2017-09-29 15:50               ` [PATCH v7 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
                                 ` (3 subsequent siblings)
  9 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29 15:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds FDIR support for GTP-C and GTP-U. The
input set of GTP-C and GTP-U is TEID.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  30 +++++
 drivers/net/i40e/i40e_fdir.c   | 214 ++++++++++++++++++++++++---------
 drivers/net/i40e/i40e_flow.c   | 267 +++++++++++++++++++++++++++++++++++------
 3 files changed, 413 insertions(+), 98 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 4d690a1..502f6c6 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -460,6 +460,25 @@ struct i40e_vmdq_info {
 #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
+/* A structure used to define the input for GTP flow */
+struct i40e_gtp_flow {
+	struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */
+	uint8_t msg_type;              /* Message type. */
+	uint32_t teid;                 /* TEID in big endian. */
+};
+
+/* A structure used to define the input for GTP IPV4 flow */
+struct i40e_gtp_ipv4_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv4_flow ip4;
+};
+
+/* A structure used to define the input for GTP IPV6 flow */
+struct i40e_gtp_ipv6_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv6_flow ip6;
+};
+
 /*
  * A union contains the inputs for all types of flow
  * items in flows need to be in big endian
@@ -474,6 +493,14 @@ union i40e_fdir_flow {
 	struct rte_eth_tcpv6_flow  tcp6_flow;
 	struct rte_eth_sctpv6_flow sctp6_flow;
 	struct rte_eth_ipv6_flow   ipv6_flow;
+	struct i40e_gtp_flow       gtp_flow;
+	struct i40e_gtp_ipv4_flow  gtp_ipv4_flow;
+	struct i40e_gtp_ipv6_flow  gtp_ipv6_flow;
+};
+
+enum i40e_fdir_ip_type {
+	I40E_FDIR_IPTYPE_IPV4,
+	I40E_FDIR_IPTYPE_IPV6,
 };
 
 /* A structure used to contain extend input of flow */
@@ -483,6 +510,9 @@ struct i40e_fdir_flow_ext {
 	/* It is filled by the flexible payload to match. */
 	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
 	uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+	bool inner_ip;   /* If there is inner ip */
+	enum i40e_fdir_ip_type iip_type; /* ip type for inner ip */
+	bool customized_pctype; /* If customized pctype is used */
 };
 
 /* A structure used to define the input for a flow director filter entry */
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index e9e2f44..bab8981 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -71,6 +71,16 @@
 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
 #define I40E_FDIR_UDP_DEFAULT_LEN           400
+#define I40E_FDIR_GTP_DEFAULT_LEN           384
+#define I40E_FDIR_INNER_IP_DEFAULT_LEN      384
+#define I40E_FDIR_INNER_IPV6_DEFAULT_LEN    344
+
+#define I40E_FDIR_GTPC_DST_PORT             2123
+#define I40E_FDIR_GTPU_DST_PORT             2152
+#define I40E_FDIR_GTP_VER_FLAG_0X30         0x30
+#define I40E_FDIR_GTP_VER_FLAG_0X32         0x32
+#define I40E_FDIR_GTP_MSG_TYPE_0X01         0x01
+#define I40E_FDIR_GTP_MSG_TYPE_0XFF         0xFF
 
 /* Wait time for fdir filter programming */
 #define I40E_FDIR_MAX_WAIT_US 10000
@@ -939,16 +949,34 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static struct i40e_customized_pctype *
+i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
+{
+	struct i40e_customized_pctype *cus_pctype;
+	enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
+
+	for (; i < I40E_CUSTOMIZED_MAX; i++) {
+		cus_pctype = &pf->customized_pctype[i];
+		if (pctype == cus_pctype->pctype)
+			return cus_pctype;
+	}
+	return NULL;
+}
+
 static inline int
-i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
+				const struct i40e_fdir_input *fdir_input,
 				unsigned char *raw_pkt,
 				bool vlan)
 {
+	struct i40e_customized_pctype *cus_pctype = NULL;
 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
 	uint16_t *ether_type;
 	uint8_t len = 2 * sizeof(struct ether_addr);
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
+	uint8_t pctype = fdir_input->pctype;
+	bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
 	static const uint8_t next_proto[] = {
 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
@@ -975,27 +1003,30 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 	raw_pkt += sizeof(uint16_t);
 	len += sizeof(uint16_t);
 
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	if (is_customized_pctype) {
+		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+		if (!cus_pctype)
+			PMD_DRV_LOG(ERR, "unknown pctype %u.",
+				    fdir_input->pctype);
+	}
+
+	if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
 		*ether_type = fdir_input->flow.l2_flow.ether_type;
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
+		 is_customized_pctype) {
 		ip = (struct ipv4_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
 		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
 		/* set the total length to the default value */
 		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
-		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
-					fdir_input->flow.ip4_flow.proto :
-					next_proto[fdir_input->pctype];
 		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
-					fdir_input->flow.ip4_flow.ttl :
-					I40E_FDIR_IP_DEFAULT_TTL;
+			fdir_input->flow.ip4_flow.ttl :
+			I40E_FDIR_IP_DEFAULT_TTL;
 		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
 		/**
 		 * The source and destination fields in the transmitted packet
@@ -1004,13 +1035,22 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		 */
 		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
 		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+
+		if (!is_customized_pctype)
+			ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+				fdir_input->flow.ip4_flow.proto :
+				next_proto[fdir_input->pctype];
+		else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
+			ip->next_proto_id = IPPROTO_UDP;
 		len += sizeof(struct ipv4_hdr);
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		ip6 = (struct ipv6_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
@@ -1021,11 +1061,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		ip6->payload_len =
 			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
 		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
-					fdir_input->flow.ipv6_flow.proto :
-					next_proto[fdir_input->pctype];
+			fdir_input->flow.ipv6_flow.proto :
+			next_proto[fdir_input->pctype];
 		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
-					fdir_input->flow.ipv6_flow.hop_limits :
-					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+			fdir_input->flow.ipv6_flow.hop_limits :
+			I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
 		/**
 		 * The source and destination fields in the transmitted packet
 		 * need to be presented in a reversed order with respect
@@ -1038,12 +1078,12 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 			   &fdir_input->flow.ipv6_flow.src_ip,
 			   IPV6_ADDR_LEN);
 		len += sizeof(struct ipv6_hdr);
-		break;
-	default:
+	} else {
 		PMD_DRV_LOG(ERR, "unknown pctype %u.",
 			    fdir_input->pctype);
 		return -1;
 	}
+
 	return len;
 }
 
@@ -1057,23 +1097,28 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 			     const struct i40e_fdir_input *fdir_input,
 			     unsigned char *raw_pkt)
 {
-	unsigned char *payload, *ptr;
+	unsigned char *payload = NULL;
+	unsigned char *ptr;
 	struct udp_hdr *udp;
 	struct tcp_hdr *tcp;
 	struct sctp_hdr *sctp;
+	struct rte_flow_item_gtp *gtp;
+	struct ipv4_hdr *gtp_ipv4;
+	struct ipv6_hdr *gtp_ipv6;
 	uint8_t size, dst = 0;
 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
 	int len;
+	uint8_t pctype = fdir_input->pctype;
+	struct i40e_customized_pctype *cus_pctype;
 
 	/* fill the ethernet and IP head */
-	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
 					      !!fdir_input->flow_ext.vlan_tci);
 	if (len < 0)
 		return -EINVAL;
 
 	/* fill the L4 head */
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1084,9 +1129,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1097,9 +1140,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1110,15 +1151,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1129,9 +1166,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1142,9 +1177,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
 		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1155,14 +1188,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	} else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
 		payload = raw_pkt + len;
 		/**
 		 * ARP packet is a special case on which the payload
@@ -1172,10 +1202,76 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 				rte_cpu_to_be_16(ETHER_TYPE_ARP))
 			payload += sizeof(struct arp_hdr);
 		set_idx = I40E_FLXPLD_L2_IDX;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
-		return -EINVAL;
+	} else if (fdir_input->flow_ext.customized_pctype) {
+		/* If customized pctype is used */
+		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+		if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
+			udp = (struct udp_hdr *)(raw_pkt + len);
+			udp->dgram_len =
+				rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+			gtp = (struct rte_flow_item_gtp *)
+				((unsigned char *)udp + sizeof(struct udp_hdr));
+			gtp->msg_len =
+				rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+			gtp->teid = fdir_input->flow.gtp_flow.teid;
+			gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
+
+			/* GTP-C message type is not supported. */
+			if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
+				udp->dst_port =
+				      rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
+				gtp->v_pt_rsv_flags =
+					I40E_FDIR_GTP_VER_FLAG_0X32;
+			} else {
+				udp->dst_port =
+				      rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
+				gtp->v_pt_rsv_flags =
+					I40E_FDIR_GTP_VER_FLAG_0X30;
+			}
+
+			if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
+				gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
+				gtp_ipv4 = (struct ipv4_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv4->version_ihl =
+					I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+				gtp_ipv4->next_proto_id = IPPROTO_IP;
+				gtp_ipv4->total_length =
+					rte_cpu_to_be_16(
+						I40E_FDIR_INNER_IP_DEFAULT_LEN);
+				payload = (unsigned char *)gtp_ipv4 +
+					sizeof(struct ipv4_hdr);
+			} else if (cus_pctype->index ==
+				   I40E_CUSTOMIZED_GTPU_IPV6) {
+				gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
+				gtp_ipv6 = (struct ipv6_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv6->vtc_flow =
+					rte_cpu_to_be_32(
+					       I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					       (0 << I40E_FDIR_IPv6_TC_OFFSET));
+				gtp_ipv6->proto = IPPROTO_NONE;
+				gtp_ipv6->payload_len =
+					rte_cpu_to_be_16(
+					      I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
+				gtp_ipv6->hop_limits =
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+				payload = (unsigned char *)gtp_ipv6 +
+					sizeof(struct ipv6_hdr);
+			} else
+				payload = (unsigned char *)gtp +
+					sizeof(struct rte_flow_item_gtp);
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
 	}
 
 	/* fill the flexbytes to payload */
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 73af7fd..370c93b 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -189,6 +189,40 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
@@ -216,6 +250,40 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_RAW,
@@ -1576,10 +1644,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	/* FDIR - support default flow type with flexible payload */
 	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -2302,14 +2378,52 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf,
 	return 0;
 }
 
+static uint8_t
+i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
+				enum rte_flow_item_type item_type,
+				struct i40e_fdir_filter_conf *filter)
+{
+	struct i40e_customized_pctype *cus_pctype = NULL;
+
+	switch (item_type) {
+	case RTE_FLOW_ITEM_TYPE_GTPC:
+		cus_pctype = i40e_find_customized_pctype(pf,
+							 I40E_CUSTOMIZED_GTPC);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GTPU:
+		if (!filter->input.flow_ext.inner_ip)
+			cus_pctype = i40e_find_customized_pctype(pf,
+							 I40E_CUSTOMIZED_GTPU);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV4)
+			cus_pctype = i40e_find_customized_pctype(pf,
+						 I40E_CUSTOMIZED_GTPU_IPV4);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV6)
+			cus_pctype = i40e_find_customized_pctype(pf,
+						 I40E_CUSTOMIZED_GTPU_IPV6);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported item type");
+		break;
+	}
+
+	if (cus_pctype)
+		return cus_pctype->pctype;
+
+	return I40E_FILTER_PCTYPE_INVALID;
+}
+
 /* 1. Last in item should be NULL as range is not supported.
  * 2. Supported patterns: refer to array i40e_supported_patterns.
- * 3. Supported flow type and input set: refer to array
+ * 3. Default supported flow type and input set: refer to array
  *    valid_fdir_inset_table in i40e_ethdev.c.
  * 4. Mask of fields which need to be matched should be
  *    filled with 1.
  * 5. Mask of fields which needn't to be matched should be
  *    filled with 0.
+ * 6. GTP profile supports GTPv1 only.
+ * 7. GTP-C response message ('source_port' = 2123) is not supported.
  */
 static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
@@ -2326,14 +2440,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	enum i40e_filter_pctype pctype = 0;
+	uint8_t pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
 	uint32_t i, j;
 	uint8_t  ipv6_addr_mask[16] = {
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -2351,12 +2467,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	uint16_t outer_tpid;
 	uint16_t ether_type;
 	uint32_t vtc_flow_cpu;
+	bool outer_ip = true;
 	int ret;
 
 	memset(off_arr, 0, sizeof(off_arr));
 	memset(len_arr, 0, sizeof(len_arr));
 	memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
 	outer_tpid = i40e_get_outer_vlan(dev);
+	filter->input.flow_ext.customized_pctype = false;
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
@@ -2430,7 +2548,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv4_mask =
 				(const struct rte_flow_item_ipv4 *)item->mask;
 
-			if (ipv4_spec && ipv4_mask) {
+			if (ipv4_spec && ipv4_mask && outer_ip) {
 				/* Check IPv4 mask and update input set */
 				if (ipv4_mask->hdr.version_ihl ||
 				    ipv4_mask->hdr.total_length ||
@@ -2475,9 +2593,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					ipv4_spec->hdr.src_addr;
 				filter->input.flow.ip4_flow.dst_ip =
 					ipv4_spec->hdr.dst_addr;
+
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV4;
+			} else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv4 mask.");
+				return -rte_errno;
 			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+			if (outer_ip)
+				outer_ip = false;
 
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -2487,7 +2618,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv6_mask =
 				(const struct rte_flow_item_ipv6 *)item->mask;
 
-			if (ipv6_spec && ipv6_mask) {
+			if (ipv6_spec && ipv6_mask && outer_ip) {
 				/* Check IPv6 mask and update input set */
 				if (ipv6_mask->hdr.payload_len) {
 					rte_flow_error_set(error, EINVAL,
@@ -2538,10 +2669,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				else
 					pctype =
 					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
-			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV6;
+			} else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv6 mask");
+				return -rte_errno;
+			}
 
+			if (outer_ip)
+				outer_ip = false;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
@@ -2636,6 +2779,37 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			layer_idx = I40E_FLXPLD_L4_IDX;
 
 			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			if (!pf->gtp_support) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported protocol");
+				return -rte_errno;
+			}
+
+			gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+				    gtp_mask->msg_type ||
+				    gtp_mask->msg_len ||
+				    gtp_mask->teid != UINT32_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				filter->input.flow.gtp_flow.teid =
+					gtp_spec->teid;
+				filter->input.flow_ext.customized_pctype = true;
+				cus_proto = item_type;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
 			sctp_spec =
 				(const struct rte_flow_item_sctp *)item->spec;
@@ -2774,43 +2948,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Conflict with the first rule's input set.");
-		return -rte_errno;
-	} else if (ret == -EINVAL) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Invalid pattern mask.");
-		return -rte_errno;
+	/* Get customized pctype value */
+	if (filter->input.flow_ext.customized_pctype) {
+		pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
+		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported pctype");
+			return -rte_errno;
+		}
 	}
 
-	filter->input.pctype = pctype;
+	/* If customized pctype is not used, set fdir configuration.*/
+	if (!filter->input.flow_ext.customized_pctype) {
+		ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Conflict with the first rule's input set.");
+			return -rte_errno;
+		} else if (ret == -EINVAL) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Invalid pattern mask.");
+			return -rte_errno;
+		}
 
-	/* Store flex mask to SW */
-	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Exceed maximal number of bitmasks");
-		return -rte_errno;
-	} else if (ret == -2) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Conflict with the first flexible rule");
-		return -rte_errno;
-	} else if (ret > 0)
-		cfg_flex_msk = false;
+		/* Store flex mask to SW */
+		ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Exceed maximal number of bitmasks");
+			return -rte_errno;
+		} else if (ret == -2) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Conflict with the first flexible rule");
+			return -rte_errno;
+		} else if (ret > 0)
+			cfg_flex_msk = false;
 
-	if (cfg_flex_pit)
-		i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+		if (cfg_flex_pit)
+			i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
 
-	if (cfg_flex_msk)
-		i40e_flow_set_fdir_flex_msk(pf, pctype);
+		if (cfg_flex_msk)
+			i40e_flow_set_fdir_flex_msk(pf, pctype);
+	}
+
+	filter->input.pctype = pctype;
 
 	return 0;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v7 7/8] net/i40e: add cloud filter parsing function for GTP
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GTP-C and GTP-U enabling Beilei Xing
                                 ` (5 preceding siblings ...)
  2017-09-29 15:50               ` [PATCH v7 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-09-29 15:50               ` Beilei Xing
  2017-10-05  3:13                 ` Wu, Jingjing
  2017-09-29 15:50               ` [PATCH v7 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
                                 ` (2 subsequent siblings)
  9 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29 15:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds the i40e_flow_parse_gtp_filter parsing
function for GTP-C and GTP-U to support the cloud filter.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |   2 +
 drivers/net/i40e/i40e_flow.c   | 153 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 155 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 502f6c6..436ca2c 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -703,6 +703,8 @@ enum i40e_tunnel_type {
 	I40E_TUNNEL_TYPE_MPLSoUDP,
 	I40E_TUNNEL_TYPE_MPLSoGRE,
 	I40E_TUNNEL_TYPE_QINQ,
+	I40E_TUNNEL_TYPE_GTPC,
+	I40E_TUNNEL_TYPE_GTPU,
 	I40E_TUNNEL_TYPE_MAX,
 };
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 370c93b..9470ff5 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -125,6 +125,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 				       const struct rte_flow_action actions[],
 				       struct rte_flow_error *error,
 				       union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+				      const struct rte_flow_attr *attr,
+				      const struct rte_flow_item pattern[],
+				      const struct rte_flow_action actions[],
+				      struct rte_flow_error *error,
+				      union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
 				      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -1808,6 +1814,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+	/* GTP-C & GTP-U */
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
 	/* QINQ */
 	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
 };
@@ -3825,6 +3836,148 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 }
 
 /* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ *    filled with 0.
+ * 5. GTP profile supports GTPv1 only.
+ * 6. GTP-C response message ('source_port' = 2123) is not supported.
+ */
+static int
+i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
+			    const struct rte_flow_item *pattern,
+			    struct rte_flow_error *error,
+			    struct i40e_tunnel_filter_conf *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_gtp *gtp_spec;
+	const struct rte_flow_item_gtp *gtp_mask;
+	enum rte_flow_item_type item_type;
+
+	if (!pf->gtp_support) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   item,
+				   "GTP is not supported by default.");
+		return -rte_errno;
+	}
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+			/* IPv4 is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec =
+				(const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask =
+				(const struct rte_flow_item_gtp *)item->mask;
+
+			if (!gtp_spec || !gtp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP item");
+				return -rte_errno;
+			}
+
+			if (gtp_mask->v_pt_rsv_flags ||
+			    gtp_mask->msg_type ||
+			    gtp_mask->msg_len ||
+			    gtp_mask->teid != UINT32_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+				return -rte_errno;
+			}
+
+			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+
+			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+			   const struct rte_flow_attr *attr,
+			   const struct rte_flow_item pattern[],
+			   const struct rte_flow_action actions[],
+			   struct rte_flow_error *error,
+			   union i40e_filter_t *filter)
+{
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
+	int ret;
+
+	ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+					  error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+	return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
  * 2. Supported filter types: QINQ.
  * 3. Mask of fields which need to be matched should be
  *    filled with 1.
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v7 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GTP-C and GTP-U enabling Beilei Xing
                                 ` (6 preceding siblings ...)
  2017-09-29 15:50               ` [PATCH v7 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
@ 2017-09-29 15:50               ` Beilei Xing
  2017-10-05  8:03                 ` Wu, Jingjing
  2017-10-04 22:43               ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Ferruh Yigit
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
  9 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-09-29 15:50 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch sets the TEID of GTP-C and GTP-U as the filter type
by replacing the existing filter types inner_mac and TUNNEL_KEY.
This configuration is applied when adding GTP-C or
GTP-U filter rules, and it is only invalidated by an
NIC core reset.
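
For illustration, a rule of this kind could be requested through rte_flow
roughly as below. This is only a sketch against the GTPU item added
earlier in this series: the port, queue and TEID values are arbitrary,
the helper name is made up, and whether FDIR or the cloud filter ends up
programming the rule depends on which i40e parser accepts it.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
gtpu_teid_to_queue(uint16_t port_id, uint32_t teid, uint16_t queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match on the TEID only; every other GTP field stays masked out,
	 * as required by the parsers in this series. */
	struct rte_flow_item_gtp gtp_spec = {
		.teid = rte_cpu_to_be_32(teid),
	};
	struct rte_flow_item_gtp gtp_mask = { .teid = UINT32_MAX };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue_conf = { .index = queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}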

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 193 +++++++++++++++++++++++++++++++++++++----
 drivers/net/i40e/i40e_ethdev.h |  17 ++--
 drivers/net/i40e/i40e_flow.c   |  12 +--
 3 files changed, 191 insertions(+), 31 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 87e451a..225ac4c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -7181,7 +7181,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
-	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 3 entries */
@@ -7229,12 +7229,12 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
@@ -7252,12 +7252,131 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* For GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* for GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 
@@ -7348,7 +7467,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
 			0x40;
 		big_buffer = 1;
-		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
 		break;
 	case I40E_TUNNEL_TYPE_MPLSoGRE:
 		if (!pf->mpls_replace_flag) {
@@ -7364,7 +7483,37 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
 			0x0;
 		big_buffer = 1;
-		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
+		break;
+	case I40E_TUNNEL_TYPE_GTPC:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
+	case I40E_TUNNEL_TYPE_GTPU:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+			0x0;
+		big_buffer = 1;
 		break;
 	case I40E_TUNNEL_TYPE_QINQ:
 		if (!pf->qinq_replace_flag) {
@@ -7392,13 +7541,19 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 
 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
 		pfilter->element.flags |=
-			I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+			I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	else {
 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
 						&pfilter->element.flags);
@@ -10919,14 +11074,14 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
 			   sizeof(f->input.general_fields));
 
 		if (((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
 			big_buffer = 1;
 
 		if (big_buffer)
@@ -11314,7 +11469,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 2 entries */
@@ -11345,13 +11500,13 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L2 filter, input for L2 filter will be L1 filter  */
 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 436ca2c..b223456 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -650,12 +650,16 @@ struct i40e_ethertype_rule {
 
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
-#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
-#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP	8
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE	9
+#define I40E_AQC_ADD_CLOUD_FILTER_0X10		0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_0X11		0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X12		0x12
+#define I40E_AQC_ADD_L1_FILTER_0X11		0x11
+#define I40E_AQC_ADD_L1_FILTER_0X12		0x12
+#define I40E_AQC_ADD_L1_FILTER_0X13		0x13
+#define I40E_AQC_NEW_TR_21			21
+#define I40E_AQC_NEW_TR_22			22
 
 enum i40e_tunnel_iptype {
 	I40E_TUNNEL_IPTYPE_IPV4,
@@ -905,6 +909,7 @@ struct i40e_pf {
 	bool floating_veb_list[I40E_MAX_VF];
 	struct i40e_flow_list flow_list;
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
+	bool gtp_replace_flag;   /* 1 - GTP-C/U filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 9470ff5..0d9c972 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -4348,12 +4348,12 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 		vsi = vf->vsi;
 	}
 
-	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X10))
 		big_buffer = 1;
 
 	if (big_buffer)
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 4/8] ethdev: add GTP items to support flow API
  2017-09-29  9:29                   ` Sean Harte
  2017-09-29  9:37                     ` Xing, Beilei
@ 2017-10-02 12:27                     ` Adrien Mazarguil
  2017-10-03  8:56                       ` Sean Harte
  1 sibling, 1 reply; 116+ messages in thread
From: Adrien Mazarguil @ 2017-10-02 12:27 UTC (permalink / raw)
  To: Sean Harte; +Cc: Xing, Beilei, Wu, Jingjing, Chilikin, Andrey, dev

On Fri, Sep 29, 2017 at 10:29:55AM +0100, Sean Harte wrote:
> On 29 September 2017 at 09:54, Xing, Beilei <beilei.xing@intel.com> wrote:
<snip>
> >> >  /**
> >> > + * RTE_FLOW_ITEM_TYPE_GTP.
> >> > + *
> >> > + * Matches a GTPv1 header.
> >> > + */
> >> > +struct rte_flow_item_gtp {
> >> > +       /**
> >> > +        * Version (3b), protocol type (1b), reserved (1b),
> >> > +        * Extension header flag (1b),
> >> > +        * Sequence number flag (1b),
> >> > +        * N-PDU number flag (1b).
> >> > +        */
> >> > +       uint8_t v_pt_rsv_flags;
> >> > +       uint8_t msg_type; /**< Message type. */
> >> > +       rte_be16_t msg_len; /**< Message length. */
> >> > +       rte_be32_t teid; /**< Tunnel endpoint identifier. */ };
> >>
> >> In future, you might add support for GTPv2 (which is used since LTE).
> >> Maybe this structure should have v1 in its name to avoid confusion?
> >
> > I considered it before. But I think we can modify it when we support GTPv2 in future, and keep concise 'GTP' currently:)  since I have described it matches v1 header.
> >
> 
> You could rename v_pt_rsv_flags to version_flags to avoid some future
> code changes to support GTPv2. There's still the issue that not all
> GTPv2 messages have a TEID though.

Although they have the same size, the header of these two protocols
obviously differs. My suggestion would be to go with a separate GTPv2
pattern item using its own dedicated structure instead.

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 4/8] ethdev: add GTP items to support flow API
  2017-10-02 12:27                     ` Adrien Mazarguil
@ 2017-10-03  8:56                       ` Sean Harte
  2017-10-05  8:06                         ` Wu, Jingjing
  0 siblings, 1 reply; 116+ messages in thread
From: Sean Harte @ 2017-10-03  8:56 UTC (permalink / raw)
  To: Adrien Mazarguil; +Cc: Xing, Beilei, Wu, Jingjing, Chilikin, Andrey, dev

On 2 October 2017 at 13:27, Adrien Mazarguil <adrien.mazarguil@6wind.com> wrote:
> On Fri, Sep 29, 2017 at 10:29:55AM +0100, Sean Harte wrote:
>> On 29 September 2017 at 09:54, Xing, Beilei <beilei.xing@intel.com> wrote:
> <snip>
>> >> >  /**
>> >> > + * RTE_FLOW_ITEM_TYPE_GTP.
>> >> > + *
>> >> > + * Matches a GTPv1 header.
>> >> > + */
>> >> > +struct rte_flow_item_gtp {
>> >> > +       /**
>> >> > +        * Version (3b), protocol type (1b), reserved (1b),
>> >> > +        * Extension header flag (1b),
>> >> > +        * Sequence number flag (1b),
>> >> > +        * N-PDU number flag (1b).
>> >> > +        */
>> >> > +       uint8_t v_pt_rsv_flags;
>> >> > +       uint8_t msg_type; /**< Message type. */
>> >> > +       rte_be16_t msg_len; /**< Message length. */
>> >> > +       rte_be32_t teid; /**< Tunnel endpoint identifier. */ };
>> >>
>> >> In future, you might add support for GTPv2 (which is used since LTE).
>> >> Maybe this structure should have v1 in its name to avoid confusion?
>> >
>> > I considered it before. But I think we can modify it when we support GTPv2 in future, and keep concise 'GTP' currently:)  since I have described it matches v1 header.
>> >
>>
>> You could rename v_pt_rsv_flags to version_flags to avoid some future
>> code changes to support GTPv2. There's still the issue that not all
>> GTPv2 messages have a TEID though.
>
> Although they have the same size, the header of these two protocols
> obviously differs. My suggestion would be to go with a separate GTPv2
> pattern item using its own dedicated structure instead.
>
> --
> Adrien Mazarguil
> 6WIND

The first four bytes are the same (the flags in the first byte have
different meanings, but the bits indicating the version are in the same
location). After that, different fields in each version are optional,
and the headers have variable size. A single structure could be used
if the first field is renamed to something like "version_flags",
together with a check that the teid field in item->mask is not set if
((version_flags >> 5 == 2) && ((version_flags >> 4) & 1) == 1). If
there are going to be two structures, it would be good to put v1 and v2
in the names, in my opinion.
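
As a minimal sketch of that version dispatch, assuming the
rte_flow_item_gtp layout proposed in this series and the "version_flags"
rename suggested above (the helper name is made up):

#include <stdint.h>

/* Both GTP versions keep the version number in the top three bits of
 * the first header byte, so a combined item could be dispatched on it
 * before deciding whether a TEID match is meaningful. */
static inline unsigned int
gtp_version_from_flags(uint8_t version_flags)
{
	return version_flags >> 5;	/* 1 = GTPv1, 2 = GTPv2 */
}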

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v7 0/8] net/i40e: GTP-C and GTP-U enabling
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GTP-C and GTP-U enabling Beilei Xing
                                 ` (7 preceding siblings ...)
  2017-09-29 15:50               ` [PATCH v7 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
@ 2017-10-04 22:43               ` Ferruh Yigit
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
  9 siblings, 0 replies; 116+ messages in thread
From: Ferruh Yigit @ 2017-10-04 22:43 UTC (permalink / raw)
  To: Beilei Xing, jingjing.wu; +Cc: andrey.chilikin, dev

On 9/29/2017 4:50 PM, Beilei Xing wrote:
> This patch set enables RSS/FDIR/cloud filter for GTP-C and GTP-U.
> It depends on Kirill's patch:
> http://www.dpdk.org/dev/patchwork/patch/29325/
> 
> v7 changes:
>  - Distinguish GTP-C request and response message in mbuf description.
>  - Clarify GTP-C response message is not supported.
>  - Version_type 0x30 is invalid for GTP-C, replace with 0x32.
>  - Refine metadata parsing function.
>  - Rework for checking fdir programming status.
> 
> v6 changes:
>  - Reword description of GTP item and GTP structure, mainly support
>    GTPv1, not include GTPv0 and GTPv2.
> 
> v5 changes:
>  - Fix code style.
>  - Reword commit log.
> 
> v4 changes:
>  - Refine fdir related code.
>  - Rework profile metadata parsing function.
>  - Fix code style.
> 
> v3 changes:
>  - Rework implementation to support the new profile.
>  - Add GTPC and GTPU tunnel type in software packet type parser.
>  - Update ptype info when loading profile.
>  - Fix bug of updating pctype info.
> 
> 
> v2 changes:
>  - Enable RSS/FDIR/cloud filter dynamically by checking profile
>  - Add GTPC and GTPU items to distinguish rule for GTP-C or GTP-U
>  - Rework FDIR/cloud filter enabling function
> 
> Beilei Xing (8):
>   mbuf: support GTP in software packet type parser
>   net/i40e: update ptype and pctype info
>   net/i40e: support RSS for new pctype
>   ethdev: add GTP items to support flow API
>   net/i40e: finish integration FDIR with generic flow API
>   net/i40e: add FDIR support for GTP-C and GTP-U
>   net/i40e: add cloud filter parsing function for GTP
>   net/i40e: enable cloud filter for GTP-C and GTP-U

Series Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>

I don't know about GTP internals, but the set passes my scripts.

But there was a merge conflict (on top of the master tree) on patch 3. It is
easy to resolve, but I wonder if there is a dependency not mentioned? If
not, can you please double-check the patch.

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v7 2/8] net/i40e: update ptype and pctype info
  2017-09-29 15:50               ` [PATCH v7 2/8] net/i40e: update ptype and pctype info Beilei Xing
@ 2017-10-05  2:51                 ` Wu, Jingjing
  0 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-10-05  2:51 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev



> -----Original Message-----
> From: Xing, Beilei
> Sent: Friday, September 29, 2017 11:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: [PATCH v7 2/8] net/i40e: update ptype and pctype info
> 
> Update new packet type and new pctype info when downloading
> profile.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v7 5/8] net/i40e: finish integration FDIR with generic flow API
  2017-09-29 15:50               ` [PATCH v7 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
@ 2017-10-05  2:52                 ` Wu, Jingjing
  0 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-10-05  2:52 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev



> -----Original Message-----
> From: Xing, Beilei
> Sent: Friday, September 29, 2017 11:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: [PATCH v7 5/8] net/i40e: finish integration FDIR with generic flow API
> 
> rte_eth_fdir_* structures are still used in FDIR functions.
> This patch adds i40e private FDIR related structures and
> functions to finish integration FDIR with generic flow API.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v7 6/8] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-09-29 15:50               ` [PATCH v7 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-10-05  3:09                 ` Wu, Jingjing
  0 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-10-05  3:09 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev

> @@ -975,27 +1003,30 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input
> *fdir_input,
>  	raw_pkt += sizeof(uint16_t);
>  	len += sizeof(uint16_t);
> 
> -	switch (fdir_input->pctype) {
> -	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
> +	if (is_customized_pctype) {
> +		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
> +		if (!cus_pctype)
> +			PMD_DRV_LOG(ERR, "unknown pctype %u.",
> +				    fdir_input->pctype);
Shouldn't it return here?
If cus_pctype can never be NULL, the check is unnecessary,
because cus_pctype is used below.

Thanks
Jingjing

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v7 7/8] net/i40e: add cloud filter parsing function for GTP
  2017-09-29 15:50               ` [PATCH v7 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
@ 2017-10-05  3:13                 ` Wu, Jingjing
  0 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-10-05  3:13 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev



> -----Original Message-----
> From: Xing, Beilei
> Sent: Friday, September 29, 2017 11:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: [PATCH v7 7/8] net/i40e: add cloud filter parsing function for GTP
> 
> This patch adds i40e_flow_parse_gtp_filter parsing
> function for GTP-C and GTP-U to support cloud filter.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v7 4/8] ethdev: add GTP items to support flow API
  2017-09-29 15:50               ` [PATCH v7 4/8] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-10-05  8:01                 ` Wu, Jingjing
  0 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-10-05  8:01 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev



> -----Original Message-----
> From: Xing, Beilei
> Sent: Friday, September 29, 2017 11:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: [PATCH v7 4/8] ethdev: add GTP items to support flow API
> 
> This patch adds GTP, GTPC and GTPU items for
> generic flow API, and also exposes item fields
> through the flow command.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>

Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v7 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U
  2017-09-29 15:50               ` [PATCH v7 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
@ 2017-10-05  8:03                 ` Wu, Jingjing
  0 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-10-05  8:03 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev



> -----Original Message-----
> From: Xing, Beilei
> Sent: Friday, September 29, 2017 11:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: [PATCH v7 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U
> 
> This patch sets the TEID of GTP-C and GTP-U as the filter type
> by replacing the existing filter types inner_mac and TUNNEL_KEY.
> This configuration is set when adding GTP-C or
> GTP-U filter rules, and it is only invalidated by an
> NIC core reset.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 4/8] ethdev: add GTP items to support flow API
  2017-10-03  8:56                       ` Sean Harte
@ 2017-10-05  8:06                         ` Wu, Jingjing
  2017-10-05  8:30                           ` Adrien Mazarguil
  0 siblings, 1 reply; 116+ messages in thread
From: Wu, Jingjing @ 2017-10-05  8:06 UTC (permalink / raw)
  To: Sean Harte, Adrien Mazarguil; +Cc: Xing, Beilei, Chilikin, Andrey, dev



> -----Original Message-----
> From: Sean Harte [mailto:seanbh@gmail.com]
> Sent: Tuesday, October 3, 2017 4:57 PM
> To: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> Cc: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Chilikin,
> Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v6 4/8] ethdev: add GTP items to support flow API
> 
> On 2 October 2017 at 13:27, Adrien Mazarguil <adrien.mazarguil@6wind.com> wrote:
> > On Fri, Sep 29, 2017 at 10:29:55AM +0100, Sean Harte wrote:
> >> On 29 September 2017 at 09:54, Xing, Beilei <beilei.xing@intel.com> wrote:
> > <snip>
> >> >> >  /**
> >> >> > + * RTE_FLOW_ITEM_TYPE_GTP.
> >> >> > + *
> >> >> > + * Matches a GTPv1 header.
> >> >> > + */
> >> >> > +struct rte_flow_item_gtp {
> >> >> > +       /**
> >> >> > +        * Version (3b), protocol type (1b), reserved (1b),
> >> >> > +        * Extension header flag (1b),
> >> >> > +        * Sequence number flag (1b),
> >> >> > +        * N-PDU number flag (1b).
> >> >> > +        */
> >> >> > +       uint8_t v_pt_rsv_flags;
> >> >> > +       uint8_t msg_type; /**< Message type. */
> >> >> > +       rte_be16_t msg_len; /**< Message length. */
> >> >> > +       rte_be32_t teid; /**< Tunnel endpoint identifier. */ };
> >> >>
> >> >> In future, you might add support for GTPv2 (which is used since LTE).
> >> >> Maybe this structure should have v1 in its name to avoid confusion?
> >> >
> >> > I considered it before. But I think we can modify it when we support GTPv2 in future,
> and keep concise 'GTP' currently:)  since I have described it matches v1 header.
> >> >
> >>
> >> You could rename v_pt_rsv_flags to version_flags to avoid some future
> >> code changes to support GTPv2. There's still the issue that not all
> >> GTPv2 messages have a TEID though.
> >
> > Although they have the same size, the header of these two protocols
> > obviously differs. My suggestion would be to go with a separate GTPv2
> > pattern item using its own dedicated structure instead.
> >
> > --
> > Adrien Mazarguil
> > 6WIND
> 
> The first four bytes are the same (the flags in the first byte have
> different meanings, but the bits indicating the version are in the same
> location). After that, different fields in each version are optional,
> and the headers have variable size. A single structure could be used
> if the first field is renamed to something like "version_flags", with a
> check that the teid field in item->mask is not set if
> ((version_flags >> 5 == 2) && ((version_flags >> 4) & 1) == 1). If
> there are going to be two structures, it would be good to put v1 and v2
> in the names, in my opinion.

I think the name GTP is OK for now. Since v1 and v2 are different, why not rename them
when v2 support is introduced?
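
For reference, the version bits sit in the top three bits of the first
header byte in both GTPv1 and GTPv2, so either naming choice still allows
a simple version check (illustrative sketch only, using the v_pt_rsv_flags
field from the proposed structure):

	uint8_t version = gtp->v_pt_rsv_flags >> 5; /* 1 = GTPv1, 2 = GTPv2 */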





^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v8 0/7] net/i40e: GPT-C and GTP-U enabling
  2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Beilei Xing
                                 ` (8 preceding siblings ...)
  2017-10-04 22:43               ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Ferruh Yigit
@ 2017-10-05  8:14               ` Beilei Xing
  2017-10-05  8:14                 ` [PATCH v8 1/7] mbuf: support GTP in software packet type parser Beilei Xing
                                   ` (8 more replies)
  9 siblings, 9 replies; 116+ messages in thread
From: Beilei Xing @ 2017-10-05  8:14 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch set enables RSS/FDIR/cloud filter for GTP-C and GTP-U.

v8 changes:
 - Remove 'enable RSS for new pctype' as it can be set with the
   configuration in Kirill's patch.
 - Resolve conflicts.

v7 changes:
 - Distinguish GTP-C request and response messages in the mbuf description.
 - Clarify that the GTP-C response message is not supported.
 - Version_type 0x30 is invalid for GTP-C; replace it with 0x32.
 - Refine metadata parsing function.
 - Rework for checking fdir programming status.

v6 changes:
 - Reword the description of the GTP item and GTP structure: mainly
   support GTPv1, not GTPv0 or GTPv2.

v5 changes:
 - Fix code style.
 - Reword commit log.

v4 changes:
 - Refine fdir related code.
 - Rework profile metadata parsing function.
 - Fix code style.

v3 changes:
 - Rework implementation to support the new profile.
 - Add GTPC and GTPU tunnel type in software packet type parser.
 - Update ptype info when loading profile.
 - Fix bug of updating pctype info.


v2 changes:
 - Enable RSS/FDIR/cloud filter dynamically by checking the profile
 - Add GTPC and GTPU items to distinguish rules for GTP-C and GTP-U
 - Rework FDIR/cloud filter enabling function

Beilei Xing (7):
  mbuf: support GTP in software packet type parser
  net/i40e: update ptype and pctype info
  ethdev: add GTP items to support flow API
  net/i40e: finish integration FDIR with generic flow API
  net/i40e: add FDIR support for GTP-C and GTP-U
  net/i40e: add cloud filter parsing function for GTP
  net/i40e: enable cloud filter for GTP-C and GTP-U

 app/test-pmd/cmdline_flow.c                 |  40 ++
 app/test-pmd/config.c                       |   3 +
 doc/guides/prog_guide/rte_flow.rst          |  17 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |   4 +
 drivers/net/i40e/i40e_ethdev.c              | 505 +++++++++++++++++++++++-
 drivers/net/i40e/i40e_ethdev.h              | 156 +++++++-
 drivers/net/i40e/i40e_fdir.c                | 585 +++++++++++++++++++++++++++-
 drivers/net/i40e/i40e_flow.c                | 503 ++++++++++++++++++++----
 drivers/net/i40e/rte_pmd_i40e.c             |   6 +-
 lib/librte_ether/rte_flow.h                 |  52 +++
 lib/librte_mbuf/rte_mbuf_ptype.c            |   2 +
 lib/librte_mbuf/rte_mbuf_ptype.h            |  32 ++
 12 files changed, 1774 insertions(+), 131 deletions(-)

-- 
2.5.5

^ permalink raw reply	[flat|nested] 116+ messages in thread

* [PATCH v8 1/7] mbuf: support GTP in software packet type parser
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
@ 2017-10-05  8:14                 ` Beilei Xing
  2017-10-05 11:50                   ` Sean Harte
  2017-10-05  8:14                 ` [PATCH v8 2/7] net/i40e: update ptype and pctype info Beilei Xing
                                   ` (7 subsequent siblings)
  8 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-10-05  8:14 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Add support for GTP-C and GTP-U tunnels in rte_net_get_ptype().

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
---
 lib/librte_mbuf/rte_mbuf_ptype.c |  2 ++
 lib/librte_mbuf/rte_mbuf_ptype.h | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/lib/librte_mbuf/rte_mbuf_ptype.c b/lib/librte_mbuf/rte_mbuf_ptype.c
index e5c4fae..a450814 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.c
+++ b/lib/librte_mbuf/rte_mbuf_ptype.c
@@ -89,6 +89,8 @@ const char *rte_get_ptype_tunnel_name(uint32_t ptype)
 	case RTE_PTYPE_TUNNEL_NVGRE: return "TUNNEL_NVGRE";
 	case RTE_PTYPE_TUNNEL_GENEVE: return "TUNNEL_GENEVE";
 	case RTE_PTYPE_TUNNEL_GRENAT: return "TUNNEL_GRENAT";
+	case RTE_PTYPE_TUNNEL_GTPC: return "TUNNEL_GTPC";
+	case RTE_PTYPE_TUNNEL_GTPU: return "TUNNEL_GTPU";
 	default: return "TUNNEL_UNKNOWN";
 	}
 }
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index acd70bb..978c4a2 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -383,6 +383,38 @@ extern "C" {
  */
 #define RTE_PTYPE_TUNNEL_GRENAT             0x00006000
 /**
+ * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2123>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2123>
+ * or,
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'source port'=2123>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'source port'=2123>
+ */
+#define RTE_PTYPE_TUNNEL_GTPC               0x00007000
+/**
+ * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2152>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2152>
+ */
+#define RTE_PTYPE_TUNNEL_GTPU               0x00008000
+/**
  * Mask of tunneling packet types.
  */
 #define RTE_PTYPE_TUNNEL_MASK               0x0000f000
-- 
2.5.5
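
As an illustration of how an application might consume the new tunnel
ptypes (a sketch, not part of the patch; mb is assumed to be a received
struct rte_mbuf * and handle_gtpc()/handle_gtpu() are hypothetical
handlers):

	uint32_t tnl = mb->packet_type & RTE_PTYPE_TUNNEL_MASK;

	if (tnl == RTE_PTYPE_TUNNEL_GTPC)
		handle_gtpc(mb);
	else if (tnl == RTE_PTYPE_TUNNEL_GTPU)
		handle_gtpu(mb);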

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v8 2/7] net/i40e: update ptype and pctype info
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
  2017-10-05  8:14                 ` [PATCH v8 1/7] mbuf: support GTP in software packet type parser Beilei Xing
@ 2017-10-05  8:14                 ` Beilei Xing
  2017-10-05  8:14                 ` [PATCH v8 3/7] ethdev: add GTP items to support flow API Beilei Xing
                                   ` (6 subsequent siblings)
  8 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-10-05  8:14 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

Update new packet type and pctype info when downloading a
profile.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c  | 312 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_ethdev.h  |  24 ++++
 drivers/net/i40e/rte_pmd_i40e.c |   6 +-
 3 files changed, 341 insertions(+), 1 deletion(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 0b151a0..3295da0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -65,6 +65,7 @@
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
 #include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
 
 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
@@ -1042,6 +1043,21 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static void
+i40e_init_customized_info(struct i40e_pf *pf)
+{
+	int i;
+
+	/* Initialize customized pctype */
+	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
+		pf->customized_pctype[i].index = i;
+		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
+		pf->customized_pctype[i].valid = false;
+	}
+
+	pf->gtp_support = false;
+}
+
 static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
@@ -1308,6 +1324,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	/* initialize Traffic Manager configuration */
 	i40e_tm_conf_init(dev);
 
+	/* Initialize customized information */
+	i40e_init_customized_info(pf);
+
 	ret = i40e_init_ethtype_filter_list(dev);
 	if (ret < 0)
 		goto err_init_ethtype_filter_list;
@@ -10769,6 +10788,299 @@ is_i40e_supported(struct rte_eth_dev *dev)
 	return is_device_supported(dev, &rte_i40e_pmd);
 }
 
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
+{
+	int i;
+
+	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+		if (pf->customized_pctype[i].index == index)
+			return &pf->customized_pctype[i];
+	}
+	return NULL;
+}
+
+static int
+i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size, uint32_t proto_num,
+			      struct rte_pmd_i40e_proto_info *proto)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t pctype_num;
+	struct rte_pmd_i40e_ptype_info *pctype;
+	uint32_t buff_size;
+	struct i40e_customized_pctype *new_pctype = NULL;
+	uint8_t proto_id;
+	uint8_t pctype_value;
+	char name[64];
+	uint32_t i, j, n;
+	int ret;
+
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&pctype_num, sizeof(pctype_num),
+				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype number");
+		return -1;
+	}
+	if (!pctype_num) {
+		PMD_DRV_LOG(INFO, "No new pctype added");
+		return -1;
+	}
+
+	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
+	pctype = rte_zmalloc("new_pctype", buff_size, 0);
+	if (!pctype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+	/* get information about new pctype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)pctype, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get pctype list");
+		rte_free(pctype);
+		return -1;
+	}
+
+	/* Update customized pctype. */
+	for (i = 0; i < pctype_num; i++) {
+		pctype_value = pctype[i].ptype_id;
+		memset(name, 0, sizeof(name));
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = pctype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				strcat(name, proto[n].name);
+				strcat(name, "_");
+				break;
+			}
+		}
+		name[strlen(name) - 1] = '\0';
+		if (!strcmp(name, "GTPC"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						      I40E_CUSTOMIZED_GTPC);
+		else if (!strcmp(name, "GTPU_IPV4"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						   I40E_CUSTOMIZED_GTPU_IPV4);
+		else if (!strcmp(name, "GTPU_IPV6"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						   I40E_CUSTOMIZED_GTPU_IPV6);
+		else if (!strcmp(name, "GTPU"))
+			new_pctype =
+				i40e_find_customized_pctype(pf,
+						      I40E_CUSTOMIZED_GTPU);
+		if (new_pctype) {
+			new_pctype->pctype = pctype_value;
+			new_pctype->valid = true;
+		}
+	}
+
+	rte_free(pctype);
+	return 0;
+}
+
+static int
+i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
+			       uint32_t pkg_size, uint32_t proto_num,
+			       struct rte_pmd_i40e_proto_info *proto)
+{
+	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
+	uint8_t port_id = dev->data->port_id;
+	uint32_t ptype_num;
+	struct rte_pmd_i40e_ptype_info *ptype;
+	uint32_t buff_size;
+	uint8_t proto_id;
+	char name[16];
+	uint32_t i, j, n;
+	bool inner_ip;
+	int ret;
+
+	/* get information about new ptype num */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				(uint8_t *)&ptype_num, sizeof(ptype_num),
+				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype number");
+		return ret;
+	}
+	if (!ptype_num) {
+		PMD_DRV_LOG(INFO, "No new ptype added");
+		return -1;
+	}
+
+	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+	ptype = rte_zmalloc("new_ptype", buff_size, 0);
+	if (!ptype) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return -1;
+	}
+
+	/* get information about new ptype list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)ptype, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get ptype list");
+		rte_free(ptype);
+		return ret;
+	}
+
+	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
+	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
+	if (!ptype_mapping) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		rte_free(ptype);
+		return -1;
+	}
+
+	/* Update ptype mapping table. */
+	for (i = 0; i < ptype_num; i++) {
+		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
+		ptype_mapping[i].sw_ptype = 0;
+		inner_ip = false;
+		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+			proto_id = ptype[i].protocols[j];
+			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+				continue;
+			for (n = 0; n < proto_num; n++) {
+				if (proto[n].proto_id != proto_id)
+					continue;
+				memset(name, 0, sizeof(name));
+				strcpy(name, proto[n].name);
+				if (!strncmp(name, "IPV4", 4) && !inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+					inner_ip = true;
+				} else if (!strncmp(name, "IPV4", 4) &&
+					   inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+				} else if (!strncmp(name, "IPV6", 4) &&
+					   !inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+					inner_ip = true;
+				} else if (!strncmp(name, "IPV6", 4) &&
+					   inner_ip) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+				} else if (!strncmp(name, "IPV4FRAG", 8)) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!strncmp(name, "IPV6FRAG", 8)) {
+					ptype_mapping[i].sw_ptype |=
+					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_FRAG;
+				} else if (!strncmp(name, "GTPC", 4))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPC;
+				else if (!strncmp(name, "GTPU", 4))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_TUNNEL_GTPU;
+				else if (!strncmp(name, "UDP", 3))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_UDP;
+				else if (!strncmp(name, "TCP", 3))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_TCP;
+				else if (!strncmp(name, "SCTP", 4))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_SCTP;
+				else if (!strncmp(name, "ICMP", 4) ||
+					 !strncmp(name, "ICMPV6", 6))
+					ptype_mapping[i].sw_ptype |=
+						RTE_PTYPE_INNER_L4_ICMP;
+
+				break;
+			}
+		}
+	}
+
+	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
+						ptype_num, 0);
+	if (ret)
+		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
+
+	rte_free(ptype_mapping);
+	rte_free(ptype);
+	return ret;
+}
+
+void
+i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+			      uint32_t pkg_size)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t proto_num;
+	struct rte_pmd_i40e_proto_info *proto;
+	uint32_t buff_size;
+	uint32_t i;
+	int ret;
+
+	/* get information about protocol number */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+				       (uint8_t *)&proto_num, sizeof(proto_num),
+				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol number");
+		return;
+	}
+	if (!proto_num) {
+		PMD_DRV_LOG(INFO, "No new protocol added");
+		return;
+	}
+
+	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
+	proto = rte_zmalloc("new_proto", buff_size, 0);
+	if (!proto) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return;
+	}
+
+	/* get information about protocol list */
+	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+					(uint8_t *)proto, buff_size,
+					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get protocol list");
+		rte_free(proto);
+		return;
+	}
+
+	/* Check if GTP is supported. */
+	for (i = 0; i < proto_num; i++) {
+		if (!strncmp(proto[i].name, "GTP", 3)) {
+			pf->gtp_support = true;
+			break;
+		}
+	}
+
+	/* Update customized pctype info */
+	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
+					    proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No pctype is updated.");
+
+	/* Update customized ptype info */
+	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
+					   proto_num, proto);
+	if (ret)
+		PMD_DRV_LOG(INFO, "No ptype is updated.");
+
+	rte_free(proto);
+}
+
 /* Create a QinQ cloud filter
  *
  * The Fortville NIC has limited resources for tunnel filters,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 5b84da2..9688ea8 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -723,6 +723,21 @@ struct i40e_tm_conf {
 	bool committed;
 };
 
+enum i40e_new_pctype {
+	I40E_CUSTOMIZED_GTPC = 0,
+	I40E_CUSTOMIZED_GTPU_IPV4,
+	I40E_CUSTOMIZED_GTPU_IPV6,
+	I40E_CUSTOMIZED_GTPU,
+	I40E_CUSTOMIZED_MAX,
+};
+
+#define I40E_FILTER_PCTYPE_INVALID     0
+struct i40e_customized_pctype {
+	enum i40e_new_pctype index;  /* Indicate which customized pctype */
+	uint8_t pctype;   /* New pctype value */
+	bool valid;   /* Check if it's valid */
+};
+
 /*
  * Structure to store private data specific for PF instance.
  */
@@ -787,6 +802,11 @@ struct i40e_pf {
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
+
+	/* Dynamic Device Personalization */
+	bool gtp_support; /* 1 - support GTP-C and GTP-U */
+	/* customer customized pctype */
+	struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
 };
 
 enum pending_msg {
@@ -1012,6 +1032,10 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
 int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void i40e_tm_conf_init(struct rte_eth_dev *dev);
 void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
+void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+				 uint32_t pkg_size);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index 6096542..0cd2d7a 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1608,6 +1608,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
 		return -EINVAL;
 	}
 
+	i40e_update_customized_info(dev, buff, size);
+
 	/* Find metadata segment */
 	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
 							pkg_hdr);
@@ -2106,7 +2108,9 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
 	    tnl != RTE_PTYPE_TUNNEL_VXLAN &&
 	    tnl != RTE_PTYPE_TUNNEL_NVGRE &&
 	    tnl != RTE_PTYPE_TUNNEL_GENEVE &&
-	    tnl != RTE_PTYPE_TUNNEL_GRENAT)
+	    tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPC &&
+	    tnl != RTE_PTYPE_TUNNEL_GTPU)
 		return -1;
 
 	if (il2 &&
-- 
2.5.5
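
For context, the update above is triggered when a DDP profile is loaded
through the existing rte_pmd_i40e API; a minimal sketch (assuming the GTP
profile has already been read into buff of length size, and that the usual
RTE_PMD_I40E_PKG_OP_WR_ADD operation applies):

	if (rte_pmd_i40e_process_ddp_package(port_id, buff, size,
					      RTE_PMD_I40E_PKG_OP_WR_ADD) < 0)
		printf("failed to load DDP profile\n");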

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v8 3/7] ethdev: add GTP items to support flow API
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
  2017-10-05  8:14                 ` [PATCH v8 1/7] mbuf: support GTP in software packet type parser Beilei Xing
  2017-10-05  8:14                 ` [PATCH v8 2/7] net/i40e: update ptype and pctype info Beilei Xing
@ 2017-10-05  8:14                 ` Beilei Xing
  2017-10-05 11:50                   ` Sean Harte
  2017-10-05  8:14                 ` [PATCH v8 4/7] net/i40e: finish integration FDIR with generic " Beilei Xing
                                   ` (5 subsequent siblings)
  8 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-10-05  8:14 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds GTP, GTPC and GTPU items for the
generic flow API, and also exposes the item fields
through the flow command.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
 app/test-pmd/config.c                       |  3 ++
 doc/guides/prog_guide/rte_flow.rst          | 17 ++++++++++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
 lib/librte_ether/rte_flow.h                 | 52 +++++++++++++++++++++++++++++
 5 files changed, 116 insertions(+)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a17a004..26c3e4f 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -171,6 +171,10 @@ enum index {
 	ITEM_GRE_PROTO,
 	ITEM_FUZZY,
 	ITEM_FUZZY_THRESH,
+	ITEM_GTP,
+	ITEM_GTP_TEID,
+	ITEM_GTPC,
+	ITEM_GTPU,
 
 	/* Validate/create actions. */
 	ACTIONS,
@@ -451,6 +455,9 @@ static const enum index next_item[] = {
 	ITEM_MPLS,
 	ITEM_GRE,
 	ITEM_FUZZY,
+	ITEM_GTP,
+	ITEM_GTPC,
+	ITEM_GTPU,
 	ZERO,
 };
 
@@ -588,6 +595,12 @@ static const enum index item_gre[] = {
 	ZERO,
 };
 
+static const enum index item_gtp[] = {
+	ITEM_GTP_TEID,
+	ITEM_NEXT,
+	ZERO,
+};
+
 static const enum index next_action[] = {
 	ACTION_END,
 	ACTION_VOID,
@@ -1421,6 +1434,33 @@ static const struct token token_list[] = {
 		.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
 					thresh)),
 	},
+	[ITEM_GTP] = {
+		.name = "gtp",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTP_TEID] = {
+		.name = "teid",
+		.help = "tunnel endpoint identifier",
+		.next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
+		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
+	},
+	[ITEM_GTPC] = {
+		.name = "gtpc",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
+	[ITEM_GTPU] = {
+		.name = "gtpu",
+		.help = "match GTP header",
+		.priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
+		.next = NEXT(item_gtp),
+		.call = parse_vc,
+	},
 
 	/* Validate/create actions. */
 	[ACTIONS] = {
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 60a8d07..4ec8f0d 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -952,6 +952,9 @@ static const struct {
 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
+	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
 };
 
 /** Compute storage space needed by item specification. */
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 662a912..73f12ee 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -955,6 +955,23 @@ Usage example, fuzzy match a TCPv4 packets:
    | 4     | END      |
    +-------+----------+
 
+Item: ``GTP``, ``GTPC``, ``GTPU``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Matches a GTPv1 header.
+
+Note: GTP, GTPC and GTPU use the same structure. GTPC and GTPU item
+are defined for a user-friendly API when creating GTP-C and GTP-U
+flow rules.
+
+- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
+  extension header flag (1b), sequence number flag (1b), N-PDU number
+  flag (1b).
+- ``msg_type``: message type.
+- ``msg_len``: message length.
+- ``teid``: tunnel endpoint identifier.
+- Default ``mask`` matches teid only.
+
 Actions
 ~~~~~~~
 
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index aeef3e1..32223ca 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2721,6 +2721,10 @@ This section lists supported pattern items and their attributes, if any.
 
   - ``thresh {unsigned}``: accuracy threshold.
 
+- ``gtp``, ``gtpc``, ``gtpu``: match GTPv1 header.
+
+  - ``teid {unsigned}``: tunnel endpoint identifier.
+
 Actions list
 ^^^^^^^^^^^^
 
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index bba6169..b1a1b97 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -309,6 +309,33 @@ enum rte_flow_item_type {
 	 * See struct rte_flow_item_fuzzy.
 	 */
 	RTE_FLOW_ITEM_TYPE_FUZZY,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTP,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-C packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPC,
+
+	/**
+	 * Matches a GTP header.
+	 *
+	 * Configure flow for GTP-U packets.
+	 *
+	 * See struct rte_flow_item_gtp.
+	 */
+	RTE_FLOW_ITEM_TYPE_GTPU,
 };
 
 /**
@@ -735,6 +762,31 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
 #endif
 
 /**
+ * RTE_FLOW_ITEM_TYPE_GTP.
+ *
+ * Matches a GTPv1 header.
+ */
+struct rte_flow_item_gtp {
+	/**
+	 * Version (3b), protocol type (1b), reserved (1b),
+	 * Extension header flag (1b),
+	 * Sequence number flag (1b),
+	 * N-PDU number flag (1b).
+	 */
+	uint8_t v_pt_rsv_flags;
+	uint8_t msg_type; /**< Message type. */
+	rte_be16_t msg_len; /**< Message length. */
+	rte_be32_t teid; /**< Tunnel endpoint identifier. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
+	.teid = RTE_BE32(0xffffffff),
+};
+#endif
+
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
-- 
2.5.5
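
As a usage illustration for the new items (a sketch only; whether a given
pattern is accepted depends on the PMD, and the i40e support follows in the
next patches), a GTP-U rule steering TEID 0x12345678 to queue 3 could look
like this:

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x12345678) };
	struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &err);

The equivalent testpmd command would be along the lines of:

	flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions queue index 3 / end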

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v8 4/7] net/i40e: finish integration FDIR with generic flow API
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
                                   ` (2 preceding siblings ...)
  2017-10-05  8:14                 ` [PATCH v8 3/7] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-10-05  8:14                 ` Beilei Xing
  2017-10-05  8:14                 ` [PATCH v8 5/7] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
                                   ` (4 subsequent siblings)
  8 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-10-05  8:14 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

rte_eth_fdir_* structures are still used in FDIR functions.
This patch adds i40e private FDIR-related structures and
functions to finish integrating FDIR with the generic flow API.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  83 ++++++-
 drivers/net/i40e/i40e_fdir.c   | 487 +++++++++++++++++++++++++++++++++++++++--
 drivers/net/i40e/i40e_flow.c   |  77 +++----
 3 files changed, 584 insertions(+), 63 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 9688ea8..ef4c503 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -461,6 +461,80 @@ struct i40e_vmdq_info {
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
 /*
+ * A union contains the inputs for all types of flow
+ * items in flows need to be in big endian
+ */
+union i40e_fdir_flow {
+	struct rte_eth_l2_flow     l2_flow;
+	struct rte_eth_udpv4_flow  udp4_flow;
+	struct rte_eth_tcpv4_flow  tcp4_flow;
+	struct rte_eth_sctpv4_flow sctp4_flow;
+	struct rte_eth_ipv4_flow   ip4_flow;
+	struct rte_eth_udpv6_flow  udp6_flow;
+	struct rte_eth_tcpv6_flow  tcp6_flow;
+	struct rte_eth_sctpv6_flow sctp6_flow;
+	struct rte_eth_ipv6_flow   ipv6_flow;
+};
+
+/* A structure used to contain extend input of flow */
+struct i40e_fdir_flow_ext {
+	uint16_t vlan_tci;
+	uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/* It is filled by the flexible payload to match. */
+	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
+	uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+};
+
+/* A structure used to define the input for a flow director filter entry */
+struct i40e_fdir_input {
+	enum i40e_filter_pctype pctype;
+	union i40e_fdir_flow flow;
+	/* Flow fields to match, dependent on flow_type */
+	struct i40e_fdir_flow_ext flow_ext;
+	/* Additional fields to match */
+};
+
+/* Behavior will be taken if FDIR match */
+enum i40e_fdir_behavior {
+	I40E_FDIR_ACCEPT = 0,
+	I40E_FDIR_REJECT,
+	I40E_FDIR_PASSTHRU,
+};
+
+/* Flow director report status
+ * It defines what will be reported if FDIR entry is matched.
+ */
+enum i40e_fdir_status {
+	I40E_FDIR_NO_REPORT_STATUS = 0, /* Report nothing. */
+	I40E_FDIR_REPORT_ID,            /* Only report FD ID. */
+	I40E_FDIR_REPORT_ID_FLEX_4,     /* Report FD ID and 4 flex bytes. */
+	I40E_FDIR_REPORT_FLEX_8,        /* Report 8 flex bytes. */
+};
+
+/* A structure used to define an action when match FDIR packet filter. */
+struct i40e_fdir_action {
+	uint16_t rx_queue;        /* Queue assigned to if FDIR match. */
+	enum i40e_fdir_behavior behavior;     /* Behavior will be taken */
+	enum i40e_fdir_status report_status;  /* Status report option */
+	/* If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or
+	 * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+	 * flex bytes start from in flexible payload.
+	 */
+	uint8_t flex_off;
+};
+
+/* A structure used to define the flow director filter entry by filter_ctrl API
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct i40e_fdir_filter_conf {
+	uint32_t soft_id;
+	/* ID, an unique value is required when deal with FDIR entry */
+	struct i40e_fdir_input input;    /* Input set */
+	struct i40e_fdir_action action;  /* Action taken when match */
+};
+
+/*
  * Structure to store flex pit for flow diretor.
  */
 struct i40e_fdir_flex_pit {
@@ -484,7 +558,7 @@ struct i40e_fdir_flex_mask {
 
 struct i40e_fdir_filter {
 	TAILQ_ENTRY(i40e_fdir_filter) rules;
-	struct rte_eth_fdir_filter fdir;
+	struct i40e_fdir_filter_conf fdir;
 };
 
 TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
@@ -913,7 +987,7 @@ extern const struct rte_flow_ops i40e_flow_ops;
 
 union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_fdir_filter fdir_filter;
+	struct i40e_fdir_filter_conf fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
@@ -990,7 +1064,7 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
 int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
 				 struct i40e_ethertype_filter_input *input);
 int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
-			    struct rte_eth_fdir_input *input);
+			    struct i40e_fdir_input *input);
 struct i40e_tunnel_filter *
 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
 			     const struct i40e_tunnel_filter_input *input);
@@ -1003,6 +1077,9 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf,
 int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 			     const struct rte_eth_fdir_filter *filter,
 			     bool add);
+int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 268ada0..7b16584 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -100,13 +100,18 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
 			const struct rte_eth_fdir_filter *filter,
 			bool add);
-static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter);
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input);
+			const struct i40e_fdir_input *input);
 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
 				   struct i40e_fdir_filter *filter);
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add);
 
 static int
 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -933,6 +938,263 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static inline int
+i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+				unsigned char *raw_pkt,
+				bool vlan)
+{
+	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+	uint16_t *ether_type;
+	uint8_t len = 2 * sizeof(struct ether_addr);
+	struct ipv4_hdr *ip;
+	struct ipv6_hdr *ip6;
+	static const uint8_t next_proto[] = {
+		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
+		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
+		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+	};
+
+	raw_pkt += 2 * sizeof(struct ether_addr);
+	if (vlan && fdir_input->flow_ext.vlan_tci) {
+		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+		rte_memcpy(raw_pkt + sizeof(uint16_t),
+			   &fdir_input->flow_ext.vlan_tci,
+			   sizeof(uint16_t));
+		raw_pkt += sizeof(vlan_frame);
+		len += sizeof(vlan_frame);
+	}
+	ether_type = (uint16_t *)raw_pkt;
+	raw_pkt += sizeof(uint16_t);
+	len += sizeof(uint16_t);
+
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		*ether_type = fdir_input->flow.l2_flow.ether_type;
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		ip = (struct ipv4_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+		/* set len to by default */
+		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+					fdir_input->flow.ip4_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+					fdir_input->flow.ip4_flow.ttl :
+					I40E_FDIR_IP_DEFAULT_TTL;
+		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+		len += sizeof(struct ipv4_hdr);
+		break;
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		ip6 = (struct ipv6_hdr *)raw_pkt;
+
+		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		ip6->vtc_flow =
+			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					 (fdir_input->flow.ipv6_flow.tc <<
+					  I40E_FDIR_IPv6_TC_OFFSET));
+		ip6->payload_len =
+			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+					fdir_input->flow.ipv6_flow.proto :
+					next_proto[fdir_input->pctype];
+		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+					fdir_input->flow.ipv6_flow.hop_limits :
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		rte_memcpy(&ip6->src_addr,
+			   &fdir_input->flow.ipv6_flow.dst_ip,
+			   IPV6_ADDR_LEN);
+		rte_memcpy(&ip6->dst_addr,
+			   &fdir_input->flow.ipv6_flow.src_ip,
+			   IPV6_ADDR_LEN);
+		len += sizeof(struct ipv6_hdr);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
+	}
+	return len;
+}
+
+/**
+ * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+			     const struct i40e_fdir_input *fdir_input,
+			     unsigned char *raw_pkt)
+{
+	unsigned char *payload, *ptr;
+	struct udp_hdr *udp;
+	struct tcp_hdr *tcp;
+	struct sctp_hdr *sctp;
+	uint8_t size, dst = 0;
+	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+	int len;
+
+	/* fill the ethernet and IP head */
+	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+					      !!fdir_input->flow_ext.vlan_tci);
+	if (len < 0)
+		return -EINVAL;
+
+	/* fill the L4 head */
+	switch (fdir_input->pctype) {
+	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+		udp = (struct udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+		tcp = (struct tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+		sctp = (struct sctp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+		break;
+
+	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
+	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+		payload = raw_pkt + len;
+		set_idx = I40E_FLXPLD_L3_IDX;
+		break;
+	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+		payload = raw_pkt + len;
+		/**
+		 * ARP packet is a special case on which the payload
+		 * starts after the whole ARP header
+		 */
+		if (fdir_input->flow.l2_flow.ether_type ==
+				rte_cpu_to_be_16(ETHER_TYPE_ARP))
+			payload += sizeof(struct arp_hdr);
+		set_idx = I40E_FLXPLD_L2_IDX;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
+		return -EINVAL;
+	}
+
+	/* fill the flexbytes to payload */
+	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+		size = pf->fdir.flex_set[pit_idx].size;
+		if (size == 0)
+			continue;
+		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+		ptr = payload +
+		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+		(void)rte_memcpy(ptr,
+				 &fdir_input->flow_ext.flexbytes[dst],
+				 size * sizeof(uint16_t));
+	}
+
+	return 0;
+}
+
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -1006,17 +1268,17 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
 }
 
 static int
-i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
 			 struct i40e_fdir_filter *filter)
 {
-	rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
 	return 0;
 }
 
 /* Check if there exists the flow director filter */
 static struct i40e_fdir_filter *
 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
-			const struct rte_eth_fdir_input *input)
+			const struct i40e_fdir_input *input)
 {
 	int ret;
 
@@ -1051,7 +1313,7 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
 
 /* Delete a flow director filter from the SW list */
 int
-i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
 {
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	struct i40e_fdir_filter *filter;
@@ -1081,16 +1343,13 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
  */
 int
 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
-			    const struct rte_eth_fdir_filter *filter,
-			    bool add)
+			 const struct rte_eth_fdir_filter *filter,
+			 bool add)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
 	enum i40e_filter_pctype pctype;
-	struct i40e_fdir_info *fdir_info = &pf->fdir;
-	struct i40e_fdir_filter *fdir_filter, *node;
-	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
 	int ret = 0;
 
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1114,6 +1373,65 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+		return ret;
+	}
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		/* get translated pctype value in fd pctype register */
+		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+			hw, I40E_GLQF_FD_PCTYPES((int)pctype));
+	}
+
+	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+			    pctype);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
+ * @pf: board private structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct i40e_fdir_filter_conf *filter,
+			      bool add)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	enum i40e_filter_pctype pctype;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_fdir_filter *fdir_filter, *node;
+	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+	int ret = 0;
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
+		return -ENOTSUP;
+	}
+
+	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "Invalid queue ID");
+		return -EINVAL;
+	}
+	if (filter->input.flow_ext.is_vf &&
+	    filter->input.flow_ext.dst_id >= pf->vf_num) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID");
+		return -EINVAL;
+	}
+
 	/* Check if there is the filter in SW list */
 	memset(&check_filter, 0, sizeof(check_filter));
 	i40e_fdir_filter_convert(filter, &check_filter);
@@ -1132,7 +1450,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 
 	memset(pkt, 0, I40E_FDIR_PKT_LEN);
 
-	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
 		return ret;
@@ -1141,9 +1459,12 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 	if (hw->mac.type == I40E_MAC_X722) {
 		/* get translated pctype value in fd pctype register */
 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
-			hw, I40E_GLQF_FD_PCTYPES((int)pctype));
-	}
-	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+			hw, I40E_GLQF_FD_PCTYPES(
+				(int)filter->input.pctype));
+	} else
+		pctype = filter->input.pctype;
+
+	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
 			    pctype);
@@ -1298,6 +1619,140 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
 }
 
 /*
+ * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
+ * Is done by Flow Director Programming Descriptor followed by packet
+ * structure that contains the filter fields need to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+				  enum i40e_filter_pctype pctype,
+				  const struct i40e_fdir_filter_conf *filter,
+				  bool add)
+{
+	struct i40e_tx_queue *txq = pf->fdir.txq;
+	struct i40e_rx_queue *rxq = pf->fdir.rxq;
+	const struct i40e_fdir_action *fdir_action = &filter->action;
+	volatile struct i40e_tx_desc *txdp;
+	volatile struct i40e_filter_program_desc *fdirdp;
+	uint32_t td_cmd;
+	uint16_t vsi_id, i;
+	uint8_t dest;
+
+	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+	fdirdp = (volatile struct i40e_filter_program_desc *)
+				(&txq->tx_ring[txq->tx_tail]);
+
+	fdirdp->qindex_flex_ptype_vsi =
+			rte_cpu_to_le_32((fdir_action->rx_queue <<
+					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((fdir_action->flex_off <<
+					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+	fdirdp->qindex_flex_ptype_vsi |=
+			rte_cpu_to_le_32((pctype <<
+					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+	if (filter->input.flow_ext.is_vf)
+		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+	else
+		/* Use LAN VSI Id by default */
+		vsi_id = pf->main_vsi->vsi_id;
+	fdirdp->qindex_flex_ptype_vsi |=
+		rte_cpu_to_le_32(((uint32_t)vsi_id <<
+				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+	fdirdp->dtype_cmd_cntindex =
+			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+	if (add)
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+	else
+		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	if (fdir_action->behavior == I40E_FDIR_REJECT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
+		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+	else {
+		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
+		return -EINVAL;
+	}
+
+	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+				I40E_TXD_FLTR_QW1_DEST_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+		rte_cpu_to_le_32((fdir_action->report_status <<
+				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+	fdirdp->dtype_cmd_cntindex |=
+			rte_cpu_to_le_32(
+			((uint32_t)pf->fdir.match_counter_index <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+	txdp = &txq->tx_ring[txq->tx_tail + 1];
+	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+	td_cmd = I40E_TX_DESC_CMD_EOP |
+		 I40E_TX_DESC_CMD_RS  |
+		 I40E_TX_DESC_CMD_DUMMY;
+
+	txdp->cmd_type_offset_bsz =
+		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+	/* Update the tx tail register */
+	rte_wmb();
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+		if ((txdp->cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+			break;
+		rte_delay_us(1);
+	}
+	if (i >= I40E_FDIR_MAX_WAIT_US) {
+		PMD_DRV_LOG(ERR,
+		    "Failed to program FDIR filter: time out to get DD on tx queue.");
+		return -ETIMEDOUT;
+	}
+	/* totally delay 10 ms to check programming status*/
+	rte_delay_us(I40E_FDIR_MAX_WAIT_US);
+	if (i40e_check_fdir_programming_status(rxq) < 0) {
+		PMD_DRV_LOG(ERR,
+		    "Failed to program FDIR filter: programming status reported.");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
  * i40e_fdir_flush - clear all filters of Flow Director table
  * @pf: board private structure
  */
@@ -1572,7 +2027,7 @@ i40e_fdir_filter_restore(struct i40e_pf *pf)
 	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
 
 	TAILQ_FOREACH(f, fdir_list, rules)
-		i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
 
 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
 	guarant_cnt =
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 5d8afc6..73af7fd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -84,11 +84,11 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					const struct rte_flow_item *pattern,
 					struct rte_flow_error *error,
-					struct rte_eth_fdir_filter *filter);
+					struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 				       const struct rte_flow_action *actions,
 				       struct rte_flow_error *error,
-				       struct rte_eth_fdir_filter *filter);
+				       struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				 const struct rte_flow_action *actions,
 				 struct rte_flow_error *error,
@@ -2315,7 +2315,7 @@ static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			     const struct rte_flow_item *pattern,
 			     struct rte_flow_error *error,
-			     struct rte_eth_fdir_filter *filter)
+			     struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_item *item = pattern;
@@ -2329,8 +2329,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
-	enum i40e_filter_pctype pctype;
+	enum i40e_filter_pctype pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
@@ -2402,7 +2401,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2420,7 +2419,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				}
 			}
 
-			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
 			layer_idx = I40E_FLXPLD_L2_IDX;
 
 			break;
@@ -2457,13 +2456,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					input_set |= I40E_INSET_IPV4_PROTO;
 
 				/* Get filter info */
-				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+				pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 				/* Check if it is fragment. */
 				frag_off = ipv4_spec->hdr.fragment_offset;
 				frag_off = rte_be_to_cpu_16(frag_off);
 				if (frag_off & IPV4_HDR_OFFSET_MASK ||
 				    frag_off & IPV4_HDR_MF_FLAG)
-					flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
 
 				/* Get the filter info */
 				filter->input.flow.ip4_flow.proto =
@@ -2535,11 +2534,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				/* Check if it is fragment. */
 				if (ipv6_spec->hdr.proto ==
 				    I40E_IPV6_FRAG_HEADER)
-					flow_type =
-						RTE_ETH_FLOW_FRAG_IPV6;
+					pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
 				else
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+					pctype =
+					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
 			}
 
 			layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2572,11 +2570,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.tcp4_flow.src_port =
@@ -2616,11 +2614,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+					pctype =
+					       I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.udp4_flow.src_port =
@@ -2663,11 +2661,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
 				/* Get filter info */
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					flow_type =
-						RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+					pctype =
+					      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
 
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
 					filter->input.flow.sctp4_flow.src_port =
@@ -2776,15 +2774,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	pctype = i40e_flowtype_to_pctype(pf->adapter, flow_type);
-	if (pctype == I40E_FILTER_PCTYPE_INVALID ||
-	    pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Unsupported flow type");
-		return -rte_errno;
-	}
-
 	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
 	if (ret == -1) {
 		rte_flow_error_set(error, EINVAL,
@@ -2798,7 +2787,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	filter->input.flow_type = flow_type;
+	filter->input.pctype = pctype;
 
 	/* Store flex mask to SW */
 	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
@@ -2833,7 +2822,7 @@ static int
 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 			    const struct rte_flow_action *actions,
 			    struct rte_flow_error *error,
-			    struct rte_eth_fdir_filter *filter)
+			    struct i40e_fdir_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
@@ -2856,13 +2845,13 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 					   "Invalid queue ID for FDIR.");
 			return -rte_errno;
 		}
-		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		filter->action.behavior = I40E_FDIR_ACCEPT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_DROP:
-		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+		filter->action.behavior = I40E_FDIR_REJECT;
 		break;
 	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
-		filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+		filter->action.behavior = I40E_FDIR_PASSTHRU;
 		break;
 	default:
 		rte_flow_error_set(error, EINVAL,
@@ -2877,11 +2866,11 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_MARK:
 		mark_spec = (const struct rte_flow_action_mark *)act->conf;
-		filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+		filter->action.report_status = I40E_FDIR_REPORT_ID;
 		filter->soft_id = mark_spec->id;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
-		filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
 		break;
 	case RTE_FLOW_ACTION_TYPE_END:
 		return 0;
@@ -2912,7 +2901,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error,
 			    union i40e_filter_t *filter)
 {
-	struct rte_eth_fdir_filter *fdir_filter =
+	struct i40e_fdir_filter_conf *fdir_filter =
 		&filter->fdir_filter;
 	int ret;
 
@@ -3878,7 +3867,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_ethertype_filter_list);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 				       &cons_filter.fdir_filter, 1);
 		if (ret)
 			goto free_flow;
@@ -3928,7 +3917,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 			      (struct i40e_tunnel_filter *)flow->rule);
 		break;
 	case RTE_ETH_FILTER_FDIR:
-		ret = i40e_add_del_fdir_filter(dev,
+		ret = i40e_flow_add_del_fdir_filter(dev,
 		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
 		break;
 	default:
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v8 5/7] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
                                   ` (3 preceding siblings ...)
  2017-10-05  8:14                 ` [PATCH v8 4/7] net/i40e: finish integration FDIR with generic " Beilei Xing
@ 2017-10-05  8:14                 ` Beilei Xing
  2017-10-05 11:50                   ` Sean Harte
  2017-10-05  8:14                 ` [PATCH v8 6/7] net/i40e: add cloud filter parsing function for GTP Beilei Xing
                                   ` (3 subsequent siblings)
  8 siblings, 1 reply; 116+ messages in thread
From: Beilei Xing @ 2017-10-05  8:14 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds FDIR support for GTP-C and GTP-U. The
input set for GTP-C and GTP-U is the TEID.
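
For reference, a minimal sketch (not part of this patch) of how an
application could exercise this path through the generic flow API once
the series is applied: match GTP-U packets on a fully masked TEID and
steer them to a queue. The helper name, port id, TEID value and queue
index below are placeholders.

#include <stdint.h>

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_flow.h>

/* Install an FDIR rule: GTP-U packets whose TEID equals 'teid' (given
 * in CPU order, converted to network order for the flow item) are
 * steered to 'queue'.  All other GTP fields are left unmasked, which
 * is the only mask combination this series accepts. */
static int
add_gtpu_teid_rule(uint16_t port_id, uint32_t teid, uint16_t queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = {
		.teid = rte_cpu_to_be_32(teid),
	};
	struct rte_flow_item_gtp gtp_mask = {
		.teid = UINT32_MAX,	/* full TEID mask is required */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue q = { .index = queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (rte_flow_create(port_id, &attr, pattern, actions, &err) == NULL)
		return -rte_errno;

	return 0;
}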

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |  30 +++++
 drivers/net/i40e/i40e_fdir.c   | 216 ++++++++++++++++++++++++---------
 drivers/net/i40e/i40e_flow.c   | 267 +++++++++++++++++++++++++++++++++++------
 3 files changed, 415 insertions(+), 98 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ef4c503..9cd2795 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -460,6 +460,25 @@ struct i40e_vmdq_info {
 #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
 #define I40E_FDIR_IPv6_TC_OFFSET	20
 
+/* A structure used to define the input for GTP flow */
+struct i40e_gtp_flow {
+	struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */
+	uint8_t msg_type;              /* Message type. */
+	uint32_t teid;                 /* TEID in big endian. */
+};
+
+/* A structure used to define the input for GTP IPV4 flow */
+struct i40e_gtp_ipv4_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv4_flow ip4;
+};
+
+/* A structure used to define the input for GTP IPV6 flow */
+struct i40e_gtp_ipv6_flow {
+	struct i40e_gtp_flow gtp;
+	struct rte_eth_ipv6_flow ip6;
+};
+
 /*
  * A union contains the inputs for all types of flow
  * items in flows need to be in big endian
@@ -474,6 +493,14 @@ union i40e_fdir_flow {
 	struct rte_eth_tcpv6_flow  tcp6_flow;
 	struct rte_eth_sctpv6_flow sctp6_flow;
 	struct rte_eth_ipv6_flow   ipv6_flow;
+	struct i40e_gtp_flow       gtp_flow;
+	struct i40e_gtp_ipv4_flow  gtp_ipv4_flow;
+	struct i40e_gtp_ipv6_flow  gtp_ipv6_flow;
+};
+
+enum i40e_fdir_ip_type {
+	I40E_FDIR_IPTYPE_IPV4,
+	I40E_FDIR_IPTYPE_IPV6,
 };
 
 /* A structure used to contain extend input of flow */
@@ -483,6 +510,9 @@ struct i40e_fdir_flow_ext {
 	/* It is filled by the flexible payload to match. */
 	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
 	uint16_t dst_id; /* VF ID, available when is_vf is 1*/
+	bool inner_ip;   /* If there is inner ip */
+	enum i40e_fdir_ip_type iip_type; /* ip type for inner ip */
+	bool customized_pctype; /* If customized pctype is used */
 };
 
 /* A structure used to define the input for a flow director filter entry */
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 7b16584..7c46578 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -71,6 +71,16 @@
 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
 #define I40E_FDIR_UDP_DEFAULT_LEN           400
+#define I40E_FDIR_GTP_DEFAULT_LEN           384
+#define I40E_FDIR_INNER_IP_DEFAULT_LEN      384
+#define I40E_FDIR_INNER_IPV6_DEFAULT_LEN    344
+
+#define I40E_FDIR_GTPC_DST_PORT             2123
+#define I40E_FDIR_GTPU_DST_PORT             2152
+#define I40E_FDIR_GTP_VER_FLAG_0X30         0x30
+#define I40E_FDIR_GTP_VER_FLAG_0X32         0x32
+#define I40E_FDIR_GTP_MSG_TYPE_0X01         0x01
+#define I40E_FDIR_GTP_MSG_TYPE_0XFF         0xFF
 
 /* Wait time for fdir filter programming */
 #define I40E_FDIR_MAX_WAIT_US 10000
@@ -938,16 +948,34 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 	return 0;
 }
 
+static struct i40e_customized_pctype *
+i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
+{
+	struct i40e_customized_pctype *cus_pctype;
+	enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
+
+	for (; i < I40E_CUSTOMIZED_MAX; i++) {
+		cus_pctype = &pf->customized_pctype[i];
+		if (pctype == cus_pctype->pctype)
+			return cus_pctype;
+	}
+	return NULL;
+}
+
 static inline int
-i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
+i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
+				const struct i40e_fdir_input *fdir_input,
 				unsigned char *raw_pkt,
 				bool vlan)
 {
+	struct i40e_customized_pctype *cus_pctype = NULL;
 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
 	uint16_t *ether_type;
 	uint8_t len = 2 * sizeof(struct ether_addr);
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
+	uint8_t pctype = fdir_input->pctype;
+	bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
 	static const uint8_t next_proto[] = {
 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
@@ -974,27 +1002,32 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 	raw_pkt += sizeof(uint16_t);
 	len += sizeof(uint16_t);
 
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	if (is_customized_pctype) {
+		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+		if (!cus_pctype) {
+			PMD_DRV_LOG(ERR, "unknown pctype %u.",
+				    fdir_input->pctype);
+			return -1;
+		}
+	}
+
+	if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
 		*ether_type = fdir_input->flow.l2_flow.ether_type;
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
+		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
+		 is_customized_pctype) {
 		ip = (struct ipv4_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
 		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
 		/* set len to by default */
 		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
-		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
-					fdir_input->flow.ip4_flow.proto :
-					next_proto[fdir_input->pctype];
 		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
-					fdir_input->flow.ip4_flow.ttl :
-					I40E_FDIR_IP_DEFAULT_TTL;
+			fdir_input->flow.ip4_flow.ttl :
+			I40E_FDIR_IP_DEFAULT_TTL;
 		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
 		/**
 		 * The source and destination fields in the transmitted packet
@@ -1003,13 +1036,22 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		 */
 		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
 		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+
+		if (!is_customized_pctype)
+			ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+				fdir_input->flow.ip4_flow.proto :
+				next_proto[fdir_input->pctype];
+		else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+			 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
+			ip->next_proto_id = IPPROTO_UDP;
 		len += sizeof(struct ipv4_hdr);
-		break;
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
+		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		ip6 = (struct ipv6_hdr *)raw_pkt;
 
 		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
@@ -1020,11 +1062,11 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 		ip6->payload_len =
 			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
 		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
-					fdir_input->flow.ipv6_flow.proto :
-					next_proto[fdir_input->pctype];
+			fdir_input->flow.ipv6_flow.proto :
+			next_proto[fdir_input->pctype];
 		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
-					fdir_input->flow.ipv6_flow.hop_limits :
-					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+			fdir_input->flow.ipv6_flow.hop_limits :
+			I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
 		/**
 		 * The source and destination fields in the transmitted packet
 		 * need to be presented in a reversed order with respect
@@ -1037,12 +1079,12 @@ i40e_flow_fdir_fill_eth_ip_head(const struct i40e_fdir_input *fdir_input,
 			   &fdir_input->flow.ipv6_flow.src_ip,
 			   IPV6_ADDR_LEN);
 		len += sizeof(struct ipv6_hdr);
-		break;
-	default:
+	} else {
 		PMD_DRV_LOG(ERR, "unknown pctype %u.",
 			    fdir_input->pctype);
 		return -1;
 	}
+
 	return len;
 }
 
@@ -1057,23 +1099,28 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 			     const struct i40e_fdir_input *fdir_input,
 			     unsigned char *raw_pkt)
 {
-	unsigned char *payload, *ptr;
+	unsigned char *payload = NULL;
+	unsigned char *ptr;
 	struct udp_hdr *udp;
 	struct tcp_hdr *tcp;
 	struct sctp_hdr *sctp;
+	struct rte_flow_item_gtp *gtp;
+	struct ipv4_hdr *gtp_ipv4;
+	struct ipv6_hdr *gtp_ipv6;
 	uint8_t size, dst = 0;
 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
 	int len;
+	uint8_t pctype = fdir_input->pctype;
+	struct i40e_customized_pctype *cus_pctype;
 
 	/* fill the ethernet and IP head */
-	len = i40e_flow_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
+	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
 					      !!fdir_input->flow_ext.vlan_tci);
 	if (len < 0)
 		return -EINVAL;
 
 	/* fill the L4 head */
-	switch (fdir_input->pctype) {
-	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1084,9 +1131,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1097,9 +1142,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1110,15 +1153,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV4_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV4:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
 		udp = (struct udp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
 		/**
@@ -1129,9 +1168,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
 		tcp = (struct tcp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
 		/**
@@ -1142,9 +1179,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
 		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
 		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_SCTP:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
 		sctp = (struct sctp_hdr *)(raw_pkt + len);
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		/**
@@ -1155,14 +1190,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
 		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
 		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
-		break;
-
-	case I40E_FILTER_PCTYPE_NONF_IPV6_OTHER:
-	case I40E_FILTER_PCTYPE_FRAG_IPV6:
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
 		payload = raw_pkt + len;
 		set_idx = I40E_FLXPLD_L3_IDX;
-		break;
-	case I40E_FILTER_PCTYPE_L2_PAYLOAD:
+	} else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
 		payload = raw_pkt + len;
 		/**
 		 * ARP packet is a special case on which the payload
@@ -1172,10 +1204,76 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 				rte_cpu_to_be_16(ETHER_TYPE_ARP))
 			payload += sizeof(struct arp_hdr);
 		set_idx = I40E_FLXPLD_L2_IDX;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
-		return -EINVAL;
+	} else if (fdir_input->flow_ext.customized_pctype) {
+		/* If customized pctype is used */
+		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+		if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+		    cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
+			udp = (struct udp_hdr *)(raw_pkt + len);
+			udp->dgram_len =
+				rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+			gtp = (struct rte_flow_item_gtp *)
+				((unsigned char *)udp + sizeof(struct udp_hdr));
+			gtp->msg_len =
+				rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+			gtp->teid = fdir_input->flow.gtp_flow.teid;
+			gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
+
+			/* GTP-C message type is not supported. */
+			if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
+				udp->dst_port =
+				      rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
+				gtp->v_pt_rsv_flags =
+					I40E_FDIR_GTP_VER_FLAG_0X32;
+			} else {
+				udp->dst_port =
+				      rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
+				gtp->v_pt_rsv_flags =
+					I40E_FDIR_GTP_VER_FLAG_0X30;
+			}
+
+			if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
+				gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
+				gtp_ipv4 = (struct ipv4_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv4->version_ihl =
+					I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+				gtp_ipv4->next_proto_id = IPPROTO_IP;
+				gtp_ipv4->total_length =
+					rte_cpu_to_be_16(
+						I40E_FDIR_INNER_IP_DEFAULT_LEN);
+				payload = (unsigned char *)gtp_ipv4 +
+					sizeof(struct ipv4_hdr);
+			} else if (cus_pctype->index ==
+				   I40E_CUSTOMIZED_GTPU_IPV6) {
+				gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
+				gtp_ipv6 = (struct ipv6_hdr *)
+					((unsigned char *)gtp +
+					 sizeof(struct rte_flow_item_gtp));
+				gtp_ipv6->vtc_flow =
+					rte_cpu_to_be_32(
+					       I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+					       (0 << I40E_FDIR_IPv6_TC_OFFSET));
+				gtp_ipv6->proto = IPPROTO_NONE;
+				gtp_ipv6->payload_len =
+					rte_cpu_to_be_16(
+					      I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
+				gtp_ipv6->hop_limits =
+					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+				payload = (unsigned char *)gtp_ipv6 +
+					sizeof(struct ipv6_hdr);
+			} else
+				payload = (unsigned char *)gtp +
+					sizeof(struct rte_flow_item_gtp);
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "unknown pctype %u.",
+			    fdir_input->pctype);
+		return -1;
 	}
 
 	/* fill the flexbytes to payload */
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 73af7fd..370c93b 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -189,6 +189,40 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
@@ -216,6 +250,40 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_RAW,
@@ -1576,10 +1644,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+	{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
 	/* FDIR - support default flow type with flexible payload */
 	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
 	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -2302,14 +2378,52 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf,
 	return 0;
 }
 
+static uint8_t
+i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
+				enum rte_flow_item_type item_type,
+				struct i40e_fdir_filter_conf *filter)
+{
+	struct i40e_customized_pctype *cus_pctype = NULL;
+
+	switch (item_type) {
+	case RTE_FLOW_ITEM_TYPE_GTPC:
+		cus_pctype = i40e_find_customized_pctype(pf,
+							 I40E_CUSTOMIZED_GTPC);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GTPU:
+		if (!filter->input.flow_ext.inner_ip)
+			cus_pctype = i40e_find_customized_pctype(pf,
+							 I40E_CUSTOMIZED_GTPU);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV4)
+			cus_pctype = i40e_find_customized_pctype(pf,
+						 I40E_CUSTOMIZED_GTPU_IPV4);
+		else if (filter->input.flow_ext.iip_type ==
+			 I40E_FDIR_IPTYPE_IPV6)
+			cus_pctype = i40e_find_customized_pctype(pf,
+						 I40E_CUSTOMIZED_GTPU_IPV6);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported item type");
+		break;
+	}
+
+	if (cus_pctype)
+		return cus_pctype->pctype;
+
+	return I40E_FILTER_PCTYPE_INVALID;
+}
+
 /* 1. Last in item should be NULL as range is not supported.
  * 2. Supported patterns: refer to array i40e_supported_patterns.
- * 3. Supported flow type and input set: refer to array
+ * 3. Default supported flow type and input set: refer to array
  *    valid_fdir_inset_table in i40e_ethdev.c.
  * 4. Mask of fields which need to be matched should be
  *    filled with 1.
  * 5. Mask of fields which needn't to be matched should be
  *    filled with 0.
+ * 6. GTP profile supports GTPv1 only.
+ * 7. GTP-C response message ('source_port' = 2123) is not supported.
  */
 static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
@@ -2326,14 +2440,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_vf *vf_spec;
 
-	enum i40e_filter_pctype pctype = 0;
+	uint8_t pctype = 0;
 	uint64_t input_set = I40E_INSET_NONE;
 	uint16_t frag_off;
 	enum rte_flow_item_type item_type;
 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
 	uint32_t i, j;
 	uint8_t  ipv6_addr_mask[16] = {
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -2351,12 +2467,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 	uint16_t outer_tpid;
 	uint16_t ether_type;
 	uint32_t vtc_flow_cpu;
+	bool outer_ip = true;
 	int ret;
 
 	memset(off_arr, 0, sizeof(off_arr));
 	memset(len_arr, 0, sizeof(len_arr));
 	memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
 	outer_tpid = i40e_get_outer_vlan(dev);
+	filter->input.flow_ext.customized_pctype = false;
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
@@ -2430,7 +2548,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv4_mask =
 				(const struct rte_flow_item_ipv4 *)item->mask;
 
-			if (ipv4_spec && ipv4_mask) {
+			if (ipv4_spec && ipv4_mask && outer_ip) {
 				/* Check IPv4 mask and update input set */
 				if (ipv4_mask->hdr.version_ihl ||
 				    ipv4_mask->hdr.total_length ||
@@ -2475,9 +2593,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 					ipv4_spec->hdr.src_addr;
 				filter->input.flow.ip4_flow.dst_ip =
 					ipv4_spec->hdr.dst_addr;
+
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV4;
+			} else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv4 mask.");
+				return -rte_errno;
 			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+			if (outer_ip)
+				outer_ip = false;
 
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -2487,7 +2618,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			ipv6_mask =
 				(const struct rte_flow_item_ipv6 *)item->mask;
 
-			if (ipv6_spec && ipv6_mask) {
+			if (ipv6_spec && ipv6_mask && outer_ip) {
 				/* Check IPv6 mask and update input set */
 				if (ipv6_mask->hdr.payload_len) {
 					rte_flow_error_set(error, EINVAL,
@@ -2538,10 +2669,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				else
 					pctype =
 					     I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
-			}
 
-			layer_idx = I40E_FLXPLD_L3_IDX;
+				layer_idx = I40E_FLXPLD_L3_IDX;
+			} else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
+				filter->input.flow_ext.inner_ip = true;
+				filter->input.flow_ext.iip_type =
+					I40E_FDIR_IPTYPE_IPV6;
+			} else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid inner IPv6 mask");
+				return -rte_errno;
+			}
 
+			if (outer_ip)
+				outer_ip = false;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
@@ -2636,6 +2779,37 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			layer_idx = I40E_FLXPLD_L4_IDX;
 
 			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			if (!pf->gtp_support) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported protocol");
+				return -rte_errno;
+			}
+
+			gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+				    gtp_mask->msg_type ||
+				    gtp_mask->msg_len ||
+				    gtp_mask->teid != UINT32_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				filter->input.flow.gtp_flow.teid =
+					gtp_spec->teid;
+				filter->input.flow_ext.customized_pctype = true;
+				cus_proto = item_type;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
 			sctp_spec =
 				(const struct rte_flow_item_sctp *)item->spec;
@@ -2774,43 +2948,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		}
 	}
 
-	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Conflict with the first rule's input set.");
-		return -rte_errno;
-	} else if (ret == -EINVAL) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, item,
-				   "Invalid pattern mask.");
-		return -rte_errno;
+	/* Get customized pctype value */
+	if (filter->input.flow_ext.customized_pctype) {
+		pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
+		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported pctype");
+			return -rte_errno;
+		}
 	}
 
-	filter->input.pctype = pctype;
+	/* If customized pctype is not used, set fdir configuration.*/
+	if (!filter->input.flow_ext.customized_pctype) {
+		ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Conflict with the first rule's input set.");
+			return -rte_errno;
+		} else if (ret == -EINVAL) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Invalid pattern mask.");
+			return -rte_errno;
+		}
 
-	/* Store flex mask to SW */
-	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
-	if (ret == -1) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Exceed maximal number of bitmasks");
-		return -rte_errno;
-	} else if (ret == -2) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM,
-				   item,
-				   "Conflict with the first flexible rule");
-		return -rte_errno;
-	} else if (ret > 0)
-		cfg_flex_msk = false;
+		/* Store flex mask to SW */
+		ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+		if (ret == -1) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Exceed maximal number of bitmasks");
+			return -rte_errno;
+		} else if (ret == -2) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Conflict with the first flexible rule");
+			return -rte_errno;
+		} else if (ret > 0)
+			cfg_flex_msk = false;
 
-	if (cfg_flex_pit)
-		i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+		if (cfg_flex_pit)
+			i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
 
-	if (cfg_flex_msk)
-		i40e_flow_set_fdir_flex_msk(pf, pctype);
+		if (cfg_flex_msk)
+			i40e_flow_set_fdir_flex_msk(pf, pctype);
+	}
+
+	filter->input.pctype = pctype;
 
 	return 0;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v8 6/7] net/i40e: add cloud filter parsing function for GTP
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
                                   ` (4 preceding siblings ...)
  2017-10-05  8:14                 ` [PATCH v8 5/7] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-10-05  8:14                 ` Beilei Xing
  2017-10-05  8:14                 ` [PATCH v8 7/7] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
                                   ` (2 subsequent siblings)
  8 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-10-05  8:14 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch adds the i40e_flow_parse_gtp_filter parsing
function for GTP-C and GTP-U to support cloud filters.
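
As with the FDIR path, only an exact TEID match is supported. A small
illustration (not from the patch) of the GTP item masks this parser
accepts and rejects:

#include <stdint.h>

#include <rte_flow.h>

/* Accepted: match the TEID exactly and nothing else. */
struct rte_flow_item_gtp ok_mask = {
	.teid = UINT32_MAX,
	/* .v_pt_rsv_flags, .msg_type and .msg_len stay 0 (not matched) */
};

/* Rejected with "Invalid GTP mask": matching the message type
 * (or the version/flags or message length) is not supported. */
struct rte_flow_item_gtp bad_mask = {
	.teid = UINT32_MAX,
	.msg_type = 0xFF,
};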

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |   2 +
 drivers/net/i40e/i40e_flow.c   | 153 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 155 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 9cd2795..2b2ef69 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -704,6 +704,8 @@ enum i40e_tunnel_type {
 	I40E_TUNNEL_TYPE_MPLSoUDP,
 	I40E_TUNNEL_TYPE_MPLSoGRE,
 	I40E_TUNNEL_TYPE_QINQ,
+	I40E_TUNNEL_TYPE_GTPC,
+	I40E_TUNNEL_TYPE_GTPU,
 	I40E_TUNNEL_TYPE_MAX,
 };
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 370c93b..9470ff5 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -125,6 +125,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 				       const struct rte_flow_action actions[],
 				       struct rte_flow_error *error,
 				       union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+				      const struct rte_flow_attr *attr,
+				      const struct rte_flow_item pattern[],
+				      const struct rte_flow_action actions[],
+				      struct rte_flow_error *error,
+				      union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
 				      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -1808,6 +1814,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+	/* GTP-C & GTP-U */
+	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
+	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
 	/* QINQ */
 	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
 };
@@ -3825,6 +3836,148 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 }
 
 /* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ *    filled with 0.
+ * 5. GTP profile supports GTPv1 only.
+ * 6. GTP-C response message ('source_port' = 2123) is not supported.
+ */
+static int
+i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
+			    const struct rte_flow_item *pattern,
+			    struct rte_flow_error *error,
+			    struct i40e_tunnel_filter_conf *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_gtp *gtp_spec;
+	const struct rte_flow_item_gtp *gtp_mask;
+	enum rte_flow_item_type item_type;
+
+	if (!pf->gtp_support) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   item,
+				   "GTP is not supported by default.");
+		return -rte_errno;
+	}
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+			/* IPv4 is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_GTPC:
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec =
+				(const struct rte_flow_item_gtp *)item->spec;
+			gtp_mask =
+				(const struct rte_flow_item_gtp *)item->mask;
+
+			if (!gtp_spec || !gtp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP item");
+				return -rte_errno;
+			}
+
+			if (gtp_mask->v_pt_rsv_flags ||
+			    gtp_mask->msg_type ||
+			    gtp_mask->msg_len ||
+			    gtp_mask->teid != UINT32_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GTP mask");
+				return -rte_errno;
+			}
+
+			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+
+			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+			   const struct rte_flow_attr *attr,
+			   const struct rte_flow_item pattern[],
+			   const struct rte_flow_action actions[],
+			   struct rte_flow_error *error,
+			   union i40e_filter_t *filter)
+{
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
+	int ret;
+
+	ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+					  error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+	return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
  * 2. Supported filter types: QINQ.
  * 3. Mask of fields which need to be matched should be
  *    filled with 1.
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* [PATCH v8 7/7] net/i40e: enable cloud filter for GTP-C and GTP-U
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
                                   ` (5 preceding siblings ...)
  2017-10-05  8:14                 ` [PATCH v8 6/7] net/i40e: add cloud filter parsing function for GTP Beilei Xing
@ 2017-10-05  8:14                 ` Beilei Xing
  2017-10-05  8:23                 ` [PATCH v8 0/7] net/i40e: GTP-C and GTP-U enabling Wu, Jingjing
  2017-10-05 21:13                 ` Ferruh Yigit
  8 siblings, 0 replies; 116+ messages in thread
From: Beilei Xing @ 2017-10-05  8:14 UTC (permalink / raw)
  To: jingjing.wu; +Cc: andrey.chilikin, dev

This patch sets the TEID of GTP-C and GTP-U as the filter type
by replacing the existing filter types inner_mac and TUNNEL_KEY.
This configuration is applied when adding GTP-C or GTP-U filter
rules, and it is only invalidated by an NIC core reset.
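
For readers tracing the hexadecimal filter type constants introduced
here, a short sketch (not from the patch itself) of the remapping and
of how the TEID is packed into the admin queue command; the helper
name is invented for the sketch.

#include <stdint.h>

#include <rte_byteorder.h>

/*
 * Filter types repurposed by this patch (names from i40e_ethdev.h):
 *   L1 filter 0x12    (was FV_IMAC)             -> GTP-C TEID words
 *   L1 filter 0x13    (was FV_TUNNLE_KEY)       -> GTP-U TEID words
 *   cloud filter 0x11 (was IMAC_IVLAN)          -> GTP-C rule
 *   cloud filter 0x12 (was IMAC_IVLAN_TEN_ID)   -> GTP-U rule
 *
 * The TEID itself travels in two 16-bit "general fields" words of the
 * add-cloud-filter command (WORD0/WORD1 of the 0X12 fields for GTP-C,
 * of the 0X13 fields for GTP-U), packed as below.
 */
static void
i40e_gtp_teid_to_words(uint32_t tenant_id, uint16_t *word0, uint16_t *word1)
{
	uint32_t teid_le = rte_cpu_to_le_32(tenant_id);

	*word0 = (teid_le >> 16) & 0xFFFF;	/* upper 16 bits of teid_le */
	*word1 = teid_le & 0xFFFF;		/* lower 16 bits of teid_le */
}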

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 193 +++++++++++++++++++++++++++++++++++++----
 drivers/net/i40e/i40e_ethdev.h |  17 ++--
 drivers/net/i40e/i40e_flow.c   |  12 +--
 3 files changed, 191 insertions(+), 31 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 3295da0..0b3b1fb 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -7069,7 +7069,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
-	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 3 entries */
@@ -7117,12 +7117,12 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
@@ -7140,12 +7140,131 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
 		I40E_AQC_MIRROR_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
 	filter_replace.new_filter_type =
-		I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* For GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[2] = 0xFF;
+	filter_replace_buf.data[3] = 0xFF;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[6] = 0xFF;
+	filter_replace_buf.data[7] = 0xFF;
+
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	/* for GTP-C */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					       &filter_replace_buf);
+	if (status < 0)
+		return status;
+
+	/* for GTP-U */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+	filter_replace.new_filter_type =
+		I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 
@@ -7236,7 +7355,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
 			0x40;
 		big_buffer = 1;
-		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
 		break;
 	case I40E_TUNNEL_TYPE_MPLSoGRE:
 		if (!pf->mpls_replace_flag) {
@@ -7252,7 +7371,37 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
 			0x0;
 		big_buffer = 1;
-		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
+		break;
+	case I40E_TUNNEL_TYPE_GTPC:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+			0x0;
+		big_buffer = 1;
+		break;
+	case I40E_TUNNEL_TYPE_GTPU:
+		if (!pf->gtp_replace_flag) {
+			i40e_replace_gtp_l1_filter(pf);
+			i40e_replace_gtp_cloud_filter(pf);
+			pf->gtp_replace_flag = 1;
+		}
+		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+			(teid_le >> 16) & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+			teid_le & 0xFFFF;
+		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+			0x0;
+		big_buffer = 1;
 		break;
 	case I40E_TUNNEL_TYPE_QINQ:
 		if (!pf->qinq_replace_flag) {
@@ -7280,13 +7429,19 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
 
 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
 		pfilter->element.flags =
-			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X11;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_0X12;
 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
 		pfilter->element.flags |=
-			I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+			I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	else {
 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
 						&pfilter->element.flags);
@@ -10746,14 +10901,14 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
 			   sizeof(f->input.general_fields));
 
 		if (((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
 		    ((f->input.flags &
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-		     I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
 			big_buffer = 1;
 
 		if (big_buffer)
@@ -11141,7 +11296,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L1 filter */
 	filter_replace.old_filter_type =
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace.tr_bit = 0;
 
 	/* Prepare the buffer, 2 entries */
@@ -11172,13 +11327,13 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
 	/* create L2 filter, input for L2 filter will be L1 filter  */
 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
-	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 
 	/* Prepare the buffer, 2 entries */
 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
 	filter_replace_buf.data[0] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
-	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
 	filter_replace_buf.data[4] |=
 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 2b2ef69..4125a35 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -651,12 +651,16 @@ struct i40e_ethertype_rule {
 
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
-#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
-#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP	8
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE	9
+#define I40E_AQC_ADD_CLOUD_FILTER_0X10		0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_0X11		0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X12		0x12
+#define I40E_AQC_ADD_L1_FILTER_0X11		0x11
+#define I40E_AQC_ADD_L1_FILTER_0X12		0x12
+#define I40E_AQC_ADD_L1_FILTER_0X13		0x13
+#define I40E_AQC_NEW_TR_21			21
+#define I40E_AQC_NEW_TR_22			22
 
 enum i40e_tunnel_iptype {
 	I40E_TUNNEL_IPTYPE_IPV4,
@@ -906,6 +910,7 @@ struct i40e_pf {
 	bool floating_veb_list[I40E_MAX_VF];
 	struct i40e_flow_list flow_list;
 	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
+	bool gtp_replace_flag;   /* 1 - GTP-C/U filter replace is done */
 	bool qinq_replace_flag;  /* QINQ filter replace is done */
 	struct i40e_tm_conf tm_conf;
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 9470ff5..0d9c972 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -4348,12 +4348,12 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 		vsi = vf->vsi;
 	}
 
-	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
-	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+	    I40E_AQC_ADD_CLOUD_FILTER_0X10))
 		big_buffer = 1;
 
 	if (big_buffer)
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 116+ messages in thread

* Re: [PATCH v8 0/7] net/i40e: GTP-C and GTP-U enabling
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
                                   ` (6 preceding siblings ...)
  2017-10-05  8:14                 ` [PATCH v8 7/7] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
@ 2017-10-05  8:23                 ` Wu, Jingjing
  2017-10-05 21:13                 ` Ferruh Yigit
  8 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-10-05  8:23 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: Chilikin, Andrey, dev



> -----Original Message-----
> From: Xing, Beilei
> Sent: Thursday, October 5, 2017 4:15 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Chilikin, Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: [PATCH v8 0/7] net/i40e: GTP-C and GTP-U enabling
> 
> This patch set enables RSS/FDIR/cloud filter for GTP-C and GTP-U.
> 
> v8 changes:
>  - Remove 'enable RSS for new pctype' as it can be set with the
>    configuration in Kirill's patch.
>  - Resolve conflicts.
> 
> v7 changes:
>  - Distinguish GTP-C request and response message in mbuf description.
>  - Clarify GTP-C response message is not supported.
>  - Version_type 0x30 is invalid for GTP-C, replace with 0x32.
>  - Refine metadata parsing function.
>  - Rework for checking fdir programming status.
> 
> v6 changes:
>  - Reword description of GTP item and GTP structure, mainly support
>    GTPv1, not include GTPv0 and GTPv2.
> 
> v5 changes:
>  - Fix code style.
>  - Reword commit log.
> 
> v4 changes:
>  - Refine fdir related code.
>  - Rework profile metadata parsing function.
>  - Fix code style.
> 
> v3 changes:
>  - Rework implementation to support the new profile.
>  - Add GTPC and GTPU tunnel type in software packet type parser.
>  - Update ptype info when loading profile.
>  - Fix bug of updating pctype info.
> 
> 
> v2 changes:
>  - Enable RSS/FDIR/cloud filter dynamically by checking profile
>  - Add GTPC and GTPU items to distinguish rule for GTP-C or GTP-U
>  - Rework FDIR/cloud filter enabling function
> 
> Beilei Xing (7):
>   mbuf: support GTP in software packet type parser
>   net/i40e: update ptype and pctype info
>   ethdev: add GTP items to support flow API
>   net/i40e: finish integration FDIR with generic flow API
>   net/i40e: add FDIR support for GTP-C and GTP-U
>   net/i40e: add cloud filter parsing function for GTP
>   net/i40e: enable cloud filter for GTP-C and GTP-U
> 
>  app/test-pmd/cmdline_flow.c                 |  40 ++
>  app/test-pmd/config.c                       |   3 +
>  doc/guides/prog_guide/rte_flow.rst          |  17 +
>  doc/guides/testpmd_app_ug/testpmd_funcs.rst |   4 +
>  drivers/net/i40e/i40e_ethdev.c              | 505 +++++++++++++++++++++++-
>  drivers/net/i40e/i40e_ethdev.h              | 156 +++++++-
>  drivers/net/i40e/i40e_fdir.c                | 585 +++++++++++++++++++++++++++-
>  drivers/net/i40e/i40e_flow.c                | 503 ++++++++++++++++++++----
>  drivers/net/i40e/rte_pmd_i40e.c             |   6 +-
>  lib/librte_ether/rte_flow.h                 |  52 +++
>  lib/librte_mbuf/rte_mbuf_ptype.c            |   2 +
>  lib/librte_mbuf/rte_mbuf_ptype.h            |  32 ++
>  12 files changed, 1774 insertions(+), 131 deletions(-)
> 
Acked-by: Jingjing Wu <jingjing.wu@intel.com>


Thanks
Jingjing

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v6 4/8] ethdev: add GTP items to support flow API
  2017-10-05  8:06                         ` Wu, Jingjing
@ 2017-10-05  8:30                           ` Adrien Mazarguil
  2017-10-05  8:39                             ` Wu, Jingjing
  0 siblings, 1 reply; 116+ messages in thread
From: Adrien Mazarguil @ 2017-10-05  8:30 UTC (permalink / raw)
  To: Wu, Jingjing; +Cc: Sean Harte, Xing, Beilei, Chilikin, Andrey, dev

On Thu, Oct 05, 2017 at 08:06:38AM +0000, Wu, Jingjing wrote:
> 
> 
> > -----Original Message-----
> > From: Sean Harte [mailto:seanbh@gmail.com]
> > Sent: Tuesday, October 3, 2017 4:57 PM
> > To: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> > Cc: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Chilikin,
> > Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH v6 4/8] ethdev: add GTP items to support flow API
> > 
> > On 2 October 2017 at 13:27, Adrien Mazarguil <adrien.mazarguil@6wind.com> wrote:
> > > On Fri, Sep 29, 2017 at 10:29:55AM +0100, Sean Harte wrote:
> > >> On 29 September 2017 at 09:54, Xing, Beilei <beilei.xing@intel.com> wrote:
> > > <snip>
> > >> >> >  /**
> > >> >> > + * RTE_FLOW_ITEM_TYPE_GTP.
> > >> >> > + *
> > >> >> > + * Matches a GTPv1 header.
> > >> >> > + */
> > >> >> > +struct rte_flow_item_gtp {
> > >> >> > +       /**
> > >> >> > +        * Version (3b), protocol type (1b), reserved (1b),
> > >> >> > +        * Extension header flag (1b),
> > >> >> > +        * Sequence number flag (1b),
> > >> >> > +        * N-PDU number flag (1b).
> > >> >> > +        */
> > >> >> > +       uint8_t v_pt_rsv_flags;
> > >> >> > +       uint8_t msg_type; /**< Message type. */
> > >> >> > +       rte_be16_t msg_len; /**< Message length. */
> > >> >> > +       rte_be32_t teid; /**< Tunnel endpoint identifier. */ };
> > >> >>
> > >> >> In future, you might add support for GTPv2 (which is used since LTE).
> > >> >> Maybe this structure should have v1 in its name to avoid confusion?
> > >> >
> > >> > I considered it before. But I think we can modify it when we support GTPv2 in future,
> > and keep concise 'GTP' currently:)  since I have described it matches v1 header.
> > >> >
> > >>
> > >> You could rename v_pt_rsv_flags to version_flags to avoid some future
> > >> code changes to support GTPv2. There's still the issue that not all
> > >> GTPv2 messages have a TEID though.
> > >
> > > Although they have the same size, the header of these two protocols
> > > obviously differs. My suggestion would be to go with a separate GTPv2
> > > pattern item using its own dedicated structure instead.
> > >
> > > --
> > > Adrien Mazarguil
> > > 6WIND
> > 
> > The 1st four bytes are the same (flags in first byte have different
> > meanings, but the bits indicating the version are in the same
> > location). After that, different fields in each version are optional,
> > and the headers have variable size. A single structure could be used
> > if the first field is renamed to something like "version_flags", and
> > then check that the teid field in item->mask is not set if
> > ((version_flags >> 5 == 2) && ((version_flags >> 4) & 1) == 1). If
> > there's going to be two structures, it would be good to put v1 and v2
> > in the names, in my opinion.
> 
> I think the name GTP is OK for now. Due to v1 and v2 are different, why not rename them
> when the v2 supporting are introduced?

In any case I'd rather avoid renaming and modifying existing items and
structure contents once part of the API to avoid API/ABI breakage that
requires deprecation notices, user application updates and so on; rte_flow
has been created as a kind of append-only API for this reason (of course
there are exceptions, such as a bad design choice for the VLAN item I intend
to fix at some point).

I'm fine with the name "GTP" as defined now and documented as matching
GTPv1. We can add "GTPv2"-themed definitions later when some implementation
provides the ability to match this version. If you want to append the "v1"
suffix right now to be more explicit, I'm also fine with that. Your call.

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 116+ messages in thread
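
The check discussed above reduces to reading the top three bits of the
first GTP header byte, which sit in the same place for GTPv1 and GTPv2.
A minimal C sketch, using the field name from the proposed structure
(illustrative only, not taken from the patch set):

  #include <stdint.h>

  /* The version lives in the top three bits of the first header byte
   * for both GTPv1 and GTPv2. */
  static inline uint8_t
  gtp_version(uint8_t v_pt_rsv_flags)
  {
  	return v_pt_rsv_flags >> 5; /* 1 for GTPv1, 2 for GTPv2 */
  }

For GTPv2 a parser would additionally have to look at the TEID-presence
flag in the same byte before trusting a teid match, which is the concern
raised above.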

* Re: [PATCH v6 4/8] ethdev: add GTP items to support flow API
  2017-10-05  8:30                           ` Adrien Mazarguil
@ 2017-10-05  8:39                             ` Wu, Jingjing
  0 siblings, 0 replies; 116+ messages in thread
From: Wu, Jingjing @ 2017-10-05  8:39 UTC (permalink / raw)
  To: Adrien Mazarguil; +Cc: Sean Harte, Xing, Beilei, Chilikin, Andrey, dev



> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> Sent: Thursday, October 5, 2017 4:30 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Sean Harte <seanbh@gmail.com>; Xing, Beilei <beilei.xing@intel.com>; Chilikin,
> Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v6 4/8] ethdev: add GTP items to support flow API
> 
> On Thu, Oct 05, 2017 at 08:06:38AM +0000, Wu, Jingjing wrote:
> >
> >
> > > -----Original Message-----
> > > From: Sean Harte [mailto:seanbh@gmail.com]
> > > Sent: Tuesday, October 3, 2017 4:57 PM
> > > To: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> > > Cc: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Chilikin,
> > > Andrey <andrey.chilikin@intel.com>; dev@dpdk.org
> > > Subject: Re: [dpdk-dev] [PATCH v6 4/8] ethdev: add GTP items to support flow API
> > >
> > > On 2 October 2017 at 13:27, Adrien Mazarguil <adrien.mazarguil@6wind.com>
> wrote:
> > > > On Fri, Sep 29, 2017 at 10:29:55AM +0100, Sean Harte wrote:
> > > >> On 29 September 2017 at 09:54, Xing, Beilei <beilei.xing@intel.com> wrote:
> > > > <snip>
> > > >> >> >  /**
> > > >> >> > + * RTE_FLOW_ITEM_TYPE_GTP.
> > > >> >> > + *
> > > >> >> > + * Matches a GTPv1 header.
> > > >> >> > + */
> > > >> >> > +struct rte_flow_item_gtp {
> > > >> >> > +       /**
> > > >> >> > +        * Version (3b), protocol type (1b), reserved (1b),
> > > >> >> > +        * Extension header flag (1b),
> > > >> >> > +        * Sequence number flag (1b),
> > > >> >> > +        * N-PDU number flag (1b).
> > > >> >> > +        */
> > > >> >> > +       uint8_t v_pt_rsv_flags;
> > > >> >> > +       uint8_t msg_type; /**< Message type. */
> > > >> >> > +       rte_be16_t msg_len; /**< Message length. */
> > > >> >> > +       rte_be32_t teid; /**< Tunnel endpoint identifier. */ };
> > > >> >>
> > > >> >> In future, you might add support for GTPv2 (which is used since LTE).
> > > >> >> Maybe this structure should have v1 in its name to avoid confusion?
> > > >> >
> > > >> > I considered it before. But I think we can modify it when we support GTPv2 in
> future,
> > > and keep concise 'GTP' currently:)  since I have described it matches v1 header.
> > > >> >
> > > >>
> > > >> You could rename v_pt_rsv_flags to version_flags to avoid some future
> > > >> code changes to support GTPv2. There's still the issue that not all
> > > >> GTPv2 messages have a TEID though.
> > > >
> > > > Although they have the same size, the header of these two protocols
> > > > obviously differs. My suggestion would be to go with a separate GTPv2
> > > > pattern item using its own dedicated structure instead.
> > > >
> > > > --
> > > > Adrien Mazarguil
> > > > 6WIND
> > >
> > > The 1st four bytes are the same (flags in first byte have different
> > > meanings, but the bits indicating the version are in the same
> > > location). After that, different fields in each version are optional,
> > > and the headers have variable size. A single structure could be used
> > > if the first field is renamed to something like "version_flags", and
> > > then check that the teid field in item->mask is not set if
> > > ((version_flags >> 5 == 2) && ((version_flags >> 4) & 1) == 1). If
> > > there's going to be two structures, it would be good to put v1 and v2
> > > in the names, in my opinion.
> >
> > I think the name GTP is OK for now. Due to v1 and v2 are different, why not rename
> them
> > when the v2 supporting are introduced?
> 
> In any case I'd rather avoid renaming and modifying existing items and
> structure contents once part of the API to avoid API/ABI breakage that
> requires deprecation notices, user application updates and so on; rte_flow
> has been created as a kind of append-only API for this reason (of course
> there are exceptions, such as a bad design choice for the VLAN item I intend
> to fix at some point).
> 
> I'm fine with the name "GTP" as defined now and documented as matching
> GTPv1. We can add "GTPv2"-themed definitions later when some implementation
> provides the ability to match this version. If you want to append the "v1"
> suffix right now to be more explicit, I'm also fine with that. Your call.
> 
Got your point, I'm also fine with the name now for GTPv1, and we can add "GTPv2"
when it is supported.

Thanks
Jingjing

^ permalink raw reply	[flat|nested] 116+ messages in thread

* Re: [PATCH v8 3/7] ethdev: add GTP items to support flow API
  2017-10-05  8:14                 ` [PATCH v8 3/7] ethdev: add GTP items to support flow API Beilei Xing
@ 2017-10-05 11:50                   ` Sean Harte
  0 siblings, 0 replies; 116+ messages in thread
From: Sean Harte @ 2017-10-05 11:50 UTC (permalink / raw)
  To: Beilei Xing; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

On 5 October 2017 at 09:14, Beilei Xing <beilei.xing@intel.com> wrote:
> This patch adds GTP, GTPC and GTPU items for
> generic flow API, and also exposes item fields
> through the flow command.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> Acked-by: Jingjing Wu <jingjing.wu@intel.com>
> ---
>  app/test-pmd/cmdline_flow.c                 | 40 ++++++++++++++++++++++
>  app/test-pmd/config.c                       |  3 ++
>  doc/guides/prog_guide/rte_flow.rst          | 17 ++++++++++
>  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  4 +++
>  lib/librte_ether/rte_flow.h                 | 52 +++++++++++++++++++++++++++++
>  5 files changed, 116 insertions(+)
>
> diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
> index a17a004..26c3e4f 100644
> --- a/app/test-pmd/cmdline_flow.c
> +++ b/app/test-pmd/cmdline_flow.c
> @@ -171,6 +171,10 @@ enum index {
>         ITEM_GRE_PROTO,
>         ITEM_FUZZY,
>         ITEM_FUZZY_THRESH,
> +       ITEM_GTP,
> +       ITEM_GTP_TEID,
> +       ITEM_GTPC,
> +       ITEM_GTPU,
>
>         /* Validate/create actions. */
>         ACTIONS,
> @@ -451,6 +455,9 @@ static const enum index next_item[] = {
>         ITEM_MPLS,
>         ITEM_GRE,
>         ITEM_FUZZY,
> +       ITEM_GTP,
> +       ITEM_GTPC,
> +       ITEM_GTPU,
>         ZERO,
>  };
>
> @@ -588,6 +595,12 @@ static const enum index item_gre[] = {
>         ZERO,
>  };
>
> +static const enum index item_gtp[] = {
> +       ITEM_GTP_TEID,
> +       ITEM_NEXT,
> +       ZERO,
> +};
> +
>  static const enum index next_action[] = {
>         ACTION_END,
>         ACTION_VOID,
> @@ -1421,6 +1434,33 @@ static const struct token token_list[] = {
>                 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
>                                         thresh)),
>         },
> +       [ITEM_GTP] = {
> +               .name = "gtp",
> +               .help = "match GTP header",
> +               .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
> +               .next = NEXT(item_gtp),
> +               .call = parse_vc,
> +       },
> +       [ITEM_GTP_TEID] = {
> +               .name = "teid",
> +               .help = "tunnel endpoint identifier",
> +               .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
> +               .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
> +       },
> +       [ITEM_GTPC] = {
> +               .name = "gtpc",
> +               .help = "match GTP header",
> +               .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
> +               .next = NEXT(item_gtp),
> +               .call = parse_vc,
> +       },
> +       [ITEM_GTPU] = {
> +               .name = "gtpu",
> +               .help = "match GTP header",
> +               .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
> +               .next = NEXT(item_gtp),
> +               .call = parse_vc,
> +       },
>
>         /* Validate/create actions. */
>         [ACTIONS] = {
> diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
> index 60a8d07..4ec8f0d 100644
> --- a/app/test-pmd/config.c
> +++ b/app/test-pmd/config.c
> @@ -952,6 +952,9 @@ static const struct {
>         MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
>         MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
>         MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
> +       MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
> +       MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
> +       MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
>  };
>
>  /** Compute storage space needed by item specification. */
> diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
> index 662a912..73f12ee 100644
> --- a/doc/guides/prog_guide/rte_flow.rst
> +++ b/doc/guides/prog_guide/rte_flow.rst
> @@ -955,6 +955,23 @@ Usage example, fuzzy match a TCPv4 packets:
>     | 4     | END      |
>     +-------+----------+
>
> +Item: ``GTP``, ``GTPC``, ``GTPU``
> +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> +
> +Matches a GTPv1 header.
> +
> +Note: GTP, GTPC and GTPU use the same structure. GTPC and GTPU item
> +are defined for a user-friendly API when creating GTP-C and GTP-U
> +flow rules.
> +
> +- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
> +  extension header flag (1b), sequence number flag (1b), N-PDU number
> +  flag (1b).
> +- ``msg_type``: message type.
> +- ``msg_len``: message length.
> +- ``teid``: tunnel endpoint identifier.
> +- Default ``mask`` matches teid only.
> +
>  Actions
>  ~~~~~~~
>
> diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> index aeef3e1..32223ca 100644
> --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> @@ -2721,6 +2721,10 @@ This section lists supported pattern items and their attributes, if any.
>
>    - ``thresh {unsigned}``: accuracy threshold.
>
> +- ``gtp``, ``gtpc``, ``gtpu``: match GTPv1 header.
> +
> +  - ``teid {unsigned}``: tunnel endpoint identifier.
> +
>  Actions list
>  ^^^^^^^^^^^^
>
> diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
> index bba6169..b1a1b97 100644
> --- a/lib/librte_ether/rte_flow.h
> +++ b/lib/librte_ether/rte_flow.h
> @@ -309,6 +309,33 @@ enum rte_flow_item_type {
>          * See struct rte_flow_item_fuzzy.
>          */
>         RTE_FLOW_ITEM_TYPE_FUZZY,
> +
> +       /**
> +        * Matches a GTP header.
> +        *
> +        * Configure flow for GTP packets.
> +        *
> +        * See struct rte_flow_item_gtp.
> +        */
> +       RTE_FLOW_ITEM_TYPE_GTP,
> +
> +       /**
> +        * Matches a GTP header.
> +        *
> +        * Configure flow for GTP-C packets.
> +        *
> +        * See struct rte_flow_item_gtp.
> +        */
> +       RTE_FLOW_ITEM_TYPE_GTPC,
> +
> +       /**
> +        * Matches a GTP header.
> +        *
> +        * Configure flow for GTP-U packets.
> +        *
> +        * See struct rte_flow_item_gtp.
> +        */
> +       RTE_FLOW_ITEM_TYPE_GTPU,
>  };
>
>  /**
> @@ -735,6 +762,31 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
>  #endif
>
>  /**
> + * RTE_FLOW_ITEM_TYPE_GTP.
> + *
> + * Matches a GTPv1 header.
> + */
> +struct rte_flow_item_gtp {
> +       /**
> +        * Version (3b), protocol type (1b), reserved (1b),
> +        * Extension header flag (1b),
> +        * Sequence number flag (1b),
> +        * N-PDU number flag (1b).
> +        */
> +       uint8_t v_pt_rsv_flags;
> +       uint8_t msg_type; /**< Message type. */
> +       rte_be16_t msg_len; /**< Message length. */
> +       rte_be32_t teid; /**< Tunnel endpoint identifier. */
> +};
> +
> +/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
> +#ifndef __cplusplus
> +static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
> +       .teid = RTE_BE32(0xffffffff),
> +};
> +#endif
> +
> +/**
>   * Matching pattern item definition.
>   *
>   * A pattern is formed by stacking items starting from the lowest protocol
> --
> 2.5.5
>

Reviewed-by: Seán Harte <seanbh@gmail.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread
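
As a usage sketch of the new items (not part of the patch; the port id,
queue index and TEID value are arbitrary examples, and whether a PMD
accepts this exact pattern depends on its flow parser):

  #include <stdint.h>
  #include <rte_byteorder.h>
  #include <rte_flow.h>

  /* Direct GTP-U packets carrying TEID 0x12345678 to Rx queue 3.
   * The default mask matches the TEID only, as documented above. */
  static struct rte_flow *
  gtpu_teid_to_queue(uint16_t port_id, struct rte_flow_error *error)
  {
  	struct rte_flow_attr attr = { .ingress = 1 };
  	struct rte_flow_item_gtp gtp_spec = {
  		.teid = RTE_BE32(0x12345678),
  	};
  	struct rte_flow_item pattern[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
  		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
  		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
  		{ .type = RTE_FLOW_ITEM_TYPE_GTPU, .spec = &gtp_spec },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action_queue queue = { .index = 3 };
  	struct rte_flow_action actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};

  	return rte_flow_create(port_id, &attr, pattern, actions, error);
  }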

* Re: [PATCH v8 1/7] mbuf: support GTP in software packet type parser
  2017-10-05  8:14                 ` [PATCH v8 1/7] mbuf: support GTP in software packet type parser Beilei Xing
@ 2017-10-05 11:50                   ` Sean Harte
  0 siblings, 0 replies; 116+ messages in thread
From: Sean Harte @ 2017-10-05 11:50 UTC (permalink / raw)
  To: Beilei Xing; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

On 5 October 2017 at 09:14, Beilei Xing <beilei.xing@intel.com> wrote:
> Add support of GTP-C and GTP-U tunnels in rte_net_get_ptype().
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Acked-by: Olivier Matz <olivier.matz@6wind.com>
> ---
>  lib/librte_mbuf/rte_mbuf_ptype.c |  2 ++
>  lib/librte_mbuf/rte_mbuf_ptype.h | 32 ++++++++++++++++++++++++++++++++
>  2 files changed, 34 insertions(+)
>
> diff --git a/lib/librte_mbuf/rte_mbuf_ptype.c b/lib/librte_mbuf/rte_mbuf_ptype.c
> index e5c4fae..a450814 100644
> --- a/lib/librte_mbuf/rte_mbuf_ptype.c
> +++ b/lib/librte_mbuf/rte_mbuf_ptype.c
> @@ -89,6 +89,8 @@ const char *rte_get_ptype_tunnel_name(uint32_t ptype)
>         case RTE_PTYPE_TUNNEL_NVGRE: return "TUNNEL_NVGRE";
>         case RTE_PTYPE_TUNNEL_GENEVE: return "TUNNEL_GENEVE";
>         case RTE_PTYPE_TUNNEL_GRENAT: return "TUNNEL_GRENAT";
> +       case RTE_PTYPE_TUNNEL_GTPC: return "TUNNEL_GTPC";
> +       case RTE_PTYPE_TUNNEL_GTPU: return "TUNNEL_GTPU";
>         default: return "TUNNEL_UNKNOWN";
>         }
>  }
> diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
> index acd70bb..978c4a2 100644
> --- a/lib/librte_mbuf/rte_mbuf_ptype.h
> +++ b/lib/librte_mbuf/rte_mbuf_ptype.h
> @@ -383,6 +383,38 @@ extern "C" {
>   */
>  #define RTE_PTYPE_TUNNEL_GRENAT             0x00006000
>  /**
> + * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type.
> + * Packet format:
> + * <'ether type'=0x0800
> + * | 'version'=4, 'protocol'=17
> + * | 'destination port'=2123>
> + * or,
> + * <'ether type'=0x86DD
> + * | 'version'=6, 'next header'=17
> + * | 'destination port'=2123>
> + * or,
> + * <'ether type'=0x0800
> + * | 'version'=4, 'protocol'=17
> + * | 'source port'=2123>
> + * or,
> + * <'ether type'=0x86DD
> + * | 'version'=6, 'next header'=17
> + * | 'source port'=2123>
> + */
> +#define RTE_PTYPE_TUNNEL_GTPC               0x00007000
> +/**
> + * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type.
> + * Packet format:
> + * <'ether type'=0x0800
> + * | 'version'=4, 'protocol'=17
> + * | 'destination port'=2152>
> + * or,
> + * <'ether type'=0x86DD
> + * | 'version'=6, 'next header'=17
> + * | 'destination port'=2152>
> + */
> +#define RTE_PTYPE_TUNNEL_GTPU               0x00008000
> +/**
>   * Mask of tunneling packet types.
>   */
>  #define RTE_PTYPE_TUNNEL_MASK               0x0000f000
> --
> 2.5.5
>

Reviewed-by: Seán Harte <seanbh@gmail.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread
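
Once a driver (or rte_net_get_ptype()) fills in these values, an
application can branch on them through the tunnel mask; a small sketch,
where only the ptype constants come from the patch:

  #include <stdint.h>
  #include <rte_mbuf.h>
  #include <rte_mbuf_ptype.h>

  /* Return 1 when the mbuf was classified as a GTP-C or GTP-U tunnel. */
  static inline int
  pkt_is_gtp(const struct rte_mbuf *m)
  {
  	uint32_t tnl = m->packet_type & RTE_PTYPE_TUNNEL_MASK;

  	return tnl == RTE_PTYPE_TUNNEL_GTPC ||
  	       tnl == RTE_PTYPE_TUNNEL_GTPU;
  }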

* Re: [PATCH v8 5/7] net/i40e: add FDIR support for GTP-C and GTP-U
  2017-10-05  8:14                 ` [PATCH v8 5/7] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
@ 2017-10-05 11:50                   ` Sean Harte
  0 siblings, 0 replies; 116+ messages in thread
From: Sean Harte @ 2017-10-05 11:50 UTC (permalink / raw)
  To: Beilei Xing; +Cc: Wu, Jingjing, Chilikin, Andrey, dev

On 5 October 2017 at 09:14, Beilei Xing <beilei.xing@intel.com> wrote:
> This patch adds FDIR support for GTP-C and GTP-U. The
> input set of GTP-C and GTP-U is TEID.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
>  drivers/net/i40e/i40e_ethdev.h |  30 +++++
>  drivers/net/i40e/i40e_fdir.c   | 216 ++++++++++++++++++++++++---------
>  drivers/net/i40e/i40e_flow.c   | 267 +++++++++++++++++++++++++++++++++++------
>  3 files changed, 415 insertions(+), 98 deletions(-)
>
<snip>

Reviewed-by: Seán Harte <seanbh@gmail.com>

^ permalink raw reply	[flat|nested] 116+ messages in thread
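
In testpmd terms, the TEID-only input set described here corresponds to
flow rules along the lines of the following (values are arbitrary, and
the exact pattern the i40e parser accepts may differ):

  testpmd> flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions queue index 2 / end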

* Re: [PATCH v8 0/7] net/i40e: GTP-C and GTP-U enabling
  2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
                                   ` (7 preceding siblings ...)
  2017-10-05  8:23                 ` [PATCH v8 0/7] net/i40e: GPT-C and GTP-U enabling Wu, Jingjing
@ 2017-10-05 21:13                 ` Ferruh Yigit
  8 siblings, 0 replies; 116+ messages in thread
From: Ferruh Yigit @ 2017-10-05 21:13 UTC (permalink / raw)
  To: Beilei Xing, jingjing.wu; +Cc: andrey.chilikin, dev

On 10/5/2017 9:14 AM, Beilei Xing wrote:
> This patch set enables RSS/FDIR/cloud filter for GTP-C and GTP-U.
> 

<...>

> 
> Beilei Xing (7):
>   mbuf: support GTP in software packet type parser
>   net/i40e: update ptype and pctype info
>   ethdev: add GTP items to support flow API
>   net/i40e: finish integration FDIR with generic flow API
>   net/i40e: add FDIR support for GTP-C and GTP-U
>   net/i40e: add cloud filter parsing function for GTP
>   net/i40e: enable cloud filter for GTP-C and GTP-U

Series applied to dpdk-next-net/master, thanks.

^ permalink raw reply	[flat|nested] 116+ messages in thread

end of thread, other threads:[~2017-10-05 21:13 UTC | newest]

Thread overview: 116+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-08-25  7:50 [PATCH 0/7] GTP enabling Beilei Xing
2017-08-25  7:50 ` [PATCH 1/7] net/i40e: support RSS for GTP-C and GTP-U Beilei Xing
2017-09-07 11:20   ` [PATCH v2 0/6] GPT-C and GTP-U enabling Beilei Xing
2017-09-07 11:20     ` [PATCH v2 1/6] net/i40e: support RSS for GTP-C and GTP-U Beilei Xing
2017-09-18 14:17       ` Bruce Richardson
2017-09-18 14:21         ` Bruce Richardson
2017-09-07 11:20     ` [PATCH v2 2/6] ethdev: add GTPC and GTPU items Beilei Xing
2017-09-07 12:19       ` Adrien Mazarguil
2017-09-12  6:40         ` Xing, Beilei
2017-09-12 10:46           ` Adrien Mazarguil
2017-09-13  3:09             ` Xing, Beilei
2017-09-07 11:21     ` [PATCH v2 3/6] net/i40e: finish integration FDIR with generic flow API Beilei Xing
2017-09-07 11:21     ` [PATCH v2 4/6] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
2017-09-07 11:21     ` [PATCH v2 5/6] net/i40e: add cloud filter parsing function for GTP Beilei Xing
2017-09-07 11:21     ` [PATCH v2 6/6] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
2017-09-22 22:35     ` [PATCH v3 0/8] GPT-C and GTP-U enabling Beilei Xing
2017-09-22 22:35       ` [PATCH v3 1/8] mbuf: support GTP in software packet type parser Beilei Xing
2017-09-25  9:21         ` Olivier MATZ
2017-09-28  2:17         ` [PATCH v4 0/8] GPT-C and GTP-U enabling Beilei Xing
2017-09-28  2:17           ` [PATCH v4 1/8] mbuf: support GTP in software packet type parser Beilei Xing
2017-09-28  2:17           ` [PATCH v4 2/8] net/i40e: update ptype and pctype info Beilei Xing
2017-09-28  2:17           ` [PATCH v4 3/8] net/i40e: support RSS for new pctype Beilei Xing
2017-09-28  2:17           ` [PATCH v4 4/8] ethdev: add GTP items to support flow API Beilei Xing
2017-09-28  2:17           ` [PATCH v4 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
2017-09-28  2:17           ` [PATCH v4 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
2017-09-28  2:17           ` [PATCH v4 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
2017-09-28  2:17           ` [PATCH v4 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
2017-09-28  8:13           ` [PATCH v5 0/8] GPT-C and GTP-U enabling Beilei Xing
2017-09-28  8:13             ` [PATCH v5 1/8] mbuf: support GTP in software packet type parser Beilei Xing
2017-09-28  8:13             ` [PATCH v5 2/8] net/i40e: update ptype and pctype info Beilei Xing
2017-09-28  8:13             ` [PATCH v5 3/8] net/i40e: support RSS for new pctype Beilei Xing
2017-09-28  8:13             ` [PATCH v5 4/8] ethdev: add GTP items to support flow API Beilei Xing
2017-09-28 13:43               ` Sean Harte
2017-09-29  2:12                 ` Xing, Beilei
2017-09-28  8:13             ` [PATCH v5 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
2017-09-28  8:13             ` [PATCH v5 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
2017-09-28  8:13             ` [PATCH v5 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
2017-09-28  8:13             ` [PATCH v5 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
2017-09-29  5:18           ` [PATCH v6 0/8] GPT-C and GTP-U enabling Beilei Xing
2017-09-29  5:18             ` [PATCH v6 1/8] mbuf: support GTP in software packet type parser Beilei Xing
2017-09-29  8:15               ` Sean Harte
2017-09-29  8:41                 ` Xing, Beilei
2017-09-29  5:18             ` [PATCH v6 2/8] net/i40e: update ptype and pctype info Beilei Xing
2017-09-29 13:22               ` Wu, Jingjing
2017-09-29 13:24                 ` Xing, Beilei
2017-09-29  5:18             ` [PATCH v6 3/8] net/i40e: support RSS for new pctype Beilei Xing
2017-09-29 13:24               ` Wu, Jingjing
2017-09-29  5:18             ` [PATCH v6 4/8] ethdev: add GTP items to support flow API Beilei Xing
2017-09-29  8:15               ` Sean Harte
2017-09-29  8:54                 ` Xing, Beilei
2017-09-29  9:29                   ` Sean Harte
2017-09-29  9:37                     ` Xing, Beilei
2017-10-02 12:27                     ` Adrien Mazarguil
2017-10-03  8:56                       ` Sean Harte
2017-10-05  8:06                         ` Wu, Jingjing
2017-10-05  8:30                           ` Adrien Mazarguil
2017-10-05  8:39                             ` Wu, Jingjing
2017-09-29  5:18             ` [PATCH v6 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
2017-09-29 13:28               ` Wu, Jingjing
2017-09-29  5:19             ` [PATCH v6 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
2017-09-29  8:15               ` Sean Harte
2017-09-29  9:33                 ` Xing, Beilei
2017-09-29 10:09                   ` Sean Harte
2017-09-29 11:30                     ` Xing, Beilei
2017-09-29 11:39                       ` Xing, Beilei
2017-09-29 13:14                     ` Xing, Beilei
2017-09-29 15:15                     ` Xing, Beilei
2017-09-29  5:19             ` [PATCH v6 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
2017-09-29  5:19             ` [PATCH v6 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
2017-09-29 15:50             ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Beilei Xing
2017-09-29 15:50               ` [PATCH v7 1/8] mbuf: support GTP in software packet type parser Beilei Xing
2017-09-29 15:50               ` [PATCH v7 2/8] net/i40e: update ptype and pctype info Beilei Xing
2017-10-05  2:51                 ` Wu, Jingjing
2017-09-29 15:50               ` [PATCH v7 3/8] net/i40e: support RSS for new pctype Beilei Xing
2017-09-29 15:50               ` [PATCH v7 4/8] ethdev: add GTP items to support flow API Beilei Xing
2017-10-05  8:01                 ` Wu, Jingjing
2017-09-29 15:50               ` [PATCH v7 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
2017-10-05  2:52                 ` Wu, Jingjing
2017-09-29 15:50               ` [PATCH v7 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
2017-10-05  3:09                 ` Wu, Jingjing
2017-09-29 15:50               ` [PATCH v7 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
2017-10-05  3:13                 ` Wu, Jingjing
2017-09-29 15:50               ` [PATCH v7 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
2017-10-05  8:03                 ` Wu, Jingjing
2017-10-04 22:43               ` [PATCH v7 0/8] net/i40e: GPT-C and GTP-U enabling Ferruh Yigit
2017-10-05  8:14               ` [PATCH v8 0/7] " Beilei Xing
2017-10-05  8:14                 ` [PATCH v8 1/7] mbuf: support GTP in software packet type parser Beilei Xing
2017-10-05 11:50                   ` Sean Harte
2017-10-05  8:14                 ` [PATCH v8 2/7] net/i40e: update ptype and pctype info Beilei Xing
2017-10-05  8:14                 ` [PATCH v8 3/7] ethdev: add GTP items to support flow API Beilei Xing
2017-10-05 11:50                   ` Sean Harte
2017-10-05  8:14                 ` [PATCH v8 4/7] net/i40e: finish integration FDIR with generic " Beilei Xing
2017-10-05  8:14                 ` [PATCH v8 5/7] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
2017-10-05 11:50                   ` Sean Harte
2017-10-05  8:14                 ` [PATCH v8 6/7] net/i40e: add cloud filter parsing function for GTP Beilei Xing
2017-10-05  8:14                 ` [PATCH v8 7/7] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
2017-10-05  8:23                 ` [PATCH v8 0/7] net/i40e: GPT-C and GTP-U enabling Wu, Jingjing
2017-10-05 21:13                 ` Ferruh Yigit
2017-09-22 22:35       ` [PATCH v3 2/8] net/i40e: update ptype and pctype info Beilei Xing
2017-09-23  2:58         ` Wu, Jingjing
2017-09-22 22:35       ` [PATCH v3 3/8] net/i40e: support RSS for new pctype Beilei Xing
2017-09-22 22:35       ` [PATCH v3 4/8] ethdev: add GTP items to support flow API Beilei Xing
2017-09-22 13:39         ` Adrien Mazarguil
2017-09-22 22:35       ` [PATCH v3 5/8] net/i40e: finish integration FDIR with generic " Beilei Xing
2017-09-22 22:35       ` [PATCH v3 6/8] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
2017-09-22 22:35       ` [PATCH v3 7/8] net/i40e: add cloud filter parsing function for GTP Beilei Xing
2017-09-22 22:35       ` [PATCH v3 8/8] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
2017-08-25  7:50 ` [PATCH 2/7] ethdev: add GTP item Beilei Xing
2017-09-06 16:02   ` Adrien Mazarguil
2017-09-07  6:31     ` Xing, Beilei
2017-08-25  7:50 ` [PATCH 3/7] app/testpmd: add GTP fields to flow command Beilei Xing
2017-09-06 16:03   ` Adrien Mazarguil
2017-08-25  7:50 ` [PATCH 4/7] net/i40e: finish integration FDIR with generic flow API Beilei Xing
2017-08-25  7:50 ` [PATCH 5/7] net/i40e: add FDIR support for GTP-C and GTP-U Beilei Xing
2017-08-25  7:50 ` [PATCH 6/7] net/i40e: add cloud filter parsing function for GTP Beilei Xing
2017-08-25  7:50 ` [PATCH 7/7] net/i40e: enable cloud filter for GTP-C and GTP-U Beilei Xing
