From: Beilei Xing <beilei.xing@intel.com>
To: jingjing.wu@intel.com, helin.zhang@intel.com
Cc: dev@dpdk.org
Subject: [PATCH v5 08/17] net/i40e: parse flow director filter
Date: Wed,  4 Jan 2017 11:22:58 +0800
Message-ID: <1483500187-124740-9-git-send-email-beilei.xing@intel.com>
In-Reply-To: <1483500187-124740-1-git-send-email-beilei.xing@intel.com>

This patch adds i40e_parse_fdir_filter to check whether a rule is a
flow director rule according to the flow pattern, and to fill in the
flow director filter info from the pattern and actions.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c |  56 +---
 drivers/net/i40e/i40e_ethdev.h |  55 ++++
 drivers/net/i40e/i40e_flow.c   | 607 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 663 insertions(+), 55 deletions(-)
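
For illustration only (not part of the patch): a minimal sketch of a rule
the new parser is intended to accept, written against the generic rte_flow
API. It assumes the port is configured with fdir_conf.mode =
RTE_FDIR_MODE_PERFECT, that the default input set for an IPv4/UDP flow is
source/destination address plus source/destination port, and that Rx queue 1
exists; the addresses and ports are arbitrary placeholders.

#include <stdint.h>

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Validate an IPv4/UDP flow director rule that steers matched packets
 * to Rx queue 1. Addresses, ports and the queue index are placeholders. */
static int
fdir_ipv4_udp_rule_validate(uint8_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
			.dst_addr = rte_cpu_to_be_32(0xC0A80002), /* 192.168.0.2 */
		},
	};
	/* Fully masked addresses/ports select the default input set that
	 * i40e_parse_fdir_pattern() checks; every other field stays 0. */
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(1024),
			.dst_port = rte_cpu_to_be_16(4096),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* This pattern matches pattern_fdir_ipv4_udp, so i40e_flow_validate()
	 * dispatches it to i40e_parse_fdir_filter(). */
	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}

Replacing QUEUE with DROP, or appending a MARK action before END, exercises
the other branches accepted by i40e_parse_fdir_act().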

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index edfd52b..bcf28cf 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -139,60 +139,6 @@
 #define I40E_DEFAULT_DCB_APP_NUM    1
 #define I40E_DEFAULT_DCB_APP_PRIO   3
 
-#define I40E_INSET_NONE            0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC            0x0000000000000001ULL
-#define I40E_INSET_SMAC            0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER      0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER      0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL     0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC        0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST        0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC        0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST        0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT        0x0000000000001000ULL
-#define I40E_INSET_DST_PORT        0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT         0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS        0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO      0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL        0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC         0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW       0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS       0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST       0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST       0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC           0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT       0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT       0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID             0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
-	(I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
-	I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
-	I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
-	I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
 /**
  * Below are values for writing un-exposed registers suggested
  * by silicon experts
@@ -7617,7 +7563,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
 }
 
 /* default input set fields combination per pctype */
-static uint64_t
+uint64_t
 i40e_get_default_input_set(uint16_t pctype)
 {
 	static const uint64_t default_inset_table[] = {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 23f360b..9e3a48d 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -190,6 +190,60 @@ enum i40e_flxpld_layer_idx {
 #define FLOATING_VEB_SUPPORTED_FW_MAJ 5
 #define FLOATING_VEB_SUPPORTED_FW_MIN 0
 
+#define I40E_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC            0x0000000000000001ULL
+#define I40E_INSET_SMAC            0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER      0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER      0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL     0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC        0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST        0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC        0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST        0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT        0x0000000000001000ULL
+#define I40E_INSET_DST_PORT        0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT         0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS        0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL        0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC         0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW       0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS       0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST       0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST       0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC           0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT       0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT       0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID             0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+	(I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+	I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+	I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+	I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
 struct i40e_adapter;
 
 /**
@@ -712,6 +766,7 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
 			     const struct i40e_tunnel_filter_input *input);
 int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
 			      struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
 
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index a9ff73f..64b4ab6 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -51,6 +51,10 @@
 #include "base/i40e_type.h"
 #include "i40e_ethdev.h"
 
+#define I40E_IPV4_TC_SHIFT	4
+#define I40E_IPV6_TC_MASK	(0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER	44
+
 static int i40e_flow_validate(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
 			      const struct rte_flow_item pattern[],
@@ -64,6 +68,14 @@ static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
 				    const struct rte_flow_action *actions,
 				    struct rte_flow_error *error,
 				    struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_fdir_pattern(struct rte_eth_dev *dev,
+				   const struct rte_flow_item *pattern,
+				   struct rte_flow_error *error,
+				   struct rte_eth_fdir_filter *filter);
+static int i40e_parse_fdir_act(struct rte_eth_dev *dev,
+			       const struct rte_flow_action *actions,
+			       struct rte_flow_error *error,
+			       struct rte_eth_fdir_filter *filter);
 static int i40e_parse_attr(const struct rte_flow_attr *attr,
 			   struct rte_flow_error *error);
 
@@ -79,6 +91,107 @@ static enum rte_flow_item_type pattern_ethertype[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static int
 i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
 			    const struct rte_flow_attr *attr,
@@ -108,9 +221,62 @@ i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
 	return ret;
 }
 
+static int
+i40e_parse_fdir_filter(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error,
+		       union i40e_filter_t *filter)
+{
+	struct rte_eth_fdir_filter *fdir_filter =
+		&filter->fdir_filter;
+	int ret;
+
+	ret = i40e_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_parse_fdir_act(dev, actions, error, fdir_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	if (dev->data->dev_conf.fdir_conf.mode !=
+	    RTE_FDIR_MODE_PERFECT) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "Check the mode in fdir_conf.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
 static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	/* Ethertype */
 	{ pattern_ethertype, i40e_parse_ethertype_filter },
+	/* FDIR */
+	{ pattern_fdir_ipv4, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv4_ext, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv4_udp, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv4_udp_ext, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv4_tcp, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv4_tcp_ext, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv4_sctp, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv4_sctp_ext, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv6, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv6_ext, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv6_udp, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv6_udp_ext, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv6_tcp, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv6_tcp_ext, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv6_sctp, i40e_parse_fdir_filter },
+	{ pattern_fdir_ipv6_sctp_ext, i40e_parse_fdir_filter },
 };
 
 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
@@ -385,6 +551,447 @@ i40e_parse_ethertype_act(struct rte_eth_dev *dev,
 }
 
 static int
+i40e_parse_fdir_pattern(struct rte_eth_dev *dev,
+			const struct rte_flow_item *pattern,
+			struct rte_flow_error *error,
+			struct rte_eth_fdir_filter *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_vf *vf_spec;
+	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+	enum i40e_filter_pctype pctype;
+	uint64_t input_set = I40E_INSET_NONE;
+	uint16_t flag_offset;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	uint32_t j;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = (const struct rte_flow_item_eth *)item->spec;
+			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+			if (eth_spec || eth_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH spec/mask");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec =
+				(const struct rte_flow_item_ipv4 *)item->spec;
+			ipv4_mask =
+				(const struct rte_flow_item_ipv4 *)item->mask;
+			if (!ipv4_spec || !ipv4_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL IPv4 spec/mask");
+				return -rte_errno;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.fragment_offset ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 mask.");
+				return -rte_errno;
+			}
+
+			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+				input_set |= I40E_INSET_IPV4_SRC;
+			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+				input_set |= I40E_INSET_IPV4_DST;
+			if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+				input_set |= I40E_INSET_IPV4_TOS;
+			if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+				input_set |= I40E_INSET_IPV4_TTL;
+			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+				input_set |= I40E_INSET_IPV4_PROTO;
+
+			/* Get filter info */
+			flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+			/* Check if it is fragment. */
+			flag_offset =
+			      rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+			if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+			    flag_offset & IPV4_HDR_MF_FLAG)
+				flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+			/* Get the filter info */
+			filter->input.flow.ip4_flow.proto =
+				ipv4_spec->hdr.next_proto_id;
+			filter->input.flow.ip4_flow.tos =
+				ipv4_spec->hdr.type_of_service;
+			filter->input.flow.ip4_flow.ttl =
+				ipv4_spec->hdr.time_to_live;
+			filter->input.flow.ip4_flow.src_ip =
+				ipv4_spec->hdr.src_addr;
+			filter->input.flow.ip4_flow.dst_ip =
+				ipv4_spec->hdr.dst_addr;
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+			ipv6_spec =
+				(const struct rte_flow_item_ipv6 *)item->spec;
+			ipv6_mask =
+				(const struct rte_flow_item_ipv6 *)item->mask;
+			if (!ipv6_spec || !ipv6_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL IPv6 spec/mask");
+				return -rte_errno;
+			}
+
+			/* Check IPv6 mask and update input set */
+			if (ipv6_mask->hdr.payload_len) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv6 mask");
+				return -rte_errno;
+			}
+
+			/* SRC and DST addresses of IPv6 must be fully masked */
+			for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+				if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+				    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+			}
+
+			input_set |= I40E_INSET_IPV6_SRC;
+			input_set |= I40E_INSET_IPV6_DST;
+
+			if ((ipv6_mask->hdr.vtc_flow &
+			     rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+			    == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+				input_set |= I40E_INSET_IPV6_TC;
+			if (ipv6_mask->hdr.proto == UINT8_MAX)
+				input_set |= I40E_INSET_IPV6_NEXT_HDR;
+			if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+				input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+			/* Get filter info */
+			filter->input.flow.ipv6_flow.tc =
+				(uint8_t)(ipv6_spec->hdr.vtc_flow <<
+					  I40E_IPV4_TC_SHIFT);
+			filter->input.flow.ipv6_flow.proto =
+				ipv6_spec->hdr.proto;
+			filter->input.flow.ipv6_flow.hop_limits =
+				ipv6_spec->hdr.hop_limits;
+
+			rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+				   ipv6_spec->hdr.src_addr, 16);
+			rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+				   ipv6_spec->hdr.dst_addr, 16);
+
+			/* Check if it is fragment. */
+			if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+				flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+			else
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+			if (!tcp_spec || !tcp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL TCP spec/mask");
+				return -rte_errno;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return -rte_errno;
+			}
+
+			if (tcp_mask->hdr.src_port != UINT16_MAX ||
+			    tcp_mask->hdr.dst_port != UINT16_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return -rte_errno;
+			}
+
+			input_set |= I40E_INSET_SRC_PORT;
+			input_set |= I40E_INSET_DST_PORT;
+
+			/* Get filter info */
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				filter->input.flow.tcp4_flow.src_port =
+					tcp_spec->hdr.src_port;
+				filter->input.flow.tcp4_flow.dst_port =
+					tcp_spec->hdr.dst_port;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				filter->input.flow.tcp6_flow.src_port =
+					tcp_spec->hdr.src_port;
+				filter->input.flow.tcp6_flow.dst_port =
+					tcp_spec->hdr.dst_port;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = (const struct rte_flow_item_udp *)item->spec;
+			udp_mask = (const struct rte_flow_item_udp *)item->mask;
+			if (!udp_spec || !udp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL UDP spec/mask");
+				return -rte_errno;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return -rte_errno;
+			}
+
+			if (udp_mask->hdr.src_port != UINT16_MAX ||
+			    udp_mask->hdr.dst_port != UINT16_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return -rte_errno;
+			}
+
+			input_set |= I40E_INSET_SRC_PORT;
+			input_set |= I40E_INSET_DST_PORT;
+
+			/* Get filter info */
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+				flow_type =
+					RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+				flow_type =
+					RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				filter->input.flow.udp4_flow.src_port =
+					udp_spec->hdr.src_port;
+				filter->input.flow.udp4_flow.dst_port =
+					udp_spec->hdr.dst_port;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				filter->input.flow.udp6_flow.src_port =
+					udp_spec->hdr.src_port;
+				filter->input.flow.udp6_flow.dst_port =
+					udp_spec->hdr.dst_port;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec =
+				(const struct rte_flow_item_sctp *)item->spec;
+			sctp_mask =
+				(const struct rte_flow_item_sctp *)item->mask;
+			if (!sctp_spec || !sctp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL SCTP spec/mask");
+				return -rte_errno;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid SCTP mask");
+				return -rte_errno;
+			}
+
+			if (sctp_mask->hdr.src_port != UINT16_MAX ||
+			    sctp_mask->hdr.dst_port != UINT16_MAX ||
+			    sctp_mask->hdr.tag != UINT32_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid SCTP mask");
+				return -rte_errno;
+			}
+			input_set |= I40E_INSET_SRC_PORT;
+			input_set |= I40E_INSET_DST_PORT;
+			input_set |= I40E_INSET_SCTP_VT;
+
+			/* Get filter info */
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				filter->input.flow.sctp4_flow.src_port =
+					sctp_spec->hdr.src_port;
+				filter->input.flow.sctp4_flow.dst_port =
+					sctp_spec->hdr.dst_port;
+				filter->input.flow.sctp4_flow.verify_tag =
+					sctp_spec->hdr.tag;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				filter->input.flow.sctp6_flow.src_port =
+					sctp_spec->hdr.src_port;
+				filter->input.flow.sctp6_flow.dst_port =
+					sctp_spec->hdr.dst_port;
+				filter->input.flow.sctp6_flow.verify_tag =
+					sctp_spec->hdr.tag;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VF:
+			vf_spec = (const struct rte_flow_item_vf *)item->spec;
+			filter->input.flow_ext.is_vf = 1;
+			filter->input.flow_ext.dst_id = vf_spec->id;
+			if (filter->input.flow_ext.is_vf &&
+			    filter->input.flow_ext.dst_id >= pf->vf_num) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid VF ID for FDIR.");
+				return -rte_errno;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	pctype = i40e_flowtype_to_pctype(flow_type);
+	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "Unsupported flow type");
+		return -rte_errno;
+	}
+
+	if (input_set != i40e_get_default_input_set(pctype)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "Invalid input set.");
+		return -rte_errno;
+	}
+	filter->input.flow_type = flow_type;
+
+	return 0;
+}
+
+/* Parse to get the action info of a FDIR filter */
+static int
+i40e_parse_fdir_act(struct rte_eth_dev *dev,
+		    const struct rte_flow_action *actions,
+		    struct rte_flow_error *error,
+		    struct rte_eth_fdir_filter *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark_spec;
+	uint32_t index = 0;
+
+	/* Check if the first non-void action is QUEUE or DROP. */
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Invalid action.");
+		return -rte_errno;
+	}
+
+	filter->action.flex_off = 0;
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_q = (const struct rte_flow_action_queue *)act->conf;
+		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		filter->action.rx_queue = act_q->index;
+		if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, act,
+					   "Invalid queue ID for FDIR.");
+			return -rte_errno;
+		}
+	} else {
+		/* DROP carries no conf, so don't dereference act->conf. */
+		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+	}
+	filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+
+	/* Check if the next non-void item is MARK or END. */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+	    act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Invalid action.");
+		return -rte_errno;
+	}
+
+	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+		mark_spec = (const struct rte_flow_action_mark *)act->conf;
+		filter->soft_id = mark_spec->id;
+
+		/* Check if the next non-void item is END */
+		index++;
+		NEXT_ITEM_OF_ACTION(act, actions, index);
+		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   act, "Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	return 0;
+}
+
+static int
 i40e_flow_validate(struct rte_eth_dev *dev,
 		   const struct rte_flow_attr *attr,
 		   const struct rte_flow_item pattern[],
-- 
2.5.5

