From: Qiming Yang <qiming.yang@intel.com>
To: dev@dpdk.org
Cc: Qiming Yang <qiming.yang@intel.com>
Subject: [dpdk-dev] [PATCH v8 2/3] net/ice: add generic flow API
Date: Wed, 26 Jun 2019 16:03:03 +0800
Message-ID: <20190626080304.4790-3-qiming.yang@intel.com>
In-Reply-To: <20190626080304.4790-1-qiming.yang@intel.com>

This patch adds ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate support;
these callbacks handle all the generic rte_flow filters.

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
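As a usage sketch (not part of this patch): applications reach the new
callbacks through the public rte_flow API. The snippet below assumes
port 0 is an ice port that is already configured and started with at
least four Rx queues; the address and queue index are illustrative. It
steers ingress IPv4 traffic from 192.168.0.1 to queue 3, which maps to
the supported pattern_ipv4 template and the QUEUE action added below.

	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(0xC0A80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = RTE_BE32(0xFFFFFFFF), /* exact match */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow = NULL;

	/* Dispatches into ice_flow_validate()/ice_flow_create() via the
	 * ops table this patch registers.
	 */
	if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
		flow = rte_flow_create(0, &attr, pattern, actions, &err);

The equivalent rule can be exercised from testpmd with:

  flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / end actions queue index 3 / end
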
 drivers/net/ice/Makefile           |   1 +
 drivers/net/ice/ice_ethdev.c       |  44 +++
 drivers/net/ice/ice_ethdev.h       |   5 +
 drivers/net/ice/ice_generic_flow.c | 696 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h | 614 ++++++++++++++++++++++++++++++++
 drivers/net/ice/meson.build        |   3 +-
 6 files changed, 1362 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a94aa7e..8ee06d1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* get base queue pairs index  in the device */
 	ice_base_queue_get(pf);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 50b966c..8a52239 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
 	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -266,6 +270,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..d5ff278
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,696 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
+
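+/* Generic flow ops, handed back to applications by ice_dev_filter_ctrl()
+ * when it is queried with RTE_ETH_FILTER_GENERIC.
+ */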
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Not support group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+ice_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+	bool is_find;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (is_void)
+			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+		else
+			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+		if (is_find)
+			break;
+		item++;
+	}
+	return item;
+}
+
+/* Skip all VOID items of the pattern */
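+/* Example: [ETH, VOID, IPV4, VOID, UDP, END] is compacted into
+ * [ETH, IPV4, UDP, END] before matching against the supported templates.
+ */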
+static void
+ice_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern)
+{
+	uint32_t cpy_count = 0;
+	const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+	for (;;) {
+		/* Find a non-void item first */
+		pb = ice_find_first_item(pb, false);
+		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+			pe = pb;
+			break;
+		}
+
+		/* Find a void item */
+		pe = ice_find_first_item(pb + 1, true);
+
+		cpy_count = pe - pb;
+		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+		items += cpy_count;
+
+		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+			pb = pe;
+			break;
+		}
+
+		pb = pe + 1;
+	}
+	/* Copy the END item. */
+	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+	struct rte_flow_item *items; /* used for pattern without VOID items */
+	uint32_t item_num = 0; /* non-void item number */
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = rte_zmalloc("ice_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		/* Return 0 to flag failure: a negative errno would wrap
+		 * to a huge nonzero value in this uint64_t return.
+		 */
+		return 0;
+	}
+
+	ice_pattern_skip_void_item(items, pattern);
+
+	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      items)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			rte_free(items);
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	rte_free(items);
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return 0;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			/* Check if NVGRE item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!nvgre_spec && nvgre_mask) ||
+			    (nvgre_spec && !nvgre_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return 0;
+			}
+
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported pattern item.");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if (!fields || fields & (~inset)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(struct rte_eth_dev *dev,
+				const struct rte_flow_action *actions,
+				struct rte_flow_error *error)
+{
+	const struct rte_flow_action_queue *act_q;
+	uint16_t queue;
+
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		act_q = actions->conf;
+		queue = act_q->index;
+		if (queue >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions,
+					   "Invalid queue ID for QUEUE action.");
+			return -rte_errno;
+		}
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(dev, actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow, error);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+	}
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret = 0;
+
+	/* ice_flow_destroy() unlinks the current entry, so TAILQ_FOREACH
+	 * is unsafe here; restart from the list head each iteration.
+	 */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..2e43a29
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
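+/* A supported pattern template plus the ICE_INSET_* bitmap of fields
+ * that rules using this pattern are allowed to match on.
+ */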
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_PROTO      0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_PROTO | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+};
+
+#endif
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 8697676..7f16647 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -7,7 +7,8 @@ objs = [base_objs]
 sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c',
-	'ice_switch_filter.c'
+	'ice_switch_filter.c',
+	'ice_generic_flow.c'
 	)
 
 deps += ['hash']
-- 
2.9.5

