* [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability
@ 2020-03-18  5:41 Simei Su
  2020-03-18  5:41 ` [dpdk-dev] [PATCH 1/5] net/iavf: add support for FDIR basic rule Simei Su
                   ` (6 more replies)
  0 siblings, 7 replies; 43+ messages in thread
From: Simei Su @ 2020-03-18  5:41 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, yahui.cao, jingjing.wu, simei.su

[PATCH 1/5] support FDIR common patterns and actions.
[PATCH 2/5] support FDIR GTPU pattern.
[PATCH 3/5] support FDIR L2TPv3, ESP, AH and NAT-T pattern.
[PATCH 4/5] support FDIR PFCP node and session pattern.
[PATCH 5/5] support FDIR mark action.

This patchset depends on the following patches on patchwork:
(1)https://patchwork.dpdk.org/patch/66764/
    [1/2] net/iavf: support generic flow
(2)https://patchwork.dpdk.org/patch/66765/
    [2/2] net/iavf: support more patterns
(3)https://patchwork.dpdk.org/patch/66682/
    [07/12] net/iavf: add flow director enabled switch value
(4)https://patchwork.dpdk.org/patch/66683/
    [08/12] net/iavf: support flow mark in normal data path
(5)https://patchwork.dpdk.org/patch/66684/
    [09/12] net/iavf: support flow mark in AVX path
(6)https://patchwork.dpdk.org/patch/66685/
    [10/12] net/iavf: support flow mark in SSE path

Simei Su (5):
  net/iavf: add support for FDIR basic rule
  net/iavf: add support for FDIR GTPU
  net/iavf: add support for FDIR L2TPv3 and IPSec
  net/iavf: add support for FDIR PFCP
  net/iavf: add support for FDIR mark action

 drivers/net/iavf/Makefile     |   1 +
 drivers/net/iavf/iavf.h       |  17 +
 drivers/net/iavf/iavf_fdir.c  | 999 ++++++++++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c | 128 +++++-
 drivers/net/iavf/meson.build  |   1 +
 5 files changed, 1145 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c

-- 
1.8.3.1



* [dpdk-dev] [PATCH 1/5] net/iavf: add support for FDIR basic rule
  2020-03-18  5:41 [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability Simei Su
@ 2020-03-18  5:41 ` Simei Su
  2020-03-31  5:20   ` Cao, Yahui
  2020-03-18  5:41 ` [dpdk-dev] [PATCH 2/5] net/iavf: add support for FDIR GTPU Simei Su
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 43+ messages in thread
From: Simei Su @ 2020-03-18  5:41 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, yahui.cao, jingjing.wu, simei.su

This patch adds the FDIR create/destroy/validate functions in AVF.
Common patterns and the queue/qgroup/passthru/drop actions are supported.
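
As a usage illustration only (not part of this patch; the port id,
address and queue index below are assumptions), an application could
drive these ops through the public rte_flow API roughly as follows:

#include <rte_ip.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Validate, then create, an IPv4/UDP rule steering matched packets to
 * Rx queue 2. rte_flow_validate() ends up in iavf_fdir_validation()
 * and rte_flow_create() in iavf_fdir_create() via the generic flow
 * framework. */
static struct rte_flow *
fdir_rule_sketch(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* the parser below only programs fully masked fields */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 2 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}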

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/Makefile     |   1 +
 drivers/net/iavf/iavf.h       |  16 +
 drivers/net/iavf/iavf_fdir.c  | 762 ++++++++++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c | 128 ++++++-
 drivers/net/iavf/meson.build  |   1 +
 5 files changed, 907 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c

diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
index 1bf0f26..193bc55 100644
--- a/drivers/net/iavf/Makefile
+++ b/drivers/net/iavf/Makefile
@@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
 ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
 endif
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 48b9509..62a3eb8 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -99,6 +99,16 @@ struct iavf_vsi {
 struct iavf_flow_parser_node;
 TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
 
+struct iavf_fdir_conf {
+	struct virtchnl_fdir_fltr input;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
+struct iavf_fdir_info {
+	struct iavf_fdir_conf conf;
+};
+
 /* TODO: is that correct to assume the max number to be 16 ?*/
 #define IAVF_MAX_MSIX_VECTORS   16
 
@@ -138,6 +148,8 @@ struct iavf_info {
 	struct iavf_flow_list flow_list;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+
+	struct iavf_fdir_info fdir; /* flow director info */
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -260,4 +272,8 @@ int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast,
 int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
 			 struct rte_ether_addr *addr, bool add);
 int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
+int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_check(struct iavf_adapter *adapter,
+		struct iavf_fdir_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
new file mode 100644
index 0000000..dd321ba
--- /dev/null
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -0,0 +1,762 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+#include "virtchnl.h"
+
+#define IAVF_FDIR_MAX_QREGION_SIZE 128
+
+#define IAVF_FDIR_IPV6_TC_OFFSET 20
+#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
+
+#define IAVF_FDIR_INSET_ETH (\
+	IAVF_INSET_ETHERTYPE)
+
+#define IAVF_FDIR_INSET_ETH_IPV4 (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6 (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
+	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,		IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,		IAVF_FDIR_INSET_ETH_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_sctp,		IAVF_FDIR_INSET_ETH_IPV4_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+};
+
+static struct iavf_flow_parser iavf_fdir_parser;
+
+static int
+iavf_fdir_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+		parser = &iavf_fdir_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fdir_uninit(struct iavf_adapter *ad)
+{
+	struct iavf_flow_parser *parser;
+
+	parser = &iavf_fdir_parser;
+
+	iavf_unregister_parser(parser, ad);
+}
+
+static int
+iavf_fdir_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter = meta;
+	struct iavf_fdir_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory");
+		return -rte_errno;
+	}
+
+	ret = iavf_fdir_add(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Add filter rule failed.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return 0;
+
+free_entry:
+	rte_free(rule);
+	return -rte_errno;
+}
+
+static int
+iavf_fdir_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fdir_conf *)flow->rule;
+
+	ret = iavf_fdir_del(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Del filter rule failed.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return 0;
+}
+
+static int
+iavf_fdir_validation(struct iavf_adapter *ad,
+		__rte_unused struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter = meta;
+	int ret;
+
+	ret = iavf_fdir_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Validate filter rule failed.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_fdir_engine = {
+	.init = iavf_fdir_init,
+	.uninit = iavf_fdir_uninit,
+	.create = iavf_fdir_create,
+	.destroy = iavf_fdir_destroy,
+	.validation = iavf_fdir_validation,
+	.type = IAVF_FLOW_ENGINE_FDIR,
+};
+
+static int
+iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
+			struct rte_flow_error *error,
+			const struct rte_flow_action *act,
+			struct virtchnl_filter_action *filter_action)
+{
+	const struct rte_flow_action_rss *rss = act->conf;
+	uint32_t i;
+
+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Invalid action.");
+		return -rte_errno;
+	}
+
+	if (rss->queue_num <= 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Queue region size can't be 0 or 1.");
+		return -rte_errno;
+	}
+
+	/* check if queue index for queue region is continuous */
+	for (i = 0; i < rss->queue_num - 1; i++) {
+		if (rss->queue[i + 1] != rss->queue[i] + 1) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, act,
+					"Discontinuous queue region");
+			return -rte_errno;
+		}
+	}
+
+	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Invalid queue region indexes.");
+		return -rte_errno;
+	}
+
+	if (!(rte_is_power_of_2(rss->queue_num) &&
+		(rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE))) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"The region size should be any of the following values: "
+				"2, 4, 8, 16, 32, 64 or 128, as long as the total number "
+				"of queues does not exceed the VSI allocation.");
+		return -rte_errno;
+	}
+
+	filter_action->q_index = rss->queue[0];
+	filter_action->q_region = rte_fls_u32(rss->queue_num) - 1;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse_action(struct iavf_adapter *ad,
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct iavf_fdir_conf *filter)
+{
+	const struct rte_flow_action_queue *act_q;
+	uint32_t dest_num = 0;
+	int ret;
+
+	int number = 0;
+	struct virtchnl_filter_action *filter_action;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+			dest_num++;
+
+			filter_action = &filter->input.rule_cfg.
+					action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_FDIR_ACT_PASSTHRU;
+
+			filter->input.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			dest_num++;
+
+			filter_action = &filter->input.rule_cfg.
+					action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_FDIR_ACT_DROP;
+
+			filter->input.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+
+			act_q = actions->conf;
+			filter_action = &filter->input.rule_cfg.
+					action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_FDIR_ACT_QUEUE;
+			filter_action->q_index = act_q->index;
+
+			if (filter_action->q_index >=
+				ad->eth_dev->data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid queue for FDIR.");
+				return -rte_errno;
+			}
+
+			filter->input.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+
+			filter_action = &filter->input.rule_cfg.
+					action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_FDIR_ACT_Q_REGION;
+
+			ret = iavf_fdir_parse_action_qregion(ad,
+						error, actions, filter_action);
+			if (ret)
+				return ret;
+
+			filter->input.rule_cfg.action_set.count = ++number;
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, actions,
+					"Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	if (dest_num == 0 || dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
+			const struct rte_flow_item pattern[],
+			struct rte_flow_error *error,
+			struct iavf_fdir_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	uint64_t input_set = IAVF_INSET_NONE;
+
+	enum rte_flow_item_type next_type;
+	uint16_t ether_type;
+
+	int layer = 0;
+	struct virtchnl_proto_hdr *hdr;
+
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+	};
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Range not supported");
+			return -rte_errno;
+		}
+
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			next_type = (item + 1)->type;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+
+			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
+				(!eth_spec || !eth_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "NULL eth spec/mask.");
+				return -rte_errno;
+			}
+
+			if (eth_spec && eth_mask) {
+				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid MAC_addr mask.");
+					return -rte_errno;
+				}
+			}
+
+			if (eth_spec && eth_mask && eth_mask->type) {
+				if (eth_mask->type != RTE_BE16(0xffff)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid type mask.");
+					return -rte_errno;
+				}
+
+				ether_type = rte_be_to_cpu_16(eth_spec->type);
+				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+					ether_type == RTE_ETHER_TYPE_IPV6) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Unsupported ether_type.");
+					return -rte_errno;
+				}
+
+				input_set |= IAVF_INSET_ETHERTYPE;
+				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
+					ETH, ETHERTYPE);
+
+				rte_memcpy(hdr->buffer,
+					eth_spec, sizeof(*eth_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.fragment_offset ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.type_of_service ==
+								UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TOS;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, IPV4, DSCP);
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_PROTO;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, IPV4, PROT);
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TTL;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, IPV4, TTL);
+				}
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, IPV4, SRC);
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, IPV4, DST);
+				}
+
+				rte_memcpy(hdr->buffer,
+					&ipv4_spec->hdr,
+					sizeof(ipv4_spec->hdr));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				if ((ipv6_mask->hdr.vtc_flow &
+					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+					== rte_cpu_to_be_32(
+							IAVF_IPV6_TC_MASK)) {
+					input_set |= IAVF_INSET_IPV6_TC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, IPV6, TC);
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, IPV6, PROT);
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, IPV6, HOP_LIMIT);
+				}
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					ipv6_addr_mask,
+					RTE_DIM(ipv6_mask->hdr.src_addr))) {
+					input_set |= IAVF_INSET_IPV6_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, IPV6, SRC);
+				}
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					ipv6_addr_mask,
+					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+					input_set |= IAVF_INSET_IPV6_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, IPV6, DST);
+				}
+
+				rte_memcpy(hdr->buffer,
+					&ipv6_spec->hdr,
+					sizeof(ipv6_spec->hdr));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				if (udp_mask->hdr.dgram_len ||
+					udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_UDP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, UDP, SRC_PORT);
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_UDP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, UDP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&udp_spec->hdr,
+						sizeof(udp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&udp_spec->hdr,
+						sizeof(udp_spec->hdr));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_TCP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, TCP, SRC_PORT);
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_TCP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, TCP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&tcp_spec->hdr,
+						sizeof(tcp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&tcp_spec->hdr,
+						sizeof(tcp_spec->hdr));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+			if (sctp_spec && sctp_mask) {
+				if (sctp_mask->hdr.cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid SCTP mask");
+					return -rte_errno;
+				}
+
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, SCTP, SRC_PORT);
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, SCTP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	filter->input_set = input_set;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_fdir_conf *filter = &vf->fdir.conf;
+	struct iavf_pattern_match_item *item = NULL;
+	uint64_t input_set;
+	int ret;
+
+	memset(filter, 0, sizeof(*filter));
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (!item)
+		return -rte_errno;
+
+	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
+	if (ret)
+		goto error;
+
+	input_set = filter->input_set;
+	if (!input_set || input_set & ~item->input_set_mask) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
+				"Invalid input set");
+		ret = -rte_errno;
+		goto error;
+	}
+
+	ret = iavf_fdir_parse_action(ad, actions, error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(item);
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_fdir_parser = {
+	.engine = &iavf_fdir_engine,
+	.array = iavf_fdir_pattern,
+	.array_len = RTE_DIM(iavf_fdir_pattern),
+	.parse_pattern_action = iavf_fdir_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fdir_engine_register)
+{
+	iavf_register_flow_engine(&iavf_fdir_engine);
+}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 11c70f5..77bfd1b 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -342,7 +342,8 @@
 
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
 		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
-		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -867,3 +868,128 @@
 
 	return err;
 }
+
+int
+iavf_fdir_add(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_status *fdir_status;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->input.vsi_id = vf->vsi_res->vsi_id;
+	filter->input.validate_only = 0;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->input);
+	args.in_args_size = sizeof(filter->input);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
+	filter->flow_id = fdir_status->flow_id;
+
+	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
+		PMD_DRV_LOG(INFO,
+			"add rule request was successfully processed by PF");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE)
+		PMD_DRV_LOG(INFO,
+			"add rule request failed due to no hw resource");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT)
+		PMD_DRV_LOG(INFO,
+			"add rule request failed because the rule already exists");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID)
+		PMD_DRV_LOG(INFO,
+			"add rule request failed because the hw doesn't support it");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
+		PMD_DRV_LOG(INFO,
+			"add rule request failed due to a programming timeout");
+
+	return 0;
+}
+
+int
+iavf_fdir_del(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_status *fdir_status;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->input.vsi_id = vf->vsi_res->vsi_id;
+	filter->input.flow_id = filter->flow_id;
+
+	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->input);
+	args.in_args_size = sizeof(filter->input);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
+
+	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
+		PMD_DRV_LOG(INFO,
+			"delete rule request was successfully processed by PF");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST)
+		PMD_DRV_LOG(INFO,
+			"delete rule request failed because the rule doesn't exist");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
+		PMD_DRV_LOG(INFO,
+			"delete rule request failed due to a programming timeout");
+
+	return 0;
+}
+
+int
+iavf_fdir_check(struct iavf_adapter *adapter,
+		struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_status *fdir_status;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->input.vsi_id = vf->vsi_res->vsi_id;
+	filter->input.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->input);
+	args.in_args_size = sizeof(filter->input);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to check flow director rule");
+		return err;
+	}
+
+	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
+
+	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
+		PMD_DRV_LOG(INFO,
+			"check rule request was successfully processed by PF");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID)
+		PMD_DRV_LOG(INFO,
+			"check rule request failed because parameter validation"
+			" failed or the HW doesn't support it");
+
+	return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 32eabca..ce71054 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -13,6 +13,7 @@ sources = files(
 	'iavf_rxtx.c',
 	'iavf_vchnl.c',
 	'iavf_generic_flow.c',
+	'iavf_fdir.c',
 )
 
 if arch_subdir == 'x86'
-- 
1.8.3.1



* [dpdk-dev] [PATCH 2/5] net/iavf: add support for FDIR GTPU
  2020-03-18  5:41 [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability Simei Su
  2020-03-18  5:41 ` [dpdk-dev] [PATCH 1/5] net/iavf: add support for FDIR basic rule Simei Su
@ 2020-03-18  5:41 ` Simei Su
  2020-03-19  1:46   ` Zhang, Qi Z
  2020-03-18  5:41 ` [dpdk-dev] [PATCH 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 43+ messages in thread
From: Simei Su @ 2020-03-18  5:41 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, yahui.cao, jingjing.wu, simei.su

This patch enables GTPU pattern for RTE_FLOW.
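
As an illustration only (the TEID value is assumed; rule creation is as
sketched in the basic-rule patch), a GTP-U pattern matching on a fully
masked TEID:

/* outer eth/ipv4/udp followed by GTP-U with an exact TEID match;
 * the parser only programs fields whose mask is all ones */
struct rte_flow_item_gtp gtp_spec = {
	.teid = rte_cpu_to_be_32(0x12345678),	/* assumed TEID */
};
struct rte_flow_item_gtp gtp_mask = {
	.teid = RTE_BE32(0xffffffff),
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
	  .spec = &gtp_spec, .mask = &gtp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};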

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 67 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index dd321ba..ad100c8 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -67,6 +67,14 @@
 	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
 	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
 
+#define IAVF_FDIR_INSET_GTPU (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID)
+
+#define IAVF_FDIR_INSET_GTPU_EH (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -77,6 +85,8 @@
 	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -360,6 +370,8 @@
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -686,6 +698,61 @@
 			filter->input.rule_cfg.proto_stack.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec = item->spec;
+			gtp_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+					gtp_mask->msg_type ||
+					gtp_mask->msg_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				if (gtp_mask->teid == UINT32_MAX) {
+					input_set |= IAVF_INSET_GTPU_TEID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, GTPU_IP, TEID);
+				}
+
+				rte_memcpy(hdr->buffer,
+					gtp_spec, sizeof(*gtp_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+			gtp_psc_spec = item->spec;
+			gtp_psc_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+
+			if (gtp_psc_spec && gtp_psc_mask) {
+				if (gtp_psc_mask->qfi == UINT8_MAX) {
+					input_set |= IAVF_INSET_GTPU_QFI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, GTPU_EH, QFI);
+				}
+
+				rte_memcpy(hdr->buffer, gtp_psc_spec,
+					sizeof(*gtp_psc_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1



* [dpdk-dev] [PATCH 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec
  2020-03-18  5:41 [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability Simei Su
  2020-03-18  5:41 ` [dpdk-dev] [PATCH 1/5] net/iavf: add support for FDIR basic rule Simei Su
  2020-03-18  5:41 ` [dpdk-dev] [PATCH 2/5] net/iavf: add support for FDIR GTPU Simei Su
@ 2020-03-18  5:41 ` Simei Su
  2020-03-18  5:42 ` [dpdk-dev] [PATCH 4/5] net/iavf: add support for FDIR PFCP Simei Su
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-03-18  5:41 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, yahui.cao, jingjing.wu, simei.su

This patch enables the L2TPv3, ESP, AH and NAT-T patterns for RTE_FLOW.
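
As an illustration only (the SPI value is assumed; rule creation is as
sketched in the basic-rule patch), an ESP-over-IPv4 pattern matching on
a fully masked SPI:

struct rte_flow_item_esp esp_spec = {
	.hdr.spi = rte_cpu_to_be_32(0x1000),	/* assumed SPI */
};
struct rte_flow_item_esp esp_mask = {
	.hdr.spi = RTE_BE32(0xffffffff),	/* full mask required */
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_ESP,
	  .spec = &esp_spec, .mask = &esp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};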

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 97 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index ad100c8..70437d6 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -75,6 +75,23 @@
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
 
+#define IAVF_FDIR_INSET_L2TPV3OIP (\
+	IAVF_L2TPV3OIP_SESSION_ID)
+
+#define IAVF_FDIR_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -87,6 +104,14 @@
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -372,6 +397,9 @@
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
 	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
+	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
+	const struct rte_flow_item_esp *esp_spec, *esp_mask;
+	const struct rte_flow_item_ah *ah_spec, *ah_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -753,6 +781,75 @@
 			filter->input.rule_cfg.proto_stack.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+			l2tpv3oip_spec = item->spec;
+			l2tpv3oip_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
+
+			if (l2tpv3oip_spec && l2tpv3oip_mask) {
+				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
+					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, L2TPV3, SESS_ID);
+				}
+
+				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
+					sizeof(*l2tpv3oip_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_ESP:
+			esp_spec = item->spec;
+			esp_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+			if (esp_spec && esp_mask) {
+				if (esp_mask->hdr.spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_ESP_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, ESP, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, &esp_spec->hdr,
+					sizeof(esp_spec->hdr));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_AH:
+			ah_spec = item->spec;
+			ah_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+			if (ah_spec && ah_mask) {
+				if (ah_mask->spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_AH_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, AH, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, ah_spec,
+					sizeof(*ah_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1



* [dpdk-dev] [PATCH 4/5] net/iavf: add support for FDIR PFCP
  2020-03-18  5:41 [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability Simei Su
                   ` (2 preceding siblings ...)
  2020-03-18  5:41 ` [dpdk-dev] [PATCH 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
@ 2020-03-18  5:42 ` Simei Su
  2020-03-18  5:42 ` [dpdk-dev] [PATCH 5/5] net/iavf: add support for FDIR mark action Simei Su
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-03-18  5:42 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, yahui.cao, jingjing.wu, simei.su

This patch enables the PFCP node and session patterns for RTE_FLOW.
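
As an illustration only (assuming the registered PFCP patterns include
the UDP header), a rule that matches PFCP session messages checks the
fully masked S field:

struct rte_flow_item_pfcp pfcp_spec = {
	.s_field = 1,	/* 1 = session message, 0 = node message */
};
struct rte_flow_item_pfcp pfcp_mask = {
	.s_field = 0xff,	/* the parser requires a full mask */
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_PFCP,
	  .spec = &pfcp_spec, .mask = &pfcp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};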

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 70437d6..8d49c28 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -92,6 +92,9 @@
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_ESP_SPI)
 
+#define IAVF_FDIR_INSET_PFCP (\
+	IAVF_INSET_PFCP_S_FIELD)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -112,6 +115,8 @@
 	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -400,6 +405,7 @@
 	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
 	const struct rte_flow_item_esp *esp_spec, *esp_mask;
 	const struct rte_flow_item_ah *ah_spec, *ah_mask;
+	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -850,6 +856,29 @@
 			filter->input.rule_cfg.proto_stack.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_PFCP:
+			pfcp_spec = item->spec;
+			pfcp_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
+
+			if (pfcp_spec && pfcp_mask) {
+				if (pfcp_mask->s_field == UINT8_MAX) {
+					input_set |= IAVF_INSET_PFCP_S_FIELD;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, PFCP, S_FIELD);
+				}
+
+				rte_memcpy(hdr->buffer, pfcp_spec,
+					sizeof(*pfcp_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1



* [dpdk-dev] [PATCH 5/5] net/iavf: add support for FDIR mark action
  2020-03-18  5:41 [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability Simei Su
                   ` (3 preceding siblings ...)
  2020-03-18  5:42 ` [dpdk-dev] [PATCH 4/5] net/iavf: add support for FDIR PFCP Simei Su
@ 2020-03-18  5:42 ` Simei Su
  2020-03-31  5:20   ` Cao, Yahui
  2020-03-18  5:56 ` [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability Stephen Hemminger
  2020-04-02 13:32 ` [dpdk-dev] [PATCH v2 " Simei Su
  6 siblings, 1 reply; 43+ messages in thread
From: Simei Su @ 2020-03-18  5:42 UTC (permalink / raw)
  To: xiaolong.ye, qi.z.zhang; +Cc: dev, yahui.cao, jingjing.wu, simei.su

This patch enables mark action support and takes the mark-only case
into consideration.
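
As an illustration only (the mark id is assumed), a mark-only action
list; with this patch the PMD appends an implicit PASSTHRU action, so
matched packets keep their normal queue assignment and report the id
through the mbuf FDIR metadata:

struct rte_flow_action_mark mark = { .id = 23 };	/* assumed id */
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};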

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf.h      |  1 +
 drivers/net/iavf/iavf_fdir.c | 46 +++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 62a3eb8..178d481 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -103,6 +103,7 @@ struct iavf_fdir_conf {
 	struct virtchnl_fdir_fltr input;
 	uint64_t input_set;
 	uint32_t flow_id;
+	uint32_t mark_flag;
 };
 
 struct iavf_fdir_info {
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 8d49c28..a03bc09 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -18,6 +18,7 @@
 #include "iavf.h"
 #include "iavf_generic_flow.h"
 #include "virtchnl.h"
+#include "iavf_rxtx.h"
 
 #define IAVF_FDIR_MAX_QREGION_SIZE 128
 
@@ -171,6 +172,9 @@
 		goto free_entry;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 1);
+
 	rte_memcpy(rule, filter, sizeof(*rule));
 	flow->rule = rule;
 
@@ -199,6 +203,9 @@
 		return -rte_errno;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 0);
+
 	flow->rule = NULL;
 	rte_free(filter);
 
@@ -297,7 +304,9 @@
 			struct iavf_fdir_conf *filter)
 {
 	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark_spec = NULL;
 	uint32_t dest_num = 0;
+	uint32_t mark_num = 0;
 	int ret;
 
 	int number = 0;
@@ -367,6 +376,20 @@
 			filter->input.rule_cfg.action_set.count = ++number;
 			break;
 
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			mark_num++;
+
+			filter->mark_flag = 1;
+			mark_spec = actions->conf;
+			filter_action = &filter->input.rule_cfg.
+					action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_FDIR_ACT_MARK;
+			filter_action->mark_id = mark_spec->id;
+
+			filter->input.rule_cfg.action_set.count = ++number;
+			break;
+
 		default:
 			rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -375,13 +398,34 @@
 		}
 	}
 
-	if (dest_num == 0 || dest_num >= 2) {
+	if (dest_num >= 2) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
 			"Unsupported action combination");
 		return -rte_errno;
 	}
 
+	if (mark_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Too many mark actions");
+		return -rte_errno;
+	}
+
+	if (dest_num + mark_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Empty action");
+		return -rte_errno;
+	}
+
+	/* Mark only is equal to mark + passthru. */
+	if (dest_num == 0) {
+		filter_action = &filter->input.rule_cfg.
+				action_set.actions[number];
+		filter_action->type = VIRTCHNL_FDIR_ACT_PASSTHRU;
+	}
+
 	return 0;
 }
 
-- 
1.8.3.1



* Re: [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability
  2020-03-18  5:41 [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability Simei Su
                   ` (4 preceding siblings ...)
  2020-03-18  5:42 ` [dpdk-dev] [PATCH 5/5] net/iavf: add support for FDIR mark action Simei Su
@ 2020-03-18  5:56 ` Stephen Hemminger
  2020-03-19  8:48   ` Su, Simei
  2020-04-02 13:32 ` [dpdk-dev] [PATCH v2 " Simei Su
  6 siblings, 1 reply; 43+ messages in thread
From: Stephen Hemminger @ 2020-03-18  5:56 UTC (permalink / raw)
  To: Simei Su; +Cc: xiaolong.ye, qi.z.zhang, dev, yahui.cao, jingjing.wu

On Wed, 18 Mar 2020 13:41:56 +0800
Simei Su <simei.su@intel.com> wrote:

> [PATCH 1/5] support FDIR common patterns and actions.
> [PATCH 2/5] support FDIR GTPU pattern.
> [PATCH 3/5] support FDIR L2TPv3, ESP, AH and NAT-T pattern.
> [PATCH 4/5] support FDIR PFCP node and session pattern.
> [PATCH 5/5] support FDIR mark action.
> 
> This patchset depends on the following patches on patchwork:
> (1)https://patchwork.dpdk.org/patch/66764/
>     [1/2] net/iavf: support generic flow
> (2)https://patchwork.dpdk.org/patch/66765/
>     [2/2] net/iavf: support more patterns
> (3)https://patchwork.dpdk.org/patch/66682/
>     [07/12] net/iavf: add flow director enabled switch value
> (4)https://patchwork.dpdk.org/patch/66683/
>     [08/12] net/iavf: support flow mark in normal data path
> (5)https://patchwork.dpdk.org/patch/66684/
>     [09/12] net/iavf: support flow mark in AVX path
> (6)https://patchwork.dpdk.org/patch/66685/
>     [10/12] net/iavf: support flow mark in SSE path
> 
> Simei Su (5):
>   net/iavf: add support for FDIR basic rule
>   net/iavf: add support for FDIR GTPU
>   net/iavf: add support for FDIR L2TPv3 and IPSec
>   net/iavf: add support for FDIR PFCP
>   net/iavf: add support for FDIR mark action
> 
>  drivers/net/iavf/Makefile     |   1 +
>  drivers/net/iavf/iavf.h       |  17 +
>  drivers/net/iavf/iavf_fdir.c  | 999 ++++++++++++++++++++++++++++++++++++++++++
>  drivers/net/iavf/iavf_vchnl.c | 128 +++++-
>  drivers/net/iavf/meson.build  |   1 +
>  5 files changed, 1145 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/net/iavf/iavf_fdir.c
> 

So Chenxu (from Intel) is removing fdir support
and Simei (from Intel) is adding fdir support??


* Re: [dpdk-dev] [PATCH 2/5] net/iavf: add support for FDIR GTPU
  2020-03-18  5:41 ` [dpdk-dev] [PATCH 2/5] net/iavf: add support for FDIR GTPU Simei Su
@ 2020-03-19  1:46   ` Zhang, Qi Z
  0 siblings, 0 replies; 43+ messages in thread
From: Zhang, Qi Z @ 2020-03-19  1:46 UTC (permalink / raw)
  To: Su, Simei, Ye, Xiaolong; +Cc: dev, Cao, Yahui, Wu, Jingjing



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Wednesday, March 18, 2020 1:42 PM
> To: Ye, Xiaolong <xiaolong.ye@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Su, Simei <simei.su@intel.com>
> Subject: [PATCH 2/5] net/iavf: add support for FDIR GTPU
> 
> This patch enables GTPU pattern for RTE_FLOW.

The comment is misleading: the GTPU pattern for rte_flow is already enabled in another patch;
this patch actually adds GTPU flow filter support in FDIR.
> 
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
>  drivers/net/iavf/iavf_fdir.c | 67
> ++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 67 insertions(+)
> 
> diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
> index dd321ba..ad100c8 100644
> --- a/drivers/net/iavf/iavf_fdir.c
> +++ b/drivers/net/iavf/iavf_fdir.c
> @@ -67,6 +67,14 @@
>  	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
>  	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> 
> +#define IAVF_FDIR_INSET_GTPU (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_GTPU_TEID)
> +
> +#define IAVF_FDIR_INSET_GTPU_EH (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
> +
>  static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
>  	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
>  	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
> @@ -77,6 +85,8 @@
>  	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
>  	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
>  	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
>  };
> 
>  static struct iavf_flow_parser iavf_fdir_parser;
> @@ -360,6 +370,8 @@
>  	const struct rte_flow_item_udp *udp_spec, *udp_mask;
>  	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
>  	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> +	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
> +	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
>  	uint64_t input_set = IAVF_INSET_NONE;
> 
>  	enum rte_flow_item_type next_type;
> @@ -686,6 +698,61 @@
>  			filter->input.rule_cfg.proto_stack.count = ++layer;
>  			break;
> 
> +		case RTE_FLOW_ITEM_TYPE_GTPU:
> +			gtp_spec = item->spec;
> +			gtp_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.
> +				proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
> +
> +			if (gtp_spec && gtp_mask) {
> +				if (gtp_mask->v_pt_rsv_flags ||
> +					gtp_mask->msg_type ||
> +					gtp_mask->msg_len) {
> +					rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid GTP mask");
> +					return -rte_errno;
> +				}
> +
> +				if (gtp_mask->teid == UINT32_MAX) {
> +					input_set |= IAVF_INSET_GTPU_TEID;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, GTPU_IP, TEID);
> +				}
> +
> +				rte_memcpy(hdr->buffer,
> +					gtp_spec, sizeof(*gtp_spec));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
> +			gtp_psc_spec = item->spec;
> +			gtp_psc_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.
> +				proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
> +
> +			if (gtp_psc_spec && gtp_psc_mask) {
> +				if (gtp_psc_mask->qfi == UINT8_MAX) {
> +					input_set |= IAVF_INSET_GTPU_QFI;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, GTPU_EH, QFI);
> +				}
> +
> +				rte_memcpy(hdr->buffer, gtp_psc_spec,
> +					sizeof(*gtp_psc_spec));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
>  		case RTE_FLOW_ITEM_TYPE_VOID:
>  			break;
> 
> --
> 1.8.3.1



* Re: [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability
  2020-03-18  5:56 ` [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability Stephen Hemminger
@ 2020-03-19  8:48   ` Su, Simei
  0 siblings, 0 replies; 43+ messages in thread
From: Su, Simei @ 2020-03-19  8:48 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: Ye, Xiaolong, Zhang, Qi Z, dev, Cao, Yahui, Wu, Jingjing

Hi, Stephen

> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: Wednesday, March 18, 2020 1:56 PM
> To: Su, Simei <simei.su@intel.com>
> Cc: Ye, Xiaolong <xiaolong.ye@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>;
> Wu, Jingjing <jingjing.wu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capability
> 
> On Wed, 18 Mar 2020 13:41:56 +0800
> Simei Su <simei.su@intel.com> wrote:
> 
> > [PATCH 1/5] support FDIR common patterns and actions.
> > [PATCH 2/5] support FDIR GTPU pattern.
> > [PATCH 3/5] support FDIR L2TPv3, ESP, AH and NAT-T pattern.
> > [PATCH 4/5] support FDIR PFCP node and session pattern.
> > [PATCH 5/5] support FDIR mark action.
> >
> > This patchset depends on the following patches on patchwork:
> > (1)https://patchwork.dpdk.org/patch/66764/
> >     [1/2] net/iavf: support generic flow
> > (2)https://patchwork.dpdk.org/patch/66765/
> >     [2/2] net/iavf: support more patterns
> > (3)https://patchwork.dpdk.org/patch/66682/
> >     [07/12] net/iavf: add flow director enabled switch value
> > (4)https://patchwork.dpdk.org/patch/66683/
> >     [08/12] net/iavf: support flow mark in normal data path
> > (5)https://patchwork.dpdk.org/patch/66684/
> >     [09/12] net/iavf: support flow mark in AVX path
> > (6)https://patchwork.dpdk.org/patch/66685/
> >     [10/12] net/iavf: support flow mark in SSE path
> >
> > Simei Su (5):
> >   net/iavf: add support for FDIR basic rule
> >   net/iavf: add support for FDIR GTPU
> >   net/iavf: add support for FDIR L2TPv3 and IPSec
> >   net/iavf: add support for FDIR PFCP
> >   net/iavf: add support for FDIR mark action
> >
> >  drivers/net/iavf/Makefile     |   1 +
> >  drivers/net/iavf/iavf.h       |  17 +
> >  drivers/net/iavf/iavf_fdir.c  | 999 ++++++++++++++++++++++++++++++++++++++++++
> >  drivers/net/iavf/iavf_vchnl.c | 128 +++++-
> >  drivers/net/iavf/meson.build  |   1 +
> >  5 files changed, 1145 insertions(+), 1 deletion(-)
> >  create mode 100644 drivers/net/iavf/iavf_fdir.c
> >
> 
> So Chenxu (from Intel) is removing fdir support and Simei (from Intel) is adding
> fdir support??

Chenxu is removing the legacy fdir API, while I am enabling the advanced fdir rte_flow API.

Br
Simei



* Re: [dpdk-dev] [PATCH 1/5] net/iavf: add support for FDIR basic rule
  2020-03-18  5:41 ` [dpdk-dev] [PATCH 1/5] net/iavf: add support for FDIR basic rule Simei Su
@ 2020-03-31  5:20   ` Cao, Yahui
  2020-03-31  7:12     ` Su, Simei
  0 siblings, 1 reply; 43+ messages in thread
From: Cao, Yahui @ 2020-03-31  5:20 UTC (permalink / raw)
  To: Su, Simei, Ye, Xiaolong, Zhang, Qi Z; +Cc: dev, Wu, Jingjing



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Wednesday, March 18, 2020 1:42 PM
> To: Ye, Xiaolong <xiaolong.ye@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Su, Simei <simei.su@intel.com>
> Subject: [PATCH 1/5] net/iavf: add support for FDIR basic rule
> 
> This patch adds the FDIR create/destroy/validate functions in AVF.
> Common patterns and the queue/qgroup/passthru/drop actions are supported.
> 
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
>  drivers/net/iavf/Makefile     |   1 +
>  drivers/net/iavf/iavf.h       |  16 +
>  drivers/net/iavf/iavf_fdir.c  | 762 ++++++++++++++++++++++++++++++++++++++++++
>  drivers/net/iavf/iavf_vchnl.c | 128 ++++++-
>  drivers/net/iavf/meson.build  |   1 +
>  5 files changed, 907 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/net/iavf/iavf_fdir.c
> 
> diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
> index 1bf0f26..193bc55 100644
> --- a/drivers/net/iavf/Makefile
> +++ b/drivers/net/iavf/Makefile
> @@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
>  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
>  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
>  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
> +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
>  ifeq ($(CONFIG_RTE_ARCH_X86), y)
>  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
>  endif
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
> index 48b9509..62a3eb8 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -99,6 +99,16 @@ struct iavf_vsi {
>  struct iavf_flow_parser_node;
>  TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
> 
> +struct iavf_fdir_conf {
> +	struct virtchnl_fdir_fltr input;
> +	uint64_t input_set;
> +	uint32_t flow_id;
> +};
> +
> +struct iavf_fdir_info {
> +	struct iavf_fdir_conf conf;
> +};
> +
>  /* TODO: is that correct to assume the max number to be 16 ?*/
>  #define IAVF_MAX_MSIX_VECTORS   16
> 
> @@ -138,6 +148,8 @@ struct iavf_info {
>  	struct iavf_flow_list flow_list;
>  	struct iavf_parser_list rss_parser_list;
>  	struct iavf_parser_list dist_parser_list;
> +
> +	struct iavf_fdir_info fdir; /* flow director info */
>  };
> 
>  #define IAVF_MAX_PKT_TYPE 1024
> @@ -260,4 +272,8 @@ int iavf_config_promisc(struct iavf_adapter *adapter,
> bool enable_unicast,  int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
>  			 struct rte_ether_addr *addr, bool add);  int
> iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
> +int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf
> +*filter); int iavf_fdir_del(struct iavf_adapter *adapter, struct
> +iavf_fdir_conf *filter); int iavf_fdir_check(struct iavf_adapter *adapter,
> +		struct iavf_fdir_conf *filter);
>  #endif /* _IAVF_ETHDEV_H_ */
> diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
> new file mode 100644
> index 0000000..dd321ba
> --- /dev/null
> +++ b/drivers/net/iavf/iavf_fdir.c
> @@ -0,0 +1,762 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2019 Intel Corporation
> + */
> +
> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +
> +#include <rte_ether.h>
> +#include <rte_ethdev_driver.h>
> +#include <rte_malloc.h>
> +#include <rte_tailq.h>
> +
> +#include "iavf.h"
> +#include "iavf_generic_flow.h"
> +#include "virtchnl.h"
> +
> +#define IAVF_FDIR_MAX_QREGION_SIZE 128
> +
> +#define IAVF_FDIR_IPV6_TC_OFFSET 20
> +#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
> +
> +#define IAVF_FDIR_INSET_ETH (\
> +	IAVF_INSET_ETHERTYPE)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4 (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
> +	IAVF_INSET_IPV4_TTL)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> +	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> +	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> +	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6 (\
> +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> +	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
> +	IAVF_INSET_IPV6_HOP_LIMIT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
> +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> +	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> +	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
> +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> +	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> +	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
> +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> +	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> +	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> +
> +static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
> +	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,
> 		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,
> 		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_udp,
> 	IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_tcp,
> 	IAVF_FDIR_INSET_ETH_IPV4_TCP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_sctp,
> 	IAVF_FDIR_INSET_ETH_IPV4_SCTP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,
> 		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6_udp,
> 	IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6_tcp,
> 	IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6_sctp,
> 	IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
> +};
> +
> +static struct iavf_flow_parser iavf_fdir_parser;
> +
> +static int
> +iavf_fdir_init(struct iavf_adapter *ad)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> +	struct iavf_flow_parser *parser;
> +
> +	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
> +		parser = &iavf_fdir_parser;
> +	else
> +		return -ENOTSUP;
> +
> +	return iavf_register_parser(parser, ad);
> +}
> +
> +static void
> +iavf_fdir_uninit(struct iavf_adapter *ad)
> +{
> +	struct iavf_flow_parser *parser;
> +
> +	parser = &iavf_fdir_parser;
> +
> +	iavf_unregister_parser(parser, ad);
> +}
> +
> +static int
> +iavf_fdir_create(struct iavf_adapter *ad,
> +		struct rte_flow *flow,
> +		void *meta,
> +		struct rte_flow_error *error)
> +{
> +	struct iavf_fdir_conf *filter = meta;
> +	struct iavf_fdir_conf *rule;
> +	int ret;
> +
> +	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
> +	if (!rule) {
> +		rte_flow_error_set(error, ENOMEM,
> +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				"Failed to allocate memory");
> +		return -rte_errno;
> +	}
> +
> +	ret = iavf_fdir_add(ad, filter);
> +	if (ret) {
> +		rte_flow_error_set(error, -ret,
> +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				"Add filter rule failed.");
> +		goto free_entry;
> +	}
> +
> +	rte_memcpy(rule, filter, sizeof(*rule));
> +	flow->rule = rule;
> +
> +	return 0;
> +
> +free_entry:
> +	rte_free(rule);
> +	return -rte_errno;
> +}
> +
> +static int
> +iavf_fdir_destroy(struct iavf_adapter *ad,
> +		struct rte_flow *flow,
> +		struct rte_flow_error *error)
> +{
> +	struct iavf_fdir_conf *filter;
> +	int ret;
> +
> +	filter = (struct iavf_fdir_conf *)flow->rule;
> +
> +	ret = iavf_fdir_del(ad, filter);
> +	if (ret) {
> +		rte_flow_error_set(error, -ret,
> +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				"Del filter rule failed.");
> +		return -rte_errno;
> +	}
> +
> +	flow->rule = NULL;
> +	rte_free(filter);
> +
> +	return 0;
> +}
> +
> +static int
> +iavf_fdir_validation(struct iavf_adapter *ad,
> +		__rte_unused struct rte_flow *flow,
> +		void *meta,
> +		struct rte_flow_error *error)
> +{
> +	struct iavf_fdir_conf *filter = meta;
> +	int ret;
> +
> +	ret = iavf_fdir_check(ad, filter);
> +	if (ret) {
> +		rte_flow_error_set(error, -ret,
> +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				"Validate filter rule failed.");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +};
> +
> +static struct iavf_flow_engine iavf_fdir_engine = {
> +	.init = iavf_fdir_init,
> +	.uninit = iavf_fdir_uninit,
> +	.create = iavf_fdir_create,
> +	.destroy = iavf_fdir_destroy,
> +	.validation = iavf_fdir_validation,
> +	.type = IAVF_FLOW_ENGINE_FDIR,
> +};
> +
> +static int
> +iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
> +			struct rte_flow_error *error,
> +			const struct rte_flow_action *act,
> +			struct virtchnl_filter_action *filter_action)
> +{
> +	const struct rte_flow_action_rss *rss = act->conf;
> +	uint32_t i;
> +
> +	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION, act,
> +				"Invalid action.");
> +		return -rte_errno;
> +	}
> +
> +	if (rss->queue_num <= 1) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION, act,
> +				"Queue region size can't be 0 or 1.");
> +		return -rte_errno;
> +	}
> +
> +	/* check if queue index for queue region is continuous */
> +	for (i = 0; i < rss->queue_num - 1; i++) {
> +		if (rss->queue[i + 1] != rss->queue[i] + 1) {
> +			rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ACTION, act,
> +					"Discontinuous queue region");
> +			return -rte_errno;
> +		}
> +	}
> +
> +	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION, act,
> +				"Invalid queue region indexes.");
> +		return -rte_errno;
> +	}
> +
> +	if (!(rte_is_power_of_2(rss->queue_num) &&
> +		(rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE))) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION, act,
> +				"The region size should be any of the following values: "
> +				"1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
> +				"of queues does not exceed the VSI allocation.");
> +		return -rte_errno;
> +	}
> +
> +	filter_action->q_index = rss->queue[0];
> +	filter_action->q_region = rte_fls_u32(rss->queue_num) - 1;
> +
> +	return 0;
> +}
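
For illustration: with a contiguous region of 8 queues, rte_fls_u32(8) returns
4 (the 1-based position of the highest set bit), so q_region is set to 3, i.e.
log2 of the region size, and matched packets are spread across rss->queue[0]
through rss->queue[0] + 7.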
> +
> +static int
> +iavf_fdir_parse_action(struct iavf_adapter *ad,
> +			const struct rte_flow_action actions[],
> +			struct rte_flow_error *error,
> +			struct iavf_fdir_conf *filter)
> +{
> +	const struct rte_flow_action_queue *act_q;
> +	uint32_t dest_num = 0;
> +	int ret;
> +
> +	int number = 0;
> +	struct virtchnl_filter_action *filter_action;
> +
> +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
> +		switch (actions->type) {
> +		case RTE_FLOW_ACTION_TYPE_VOID:
> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
> +			dest_num++;
> +
> +			filter_action = &filter->input.rule_cfg.
> +					action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_FDIR_ACT_PASSTHRU;
> +
> +			filter->input.rule_cfg.action_set.count = ++number;
> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_DROP:
> +			dest_num++;
> +
> +			filter_action = &filter->input.rule_cfg.
> +					action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_FDIR_ACT_DROP;
> +
> +			filter->input.rule_cfg.action_set.count = ++number;
 [Cao, Yahui] 
It seems there is no upper bound check on count/number, so there may be
out-of-bounds index access.
This also applies to all the count/number statements below.
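
A sketch of such a check before each actions[number] write; note that
VIRTCHNL_MAX_FDIR_ACTIONS is only a placeholder name here for whatever upper
bound the virtchnl action array actually declares:

	/* Placeholder bound: use the real virtchnl array size. */
	if (number >= VIRTCHNL_MAX_FDIR_ACTIONS) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, actions,
				"Too many actions for one flow rule.");
		return -rte_errno;
	}
	filter_action = &filter->input.rule_cfg.action_set.actions[number];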



> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_QUEUE:
> +			dest_num++;
> +
> +			act_q = actions->conf;
> +			filter_action = &filter->input.rule_cfg.
> +					action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_FDIR_ACT_QUEUE;
> +			filter_action->q_index = act_q->index;
> +
> +			if (filter_action->q_index >=
> +				ad->eth_dev->data->nb_rx_queues) {
> +				rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ACTION,
> +					actions, "Invalid queue for FDIR.");
> +				return -rte_errno;
> +			}
> +
> +			filter->input.rule_cfg.action_set.count = ++number;
> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_RSS:
> +			dest_num++;
> +
> +			filter_action = &filter->input.rule_cfg.
> +					action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_FDIR_ACT_Q_REGION;
> +
> +			ret = iavf_fdir_parse_action_qregion(ad,
> +						error, actions, filter_action);
> +			if (ret)
> +				return ret;
> +
> +			filter->input.rule_cfg.action_set.count = ++number;
> +			break;
> +
> +		default:
> +			rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ACTION,
> actions,
> +					"Invalid action.");
> +			return -rte_errno;
> +		}
> +	}
> +
> +	if (dest_num == 0 || dest_num >= 2) {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +			"Unsupported action combination");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
> +			const struct rte_flow_item pattern[],
> +			struct rte_flow_error *error,
> +			struct iavf_fdir_conf *filter)
> +{
> +	const struct rte_flow_item *item = pattern;
> +	enum rte_flow_item_type item_type;
> +	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
> +	const struct rte_flow_item_eth *eth_spec, *eth_mask;
> +	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> +	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> +	const struct rte_flow_item_udp *udp_spec, *udp_mask;
> +	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> +	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> +	uint64_t input_set = IAVF_INSET_NONE;
> +
> +	enum rte_flow_item_type next_type;
> +	uint16_t ether_type;
> +
> +	int layer = 0;
> +	struct virtchnl_proto_hdr *hdr;
> +
> +	uint8_t  ipv6_addr_mask[16] = {
> +		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
> +		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
> +	};
> +
> +	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++)
> {
> +		if (item->last) {
> +			rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM, item,
> +					"Not support range");
> +		}
> +
> +		item_type = item->type;
> +
> +		switch (item_type) {
> +		case RTE_FLOW_ITEM_TYPE_ETH:
> +			eth_spec = item->spec;
> +			eth_mask = item->mask;
> +			next_type = (item + 1)->type;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.
> +				proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
> +
> +			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
> +				(!eth_spec || !eth_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "NULL eth spec/mask.");
> +				return -rte_errno;
> +			}
> +
> +			if (eth_spec && eth_mask) {
> +				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
> +				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid MAC_addr mask.");
> +					return -rte_errno;
> +				}
> +			}
> +
> +			if (eth_spec && eth_mask && eth_mask->type) {
> +				if (eth_mask->type != RTE_BE16(0xffff)) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid type mask.");
> +					return -rte_errno;
> +				}
> +
> +				ether_type = rte_be_to_cpu_16(eth_spec->type);
> +				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
> +					ether_type == RTE_ETHER_TYPE_IPV6) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM,
> +						item,
> +						"Unsupported ether_type.");
> +					return -rte_errno;
> +				}
> +
> +				input_set |= IAVF_INSET_ETHERTYPE;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +					ETH, ETHERTYPE);
> +
> +				rte_memcpy(hdr->buffer,
> +					eth_spec, sizeof(*eth_spec));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
[Cao, Yahui] 
It seems there is no upper bound check on count/layer, so there may be
out-of-bounds index access.
This also applies to all the count/layer statements below.
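
The same pattern would cover the proto_hdr[layer] writes below; again,
VIRTCHNL_MAX_NUM_PROTO_HDRS is only a stand-in name for the real array bound:

	/* Stand-in bound: use the real virtchnl array size. */
	if (layer >= VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Too many protocol layers in one pattern.");
		return -rte_errno;
	}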




> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
> +			ipv4_spec = item->spec;
> +			ipv4_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.
> +				proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
> +
> +			if (ipv4_spec && ipv4_mask) {
> +				if (ipv4_mask->hdr.version_ihl ||
> +					ipv4_mask->hdr.total_length ||
> +					ipv4_mask->hdr.packet_id ||
> +					ipv4_mask->hdr.fragment_offset ||
> +					ipv4_mask->hdr.hdr_checksum) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid IPv4 mask.");
> +					return -rte_errno;
> +				}
> +
> +				if (ipv4_mask->hdr.type_of_service ==
> +								UINT8_MAX) {
> +					input_set |= IAVF_INSET_IPV4_TOS;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, IPV4, DSCP);
> +				}
> +				if (ipv4_mask->hdr.next_proto_id ==
> UINT8_MAX) {
> +					input_set |= IAVF_INSET_IPV4_PROTO;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, IPV4, PROT);
> +				}
> +				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> {
> +					input_set |= IAVF_INSET_IPV4_TTL;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, IPV4, TTL);
> +				}
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
> +					input_set |= IAVF_INSET_IPV4_SRC;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, IPV4, SRC);
> +				}
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
> +					input_set |= IAVF_INSET_IPV4_DST;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, IPV4, DST);
> +				}
> +
> +				rte_memcpy(hdr->buffer,
> +					&ipv4_spec->hdr,
> +					sizeof(ipv4_spec->hdr));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_IPV6:
> +			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
> +			ipv6_spec = item->spec;
> +			ipv6_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.
> +				proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
> +
> +			if (ipv6_spec && ipv6_mask) {
> +				if (ipv6_mask->hdr.payload_len) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid IPv6 mask");
> +					return -rte_errno;
> +				}
> +
> +				if ((ipv6_mask->hdr.vtc_flow &
> +					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
> +					== rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
> +					input_set |= IAVF_INSET_IPV6_TC;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, IPV6, TC);
> +				}
> +				if (ipv6_mask->hdr.proto == UINT8_MAX) {
> +					input_set |=
> IAVF_INSET_IPV6_NEXT_HDR;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, IPV6, PROT);
> +				}
> +				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
> +					input_set |=
> IAVF_INSET_IPV6_HOP_LIMIT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, IPV6, HOP_LIMIT);
> +				}
> +				if (!memcmp(ipv6_mask->hdr.src_addr,
> +					ipv6_addr_mask,
> +					RTE_DIM(ipv6_mask->hdr.src_addr))) {
> +					input_set |= IAVF_INSET_IPV6_SRC;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, IPV6, SRC);
> +				}
> +				if (!memcmp(ipv6_mask->hdr.dst_addr,
> +					ipv6_addr_mask,
> +					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
> +					input_set |= IAVF_INSET_IPV6_DST;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, IPV6, DST);
> +				}
> +
> +				rte_memcpy(hdr->buffer,
> +					&ipv6_spec->hdr,
> +					sizeof(ipv6_spec->hdr));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_UDP:
> +			udp_spec = item->spec;
> +			udp_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.
> +				proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
> +
> +			if (udp_spec && udp_mask) {
> +				if (udp_mask->hdr.dgram_len ||
> +					udp_mask->hdr.dgram_cksum) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid UDP mask");
> +					return -rte_errno;
> +				}
> +
> +				if (udp_mask->hdr.src_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_UDP_SRC_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, UDP, SRC_PORT);
> +				}
> +				if (udp_mask->hdr.dst_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_UDP_DST_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, UDP, DST_PORT);
> +				}
> +
> +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> +					rte_memcpy(hdr->buffer,
> +						&udp_spec->hdr,
> +						sizeof(udp_spec->hdr));
> +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> +					rte_memcpy(hdr->buffer,
> +						&udp_spec->hdr,
> +						sizeof(udp_spec->hdr));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_TCP:
> +			tcp_spec = item->spec;
> +			tcp_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.
> +				proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
> +
> +			if (tcp_spec && tcp_mask) {
> +				if (tcp_mask->hdr.sent_seq ||
> +					tcp_mask->hdr.recv_ack ||
> +					tcp_mask->hdr.data_off ||
> +					tcp_mask->hdr.tcp_flags ||
> +					tcp_mask->hdr.rx_win ||
> +					tcp_mask->hdr.cksum ||
> +					tcp_mask->hdr.tcp_urp) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid TCP mask");
> +					return -rte_errno;
> +				}
> +
> +				if (tcp_mask->hdr.src_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_TCP_SRC_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, TCP, SRC_PORT);
> +				}
> +				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_TCP_DST_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, TCP, DST_PORT);
> +				}
> +
> +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> +					rte_memcpy(hdr->buffer,
> +						&tcp_spec->hdr,
> +						sizeof(tcp_spec->hdr));
> +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> +					rte_memcpy(hdr->buffer,
> +						&tcp_spec->hdr,
> +						sizeof(tcp_spec->hdr));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_SCTP:
> +			sctp_spec = item->spec;
> +			sctp_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.
> +				proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
> +
> +			if (sctp_spec && sctp_mask) {
> +				if (sctp_mask->hdr.cksum) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid SCTP mask");
> +					return -rte_errno;
> +				}
> +
> +				if (sctp_mask->hdr.src_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_SCTP_SRC_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, SCTP, SRC_PORT);
> +				}
> +				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_SCTP_DST_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> +						hdr, SCTP, DST_PORT);
> +				}
> +
> +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> +					rte_memcpy(hdr->buffer,
> +						&sctp_spec->hdr,
> +						sizeof(sctp_spec->hdr));
> +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> +					rte_memcpy(hdr->buffer,
> +						&sctp_spec->hdr,
> +						sizeof(sctp_spec->hdr));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_VOID:
> +			break;
> +
> +		default:
> +			rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM, item,
> +					"Invalid pattern item.");
> +			return -rte_errno;
> +		}
> +	}
> +
> +	filter->input_set = input_set;
> +
> +	return 0;
> +}
> +
> +static int
> +iavf_fdir_parse(struct iavf_adapter *ad,
> +		struct iavf_pattern_match_item *array,
> +		uint32_t array_len,
> +		const struct rte_flow_item pattern[],
> +		const struct rte_flow_action actions[],
> +		void **meta,
> +		struct rte_flow_error *error)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> +	struct iavf_fdir_conf *filter = &vf->fdir.conf;
> +	struct iavf_pattern_match_item *item = NULL;
> +	uint64_t input_set;
> +	int ret;
> +
> +	memset(filter, 0, sizeof(*filter));
> +
> +	item = iavf_search_pattern_match_item(pattern, array, array_len,
> error);
> +	if (!item)
> +		return -rte_errno;
> +
> +	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
> +	if (ret)
> +		goto error;
> +
> +	input_set = filter->input_set;
> +	if (!input_set || input_set & ~item->input_set_mask) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
> +				"Invalid input set");
> +		ret = -rte_errno;
> +		goto error;
> +	}
> +
> +	ret = iavf_fdir_parse_action(ad, actions, error, filter);
> +	if (ret)
> +		goto error;
> +
> +	if (meta)
> +		*meta = filter;
> +
> +error:
> +	rte_free(item);
> +	return ret;
> +}
> +
> +static struct iavf_flow_parser iavf_fdir_parser = {
> +	.engine = &iavf_fdir_engine,
> +	.array = iavf_fdir_pattern,
> +	.array_len = RTE_DIM(iavf_fdir_pattern),
> +	.parse_pattern_action = iavf_fdir_parse,
> +	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
> +};
> +
> +RTE_INIT(iavf_fdir_engine_register)
> +{
> +	iavf_register_flow_engine(&iavf_fdir_engine);
> +}
> diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
> index 11c70f5..77bfd1b 100644
> --- a/drivers/net/iavf/iavf_vchnl.c
> +++ b/drivers/net/iavf/iavf_vchnl.c
> @@ -342,7 +342,8 @@
> 
>  	caps = IAVF_BASIC_OFFLOAD_CAPS |
> VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
>  		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
> -		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
> +		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
> +		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
> 
>  	args.in_args = (uint8_t *)&caps;
>  	args.in_args_size = sizeof(caps);
> @@ -867,3 +868,128 @@
> 
>  	return err;
>  }
> +
> +int
> +iavf_fdir_add(struct iavf_adapter *adapter,
> +	struct iavf_fdir_conf *filter)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> +	struct virtchnl_fdir_status *fdir_status;
> +
> +	struct iavf_cmd_info args;
> +	int err;
> +
> +	filter->input.vsi_id = vf->vsi_res->vsi_id;
> +	filter->input.validate_only = 0;
> +
> +	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> +	args.in_args = (uint8_t *)(&filter->input);
> +	args.in_args_size = sizeof(*(&filter->input));
> +	args.out_buffer = vf->aq_resp;
> +	args.out_size = IAVF_AQ_BUF_SZ;
> +
> +	err = iavf_execute_vf_cmd(adapter, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR, "fail to execute command
> OP_ADD_FDIR_FILTER");
> +		return err;
> +	}
> +
> +	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
> +	filter->flow_id = fdir_status->flow_id;
> +
> +	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
> +		PMD_DRV_LOG(INFO,
> +			"add rule request is successfully done by PF");
> +	else if (fdir_status->status ==
> VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE)
> +		PMD_DRV_LOG(INFO,
> +			"add rule request is failed due to no hw resource");
> +	else if (fdir_status->status ==
> VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT)
> +		PMD_DRV_LOG(INFO,
> +			"add rule request is failed due to the rule is already
> existed");
> +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID)
> +		PMD_DRV_LOG(INFO,
> +			"add rule request is failed due to the hw doesn't
> support");
> +	else if (fdir_status->status ==
> VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
> +		PMD_DRV_LOG(INFO,
> +			"add rule request is failed due to time out for
> programming");
> +
> +	return 0;
> +};
> +
> +int
> +iavf_fdir_del(struct iavf_adapter *adapter,
> +	struct iavf_fdir_conf *filter)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> +	struct virtchnl_fdir_status *fdir_status;
> +
> +	struct iavf_cmd_info args;
> +	int err;
> +
> +	filter->input.vsi_id = vf->vsi_res->vsi_id;
> +	filter->input.flow_id = filter->flow_id;
> +
> +	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
> +	args.in_args = (uint8_t *)(&filter->input);
> +	args.in_args_size = sizeof(filter->input);
> +	args.out_buffer = vf->aq_resp;
> +	args.out_size = IAVF_AQ_BUF_SZ;
> +
> +	err = iavf_execute_vf_cmd(adapter, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR, "fail to execute command
> OP_DEL_FDIR_FILTER");
> +		return err;
> +	}
> +
> +	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
> +
> +	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
> +		PMD_DRV_LOG(INFO,
> +			"delete rule request is successfully done by PF");
> +	else if (fdir_status->status ==
> VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST)
> +		PMD_DRV_LOG(INFO,
> +			"delete rule request is failed due to this rule doesn't
> exist");
> +	else if (fdir_status->status ==
> VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
> +		PMD_DRV_LOG(INFO,
> +			"delete rule request is failed due to time out for
> programming");
> +
> +	return 0;
> +};
> +
> +int
> +iavf_fdir_check(struct iavf_adapter *adapter,
> +		struct iavf_fdir_conf *filter)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> +	struct virtchnl_fdir_status *fdir_status;
> +
> +	struct iavf_cmd_info args;
> +	int err;
> +
> +	filter->input.vsi_id = vf->vsi_res->vsi_id;
> +	filter->input.validate_only = 1;
> +
> +	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> +	args.in_args = (uint8_t *)(&filter->input);
> +	args.in_args_size = sizeof(*(&filter->input));
> +	args.out_buffer = vf->aq_resp;
> +	args.out_size = IAVF_AQ_BUF_SZ;
> +
> +	err = iavf_execute_vf_cmd(adapter, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR, "fail to check flow director rule");
> +		return err;
> +	}
> +
> +	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
> +
> +	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
> +		PMD_DRV_LOG(INFO,
> +			"check rule request is successfully done by PF");
> +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID)
> +		PMD_DRV_LOG(INFO,
> +			"check rule request is failed due to parameters
> validation"
> +			" or HW doesn't support");
> +
> +	return 0;
> +}
> diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
> index 32eabca..ce71054 100644
> --- a/drivers/net/iavf/meson.build
> +++ b/drivers/net/iavf/meson.build
> @@ -13,6 +13,7 @@ sources = files(
>  	'iavf_rxtx.c',
>  	'iavf_vchnl.c',
>  	'iavf_generic_flow.c',
> +	'iavf_fdir.c',
>  )
> 
>  if arch_subdir == 'x86'
> --
> 1.8.3.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [dpdk-dev] [PATCH 5/5] net/iavf: add support for FDIR mark action
  2020-03-18  5:42 ` [dpdk-dev] [PATCH 5/5] net/iavf: add support for FDIR mark action Simei Su
@ 2020-03-31  5:20   ` Cao, Yahui
  2020-03-31  7:05     ` Su, Simei
  0 siblings, 1 reply; 43+ messages in thread
From: Cao, Yahui @ 2020-03-31  5:20 UTC (permalink / raw)
  To: Su, Simei, Ye, Xiaolong, Zhang, Qi Z; +Cc: dev, Wu, Jingjing



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Wednesday, March 18, 2020 1:42 PM
> To: Ye, Xiaolong <xiaolong.ye@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Su, Simei <simei.su@intel.com>
> Subject: [PATCH 5/5] net/iavf: add support for FDIR mark action
> 
> This patch enables mark action support and takes mark only case into
> consideration.
> 
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
>  drivers/net/iavf/iavf.h      |  1 +
>  drivers/net/iavf/iavf_fdir.c | 46
> +++++++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 46 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
> index 62a3eb8..178d481 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -103,6 +103,7 @@ struct iavf_fdir_conf {
>  	struct virtchnl_fdir_fltr input;
>  	uint64_t input_set;
>  	uint32_t flow_id;
> +	uint32_t mark_flag;
>  };
> 
>  struct iavf_fdir_info {
> diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
> index 8d49c28..a03bc09 100644
> --- a/drivers/net/iavf/iavf_fdir.c
> +++ b/drivers/net/iavf/iavf_fdir.c
> @@ -18,6 +18,7 @@
>  #include "iavf.h"
>  #include "iavf_generic_flow.h"
>  #include "virtchnl.h"
> +#include "iavf_rxtx.h"
> 
>  #define IAVF_FDIR_MAX_QREGION_SIZE 128
> 
> @@ -171,6 +172,9 @@
>  		goto free_entry;
>  	}
> 
> +	if (filter->mark_flag == 1)
> +		iavf_fdir_rx_proc_enable(ad, 1);
> +
>  	rte_memcpy(rule, filter, sizeof(*rule));
>  	flow->rule = rule;
> 
> @@ -199,6 +203,9 @@
>  		return -rte_errno;
>  	}
> 
> +	if (filter->mark_flag == 1)
> +		iavf_fdir_rx_proc_enable(ad, 0);
> +
>  	flow->rule = NULL;
>  	rte_free(filter);
> 
> @@ -297,7 +304,9 @@
>  			struct iavf_fdir_conf *filter)
>  {
>  	const struct rte_flow_action_queue *act_q;
> +	const struct rte_flow_action_mark *mark_spec = NULL;
>  	uint32_t dest_num = 0;
> +	uint32_t mark_num = 0;
>  	int ret;
> 
>  	int number = 0;
> @@ -367,6 +376,20 @@
>  			filter->input.rule_cfg.action_set.count = ++number;
>  			break;
> 
> +		case RTE_FLOW_ACTION_TYPE_MARK:
> +			mark_num++;
> +
> +			filter->mark_flag = 1;
> +			mark_spec = actions->conf;
> +			filter_action = &filter->input.rule_cfg.
> +					action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_FDIR_ACT_MARK;
> +			filter_action->mark_id = mark_spec->id;
> +
> +			filter->input.rule_cfg.action_set.count = ++number;
> +			break;
> +
>  		default:
>  			rte_flow_error_set(error, EINVAL,
>  					RTE_FLOW_ERROR_TYPE_ACTION,
> actions, @@ -375,13 +398,34 @@
>  		}
>  	}
> 
> -	if (dest_num == 0 || dest_num >= 2) {
> +	if (dest_num >= 2) {
>  		rte_flow_error_set(error, EINVAL,
>  			RTE_FLOW_ERROR_TYPE_ACTION, actions,
>  			"Unsupported action combination");
>  		return -rte_errno;
>  	}
> 
> +	if (mark_num >= 2) {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +			"Too many mark actions");
> +		return -rte_errno;
> +	}
> +
> +	if (dest_num + mark_num == 0) {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +			"Empty action");
> +		return -rte_errno;
> +	}
> +
> +	/* Mark only is equal to mark + passthru. */
> +	if (dest_num == 0) {
> +		filter_action = &filter->input.rule_cfg.
> +				action_set.actions[number];
> +		filter_action->type = VIRTCHNL_FDIR_ACT_PASSTHRU;
[Cao, Yahui]  
The line "filter->input.rule_cfg.action_set.count = ++number;" is missing here.
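
With that increment restored, the mark-only branch would read:

	/* Mark only is equal to mark + passthru. */
	if (dest_num == 0) {
		filter_action = &filter->input.rule_cfg.
				action_set.actions[number];
		filter_action->type = VIRTCHNL_FDIR_ACT_PASSTHRU;
		filter->input.rule_cfg.action_set.count = ++number;
	}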

> +	}
> +
>  	return 0;
>  }
> 
> --
> 1.8.3.1
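
For context, a minimal sketch of how an application would use the new mark
action; the mark id 0x1234 is an arbitrary illustration value:

	/* Mark-only rule: the PMD internally appends a passthru action, so
	 * matched packets keep their normal destination and carry the mark.
	 */
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* On receive, a matched mbuf has PKT_RX_FDIR_ID set in ol_flags and
	 * carries the mark value in mbuf->hash.fdir.hi.
	 */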


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [dpdk-dev] [PATCH 5/5] net/iavf: add support for FDIR mark action
  2020-03-31  5:20   ` Cao, Yahui
@ 2020-03-31  7:05     ` Su, Simei
  0 siblings, 0 replies; 43+ messages in thread
From: Su, Simei @ 2020-03-31  7:05 UTC (permalink / raw)
  To: Cao, Yahui, Ye, Xiaolong, Zhang, Qi Z; +Cc: dev, Wu, Jingjing

Hi, Yahui

> -----Original Message-----
> From: Cao, Yahui <yahui.cao@intel.com>
> Sent: Tuesday, March 31, 2020 1:21 PM
> To: Su, Simei <simei.su@intel.com>; Ye, Xiaolong <xiaolong.ye@intel.com>;
> Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Wu, Jingjing <jingjing.wu@intel.com>
> Subject: RE: [PATCH 5/5] net/iavf: add support for FDIR mark action
> 
> 
> 
> > -----Original Message-----
> > From: Su, Simei <simei.su@intel.com>
> > Sent: Wednesday, March 18, 2020 1:42 PM
> > To: Ye, Xiaolong <xiaolong.ye@intel.com>; Zhang, Qi Z
> > <qi.z.zhang@intel.com>
> > Cc: dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>; Wu, Jingjing
> > <jingjing.wu@intel.com>; Su, Simei <simei.su@intel.com>
> > Subject: [PATCH 5/5] net/iavf: add support for FDIR mark action
> >
> > This patch enables mark action support and takes mark only case into
> > consideration.
> >
> > Signed-off-by: Simei Su <simei.su@intel.com>
> > ---
> >  drivers/net/iavf/iavf.h      |  1 +
> >  drivers/net/iavf/iavf_fdir.c | 46
> > +++++++++++++++++++++++++++++++++++++++++++-
> >  2 files changed, 46 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index
> > 62a3eb8..178d481 100644
> > --- a/drivers/net/iavf/iavf.h
> > +++ b/drivers/net/iavf/iavf.h
> > @@ -103,6 +103,7 @@ struct iavf_fdir_conf {  struct virtchnl_fdir_fltr
> > input;  uint64_t input_set;  uint32_t flow_id;
> > +uint32_t mark_flag;
> >  };
> >
> >  struct iavf_fdir_info {
> > diff --git a/drivers/net/iavf/iavf_fdir.c
> > b/drivers/net/iavf/iavf_fdir.c index
> > 8d49c28..a03bc09 100644
> > --- a/drivers/net/iavf/iavf_fdir.c
> > +++ b/drivers/net/iavf/iavf_fdir.c
> > @@ -18,6 +18,7 @@
> >  #include "iavf.h"
> >  #include "iavf_generic_flow.h"
> >  #include "virtchnl.h"
> > +#include "iavf_rxtx.h"
> >
> >  #define IAVF_FDIR_MAX_QREGION_SIZE 128
> >
> > @@ -171,6 +172,9 @@
> >  goto free_entry;
> >  }
> >
> > +if (filter->mark_flag == 1)
> > +iavf_fdir_rx_proc_enable(ad, 1);
> > +
> >  rte_memcpy(rule, filter, sizeof(*rule));  flow->rule = rule;
> >
> > @@ -199,6 +203,9 @@
> >  return -rte_errno;
> >  }
> >
> > +if (filter->mark_flag == 1)
> > +iavf_fdir_rx_proc_enable(ad, 0);
> > +
> >  flow->rule = NULL;
> >  rte_free(filter);
> >
> > @@ -297,7 +304,9 @@
> >  struct iavf_fdir_conf *filter)
> >  {
> >  const struct rte_flow_action_queue *act_q;
> > +const struct rte_flow_action_mark *mark_spec = NULL;
> >  uint32_t dest_num = 0;
> > +uint32_t mark_num = 0;
> >  int ret;
> >
> >  int number = 0;
> > @@ -367,6 +376,20 @@
> >  filter->input.rule_cfg.action_set.count = ++number;  break;
> >
> > +case RTE_FLOW_ACTION_TYPE_MARK:
> > +mark_num++;
> > +
> > +filter->mark_flag = 1;
> > +mark_spec = actions->conf;
> > +filter_action = &filter->input.rule_cfg.
> > +action_set.actions[number];
> > +
> > +filter_action->type = VIRTCHNL_FDIR_ACT_MARK; filter_action->mark_id
> > += mark_spec->id;
> > +
> > +filter->input.rule_cfg.action_set.count = ++number;
> > +break;
> > +
> >  default:
> >  rte_flow_error_set(error, EINVAL,
> >  RTE_FLOW_ERROR_TYPE_ACTION,
> > actions, @@ -375,13 +398,34 @@
> >  }
> >  }
> >
> > -if (dest_num == 0 || dest_num >= 2) {
> > +if (dest_num >= 2) {
> >  rte_flow_error_set(error, EINVAL,
> >  RTE_FLOW_ERROR_TYPE_ACTION, actions,
> >  "Unsupported action combination");
> >  return -rte_errno;
> >  }
> >
> > +if (mark_num >= 2) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, actions,
> > +"Too many mark actions");
> > +return -rte_errno;
> > +}
> > +
> > +	if (dest_num + mark_num == 0) {
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ACTION, actions,
> > +			"Empty action");
> > +		return -rte_errno;
> > +	}
> > +
> > +	/* Mark only is equal to mark + passthru. */
> > +	if (dest_num == 0) {
> > +		filter_action = &filter->input.rule_cfg.
> > +				action_set.actions[number];
> > +		filter_action->type = VIRTCHNL_FDIR_ACT_PASSTHRU;
> [Cao, Yahui]
> The line "filter->input.rule_cfg.action_set.count = ++number;" is missing here.

 Yes, I have found this issue and already fixed it in a follow-up patch; the fix will be included in the next version. Thanks!

Br
Simei

> 
> > +}
> > +
> >  return 0;
> >  }
> >
> > --
> > 1.8.3.1
> 


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [dpdk-dev] [PATCH 1/5] net/iavf: add support for FDIR basic rule
  2020-03-31  5:20   ` Cao, Yahui
@ 2020-03-31  7:12     ` Su, Simei
  0 siblings, 0 replies; 43+ messages in thread
From: Su, Simei @ 2020-03-31  7:12 UTC (permalink / raw)
  To: Cao, Yahui, Ye, Xiaolong, Zhang, Qi Z; +Cc: dev, Wu, Jingjing

Hi, Yahui

> -----Original Message-----
> From: Cao, Yahui <yahui.cao@intel.com>
> Sent: Tuesday, March 31, 2020 1:20 PM
> To: Su, Simei <simei.su@intel.com>; Ye, Xiaolong <xiaolong.ye@intel.com>;
> Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Wu, Jingjing <jingjing.wu@intel.com>
> Subject: RE: [PATCH 1/5] net/iavf: add support for FDIR basic rule
> 
> 
> 
> > -----Original Message-----
> > From: Su, Simei <simei.su@intel.com>
> > Sent: Wednesday, March 18, 2020 1:42 PM
> > To: Ye, Xiaolong <xiaolong.ye@intel.com>; Zhang, Qi Z
> > <qi.z.zhang@intel.com>
> > Cc: dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>; Wu, Jingjing
> > <jingjing.wu@intel.com>; Su, Simei <simei.su@intel.com>
> > Subject: [PATCH 1/5] net/iavf: add support for FDIR basic rule
> >
> > This patch adds FDIR create/destroy/validate function in AVF.
> > Common pattern and queue/qgroup/passthru/drop actions are supported.
> >
> > Signed-off-by: Simei Su <simei.su@intel.com>
> > ---
> >  drivers/net/iavf/Makefile     |   1 +
> >  drivers/net/iavf/iavf.h       |  16 +
> >  drivers/net/iavf/iavf_fdir.c  | 762
> > ++++++++++++++++++++++++++++++++++++++++++
> >  drivers/net/iavf/iavf_vchnl.c | 128 ++++++-
> >  drivers/net/iavf/meson.build  |   1 +
> >  5 files changed, 907 insertions(+), 1 deletion(-)  create mode 100644
> > drivers/net/iavf/iavf_fdir.c
> >
> > diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
> > index
> > 1bf0f26..193bc55 100644
> > --- a/drivers/net/iavf/Makefile
> > +++ b/drivers/net/iavf/Makefile
> > @@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) +=
> iavf_ethdev.c
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
> > +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
> >  ifeq ($(CONFIG_RTE_ARCH_X86), y)
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c  endif
> diff
> > --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index
> > 48b9509..62a3eb8
> > 100644
> > --- a/drivers/net/iavf/iavf.h
> > +++ b/drivers/net/iavf/iavf.h
> > @@ -99,6 +99,16 @@ struct iavf_vsi {
> >  struct iavf_flow_parser_node;
> >  TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
> >
> > +struct iavf_fdir_conf {
> > +struct virtchnl_fdir_fltr input;
> > +uint64_t input_set;
> > +uint32_t flow_id;
> > +};
> > +
> > +struct iavf_fdir_info {
> > +struct iavf_fdir_conf conf;
> > +};
> > +
> >  /* TODO: is that correct to assume the max number to be 16 ?*/
> >  #define IAVF_MAX_MSIX_VECTORS   16
> >
> > @@ -138,6 +148,8 @@ struct iavf_info {  struct iavf_flow_list
> > flow_list;  struct iavf_parser_list rss_parser_list;  struct
> > iavf_parser_list dist_parser_list;
> > +
> > +struct iavf_fdir_info fdir; /* flow director info */
> >  };
> >
> >  #define IAVF_MAX_PKT_TYPE 1024
> > @@ -260,4 +272,8 @@ int iavf_config_promisc(struct iavf_adapter
> > *adapter, bool enable_unicast,  int iavf_add_del_eth_addr(struct
> iavf_adapter *adapter,
> >   struct rte_ether_addr *addr, bool add);  int
> > iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool
> > add);
> > +int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf
> > +*filter); int iavf_fdir_del(struct iavf_adapter *adapter, struct
> > +iavf_fdir_conf *filter); int iavf_fdir_check(struct iavf_adapter
> > +*adapter, struct iavf_fdir_conf *filter);
> >  #endif /* _IAVF_ETHDEV_H_ */
> > diff --git a/drivers/net/iavf/iavf_fdir.c
> > b/drivers/net/iavf/iavf_fdir.c new file mode 100644 index
> > 0000000..dd321ba
> > --- /dev/null
> > +++ b/drivers/net/iavf/iavf_fdir.c
> > @@ -0,0 +1,762 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2019 Intel Corporation  */
> > +
> > +#include <sys/queue.h>
> > +#include <stdio.h>
> > +#include <errno.h>
> > +#include <stdint.h>
> > +#include <string.h>
> > +#include <unistd.h>
> > +#include <stdarg.h>
> > +
> > +#include <rte_ether.h>
> > +#include <rte_ethdev_driver.h>
> > +#include <rte_malloc.h>
> > +#include <rte_tailq.h>
> > +
> > +#include "iavf.h"
> > +#include "iavf_generic_flow.h"
> > +#include "virtchnl.h"
> > +
> > +#define IAVF_FDIR_MAX_QREGION_SIZE 128
> > +
> > +#define IAVF_FDIR_IPV6_TC_OFFSET 20
> > +#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
> > +
> > +#define IAVF_FDIR_INSET_ETH (\
> > +IAVF_INSET_ETHERTYPE)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4 (\
> > +IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> IAVF_INSET_IPV4_PROTO |
> > +IAVF_INSET_IPV4_TOS | \
> > +IAVF_INSET_IPV4_TTL)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\ IAVF_INSET_IPV4_SRC |
> > +IAVF_INSET_IPV4_DST | \ IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL
> | \
> > +IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\ IAVF_INSET_IPV4_SRC |
> > +IAVF_INSET_IPV4_DST | \ IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL
> | \
> > +IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\ IAVF_INSET_IPV4_SRC |
> > +IAVF_INSET_IPV4_DST | \ IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL
> | \
> > +IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6 (\
> > +IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> > +IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
> > +IAVF_INSET_IPV6_HOP_LIMIT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\ IAVF_INSET_IPV6_SRC |
> > +IAVF_INSET_IPV6_DST | \ IAVF_INSET_IPV6_TC |
> > +IAVF_INSET_IPV6_HOP_LIMIT | \ IAVF_INSET_UDP_SRC_PORT |
> > +IAVF_INSET_UDP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\ IAVF_INSET_IPV6_SRC |
> > +IAVF_INSET_IPV6_DST | \ IAVF_INSET_IPV6_TC |
> > +IAVF_INSET_IPV6_HOP_LIMIT | \ IAVF_INSET_TCP_SRC_PORT |
> > +IAVF_INSET_TCP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\ IAVF_INSET_IPV6_SRC |
> > +IAVF_INSET_IPV6_DST | \ IAVF_INSET_IPV6_TC |
> > +IAVF_INSET_IPV6_HOP_LIMIT | \ IAVF_INSET_SCTP_SRC_PORT |
> > +IAVF_INSET_SCTP_DST_PORT)
> > +
> > +static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
> > +{iavf_pattern_ethertype,IAVF_FDIR_INSET_ETH,
> > IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv4,IAVF_FDIR_INSET_ETH_IPV4,
> > IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv4_udp,
> > IAVF_FDIR_INSET_ETH_IPV4_UDP,IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv4_tcp,
> > IAVF_FDIR_INSET_ETH_IPV4_TCP,IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv4_sctp,
> > IAVF_FDIR_INSET_ETH_IPV4_SCTP,IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv6,IAVF_FDIR_INSET_ETH_IPV6,
> > IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv6_udp,
> > IAVF_FDIR_INSET_ETH_IPV6_UDP,IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv6_tcp,
> > IAVF_FDIR_INSET_ETH_IPV6_TCP,IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv6_sctp,
> > IAVF_FDIR_INSET_ETH_IPV6_SCTP,IAVF_INSET_NONE},
> > +};
> > +
> > +static struct iavf_flow_parser iavf_fdir_parser;
> > +
> > +static int
> > +iavf_fdir_init(struct iavf_adapter *ad) { struct iavf_info *vf =
> > +IAVF_DEV_PRIVATE_TO_VF(ad); struct iavf_flow_parser *parser;
> > +
> > +if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) parser =
> > +&iavf_fdir_parser; else return -ENOTSUP;
> > +
> > +return iavf_register_parser(parser, ad); }
> > +
> > +static void
> > +iavf_fdir_uninit(struct iavf_adapter *ad) { struct iavf_flow_parser
> > +*parser;
> > +
> > +parser = &iavf_fdir_parser;
> > +
> > +iavf_unregister_parser(parser, ad);
> > +}
> > +
> > +static int
> > +iavf_fdir_create(struct iavf_adapter *ad, struct rte_flow *flow, void
> > +*meta, struct rte_flow_error *error) { struct iavf_fdir_conf *filter
> > += meta; struct iavf_fdir_conf *rule; int ret;
> > +
> > +rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0); if (!rule) {
> > +rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
> NULL,
> > +"Failed to allocate memory"); return -rte_errno; }
> > +
> > +ret = iavf_fdir_add(ad, filter);
> > +if (ret) {
> > +rte_flow_error_set(error, -ret,
> > +RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +"Add filter rule failed.");
> > +goto free_entry;
> > +}
> > +
> > +rte_memcpy(rule, filter, sizeof(*rule));
> > +flow->rule = rule;
> > +
> > +return 0;
> > +
> > +free_entry:
> > +rte_free(rule);
> > +return -rte_errno;
> > +}
> > +
> > +static int
> > +iavf_fdir_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
> > +struct rte_flow_error *error) { struct iavf_fdir_conf *filter; int
> > +ret;
> > +
> > +filter = (struct iavf_fdir_conf *)flow->rule;
> > +
> > +ret = iavf_fdir_del(ad, filter);
> > +if (ret) {
> > +rte_flow_error_set(error, -ret,
> > +RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +"Del filter rule failed.");
> > +return -rte_errno;
> > +}
> > +
> > +flow->rule = NULL;
> > +rte_free(filter);
> > +
> > +return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_validation(struct iavf_adapter *ad, __rte_unused struct
> > +rte_flow *flow, void *meta, struct rte_flow_error *error) { struct
> > +iavf_fdir_conf *filter = meta; int ret;
> > +
> > +ret = iavf_fdir_check(ad, filter);
> > +if (ret) {
> > +rte_flow_error_set(error, -ret,
> > +RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +"Validate filter rule failed.");
> > +return -rte_errno;
> > +}
> > +
> > +return 0;
> > +};
> > +
> > +static struct iavf_flow_engine iavf_fdir_engine = { .init =
> > +iavf_fdir_init, .uninit = iavf_fdir_uninit, .create =
> > +iavf_fdir_create, .destroy = iavf_fdir_destroy, .validation =
> > +iavf_fdir_validation, .type = IAVF_FLOW_ENGINE_FDIR, };
> > +
> > +static int
> > +iavf_fdir_parse_action_qregion(struct iavf_adapter *ad, struct
> > +rte_flow_error *error, const struct rte_flow_action *act, struct
> > +virtchnl_filter_action *filter_action) { const struct
> > +rte_flow_action_rss *rss = act->conf; uint32_t i;
> > +
> > +if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> > +rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"Invalid action."); return -rte_errno; }
> > +
> > +if (rss->queue_num <= 1) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"Queue region size can't be 0 or 1."); return -rte_errno; }
> > +
> > +/* check if queue index for queue region is continuous */ for (i = 0;
> > +i < rss->queue_num - 1; i++) { if (rss->queue[i + 1] != rss->queue[i]
> > ++ 1) { rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ACTION,
> > +act, "Discontinuous queue region"); return -rte_errno; } }
> > +
> > +if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data-
> > >nb_rx_queues) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"Invalid queue region indexes.");
> > +return -rte_errno;
> > +}
> > +
> > +if (!(rte_is_power_of_2(rss->queue_num) && (rss->queue_num <=
> > +IAVF_FDIR_MAX_QREGION_SIZE))) { rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, act, "The region size should be any of
> > +the following
> > values:"
> > +"1, 2, 4, 8, 16, 32, 64, 128 as long as the total
> > number "
> > +"of queues do not exceed the VSI allocation."); return -rte_errno; }
> > +
> > +filter_action->q_index = rss->queue[0]; filter_action->q_region =
> > +rte_fls_u32(rss->queue_num) - 1;
> > +
> > +return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_parse_action(struct iavf_adapter *ad, const struct
> > +rte_flow_action actions[], struct rte_flow_error *error, struct
> > +iavf_fdir_conf *filter) { const struct rte_flow_action_queue *act_q;
> > +uint32_t dest_num = 0; int ret;
> > +
> > +int number = 0;
> > +struct virtchnl_filter_action *filter_action;
> > +
> > +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
> > +		switch (actions->type) {
> > +		case RTE_FLOW_ACTION_TYPE_VOID:
> > +			break;
> > +
> > +		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
> > +			dest_num++;
> > +
> > +			filter_action = &filter->input.rule_cfg.
> > +					action_set.actions[number];
> > +
> > +			filter_action->type = VIRTCHNL_FDIR_ACT_PASSTHRU;
> > +
> > +			filter->input.rule_cfg.action_set.count = ++number;
> > +			break;
> > +
> > +		case RTE_FLOW_ACTION_TYPE_DROP:
> > +			dest_num++;
> > +
> > +			filter_action = &filter->input.rule_cfg.
> > +					action_set.actions[number];
> > +
> > +			filter_action->type = VIRTCHNL_FDIR_ACT_DROP;
> > +
> > +			filter->input.rule_cfg.action_set.count = ++number;
>  [Cao, Yahui]
> It seems there is no upper bound check on count/number, so there may be
> out-of-bounds index access. This also applies to all the count/number
> statements below.
> 
  Yes, I forgot to check this. Thanks for the reminder.

Br
Simei

> 
> 
> > +break;
> > +
> > +case RTE_FLOW_ACTION_TYPE_QUEUE:
> > +dest_num++;
> > +
> > +act_q = actions->conf;
> > +filter_action = &filter->input.rule_cfg.
> > +action_set.actions[number];
> > +
> > +filter_action->type = VIRTCHNL_FDIR_ACT_QUEUE;
> > +filter_action->q_index = act_q->index;
> > +
> > +if (filter_action->q_index >=
> > +ad->eth_dev->data->nb_rx_queues) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION,
> > +actions, "Invalid queue for FDIR.");
> > +return -rte_errno;
> > +}
> > +
> > +filter->input.rule_cfg.action_set.count = ++number;
> > +break;
> > +
> > +case RTE_FLOW_ACTION_TYPE_RSS:
> > +dest_num++;
> > +
> > +filter_action = &filter->input.rule_cfg.
> > +action_set.actions[number];
> > +
> > +filter_action->type = VIRTCHNL_FDIR_ACT_Q_REGION;
> > +
> > +ret = iavf_fdir_parse_action_qregion(ad,
> > +error, actions, filter_action);
> > +if (ret)
> > +return ret;
> > +
> > +filter->input.rule_cfg.action_set.count = ++number;
> > +break;
> > +
> > +default:
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION,
> > actions,
> > +"Invalid action.");
> > +return -rte_errno;
> > +}
> > +}
> > +
> > +if (dest_num == 0 || dest_num >= 2) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, actions,
> > +"Unsupported action combination");
> > +return -rte_errno;
> > +}
> > +
> > +return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
> > +const struct rte_flow_item pattern[],
> > +struct rte_flow_error *error,
> > +struct iavf_fdir_conf *filter)
> > +{
> > +const struct rte_flow_item *item = pattern;
> > +enum rte_flow_item_type item_type;
> > +enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
> > +const struct rte_flow_item_eth *eth_spec, *eth_mask;
> > +const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> > +const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> > +const struct rte_flow_item_udp *udp_spec, *udp_mask;
> > +const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> > +const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> > +uint64_t input_set = IAVF_INSET_NONE;
> > +
> > +enum rte_flow_item_type next_type;
> > +uint16_t ether_type;
> > +
> > +int layer = 0;
> > +struct virtchnl_proto_hdr *hdr;
> > +
> > +uint8_t  ipv6_addr_mask[16] = {
> > +0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
> > +0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
> > +};
> > +
> > +for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++)
> > {
> > +if (item->last) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +"Not support range");
> > +}
> > +
> > +item_type = item->type;
> > +
> > +switch (item_type) {
> > +case RTE_FLOW_ITEM_TYPE_ETH:
> > +eth_spec = item->spec;
> > +eth_mask = item->mask;
> > +next_type = (item + 1)->type;
> > +
> > +hdr = &filter->input.rule_cfg.proto_stack.
> > +proto_hdr[layer];
> > +
> > +VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
> > +
> > +if (next_type == RTE_FLOW_ITEM_TYPE_END &&
> > +(!eth_spec || !eth_mask)) {
> > +rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > +item, "NULL eth spec/mask.");
> > +return -rte_errno;
> > +}
> > +
> > +if (eth_spec && eth_mask) {
> > +if (!rte_is_zero_ether_addr(&eth_mask->src) ||
> > +    !rte_is_zero_ether_addr(&eth_mask->dst)) {
> > +rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +"Invalid MAC_addr mask.");
> > +return -rte_errno;
> > +}
> > +}
> > +
> > +if (eth_spec && eth_mask && eth_mask->type) {
> > +if (eth_mask->type != RTE_BE16(0xffff)) {
> > +rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > +item, "Invalid type mask.");
> > +return -rte_errno;
> > +}
> > +
> > +ether_type = rte_be_to_cpu_16(eth_spec-
> > >type);
> > +if (ether_type == RTE_ETHER_TYPE_IPV4 ||
> > +ether_type == RTE_ETHER_TYPE_IPV6)
> > {
> > +rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > +item,
> > +"Unsupported ether_type.");
> > +return -rte_errno;
> > +}
> > +
> > +input_set |= IAVF_INSET_ETHERTYPE;
> > +VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> > +ETH, ETHERTYPE);
> > +
> > +rte_memcpy(hdr->buffer,
> > +eth_spec, sizeof(*eth_spec));
> > +}
> > +
> > +filter->input.rule_cfg.proto_stack.count = ++layer;
> [Cao, Yahui]
> It seems there is no upper bound check on count/layer, so there may be
> out-of-bounds index access.
> This also applies to all the count/layer statements below.
> 
 Yes, I forgot to check this as well. Thanks again for the reminder.

Br
Simei

> 
> 
> 
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_IPV4:
> > +			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
> > +			ipv4_spec = item->spec;
> > +			ipv4_mask = item->mask;
> > +
> > +			hdr = &filter->input.rule_cfg.proto_stack.
> > +					proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
> > +
> > +			if (ipv4_spec && ipv4_mask) {
> > +				if (ipv4_mask->hdr.version_ihl ||
> > +					ipv4_mask->hdr.total_length ||
> > +					ipv4_mask->hdr.packet_id ||
> > +					ipv4_mask->hdr.fragment_offset ||
> > +					ipv4_mask->hdr.hdr_checksum) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item, "Invalid IPv4 mask.");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (ipv4_mask->hdr.type_of_service ==
> > +								UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV4_TOS;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, IPV4, DSCP);
> > +				}
> > +				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV4_PROTO;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, IPV4, PROT);
> > +				}
> > +				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV4_TTL;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, IPV4, TTL);
> > +				}
> > +				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
> > +					input_set |= IAVF_INSET_IPV4_SRC;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, IPV4, SRC);
> > +				}
> > +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
> > +					input_set |= IAVF_INSET_IPV4_DST;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, IPV4, DST);
> > +				}
> > +
> > +				rte_memcpy(hdr->buffer,
> > +					&ipv4_spec->hdr,
> > +					sizeof(ipv4_spec->hdr));
> > +			}
> > +
> > +			filter->input.rule_cfg.proto_stack.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_IPV6:
> > +			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
> > +			ipv6_spec = item->spec;
> > +			ipv6_mask = item->mask;
> > +
> > +			hdr = &filter->input.rule_cfg.proto_stack.
> > +					proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
> > +
> > +			if (ipv6_spec && ipv6_mask) {
> > +				if (ipv6_mask->hdr.payload_len) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item, "Invalid IPv6 mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if ((ipv6_mask->hdr.vtc_flow &
> > +					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
> > +					== rte_cpu_to_be_32(
> > +							IAVF_IPV6_TC_MASK)) {
> > +					input_set |= IAVF_INSET_IPV6_TC;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, IPV6, TC);
> > +				}
> > +				if (ipv6_mask->hdr.proto == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, IPV6, PROT);
> > +				}
> > +				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, IPV6, HOP_LIMIT);
> > +				}
> > +				if (!memcmp(ipv6_mask->hdr.src_addr,
> > +					ipv6_addr_mask,
> > +					RTE_DIM(ipv6_mask->hdr.src_addr))) {
> > +					input_set |= IAVF_INSET_IPV6_SRC;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, IPV6, SRC);
> > +				}
> > +				if (!memcmp(ipv6_mask->hdr.dst_addr,
> > +					ipv6_addr_mask,
> > +					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
> > +					input_set |= IAVF_INSET_IPV6_DST;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, IPV6, DST);
> > +				}
> > +
> > +				rte_memcpy(hdr->buffer,
> > +					&ipv6_spec->hdr,
> > +					sizeof(ipv6_spec->hdr));
> > +			}
> > +
> > +			filter->input.rule_cfg.proto_stack.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_UDP:
> > +			udp_spec = item->spec;
> > +			udp_mask = item->mask;
> > +
> > +			hdr = &filter->input.rule_cfg.proto_stack.
> > +					proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
> > +
> > +			if (udp_spec && udp_mask) {
> > +				if (udp_mask->hdr.dgram_len ||
> > +					udp_mask->hdr.dgram_cksum) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +						"Invalid UDP mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (udp_mask->hdr.src_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_UDP_SRC_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, UDP, SRC_PORT);
> > +				}
> > +				if (udp_mask->hdr.dst_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_UDP_DST_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, UDP, DST_PORT);
> > +				}
> > +
> > +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> > +					rte_memcpy(hdr->buffer,
> > +						&udp_spec->hdr,
> > +						sizeof(udp_spec->hdr));
> > +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> > +					rte_memcpy(hdr->buffer,
> > +						&udp_spec->hdr,
> > +						sizeof(udp_spec->hdr));
> > +			}
> > +
> > +			filter->input.rule_cfg.proto_stack.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_TCP:
> > +			tcp_spec = item->spec;
> > +			tcp_mask = item->mask;
> > +
> > +			hdr = &filter->input.rule_cfg.proto_stack.
> > +					proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
> > +
> > +			if (tcp_spec && tcp_mask) {
> > +				if (tcp_mask->hdr.sent_seq ||
> > +					tcp_mask->hdr.recv_ack ||
> > +					tcp_mask->hdr.data_off ||
> > +					tcp_mask->hdr.tcp_flags ||
> > +					tcp_mask->hdr.rx_win ||
> > +					tcp_mask->hdr.cksum ||
> > +					tcp_mask->hdr.tcp_urp) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +						"Invalid TCP mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (tcp_mask->hdr.src_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_TCP_SRC_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, TCP, SRC_PORT);
> > +				}
> > +				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_TCP_DST_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, TCP, DST_PORT);
> > +				}
> > +
> > +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> > +					rte_memcpy(hdr->buffer,
> > +						&tcp_spec->hdr,
> > +						sizeof(tcp_spec->hdr));
> > +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> > +					rte_memcpy(hdr->buffer,
> > +						&tcp_spec->hdr,
> > +						sizeof(tcp_spec->hdr));
> > +			}
> > +
> > +			filter->input.rule_cfg.proto_stack.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_SCTP:
> > +			sctp_spec = item->spec;
> > +			sctp_mask = item->mask;
> > +
> > +			hdr = &filter->input.rule_cfg.proto_stack.
> > +					proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
> > +
> > +			if (sctp_spec && sctp_mask) {
> > +				if (sctp_mask->hdr.cksum) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +						"Invalid SCTP mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (sctp_mask->hdr.src_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_SCTP_SRC_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, SCTP, SRC_PORT);
> > +				}
> > +				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_SCTP_DST_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
> > +						hdr, SCTP, DST_PORT);
> > +				}
> > +
> > +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> > +					rte_memcpy(hdr->buffer,
> > +						&sctp_spec->hdr,
> > +						sizeof(sctp_spec->hdr));
> > +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> > +					rte_memcpy(hdr->buffer,
> > +						&sctp_spec->hdr,
> > +						sizeof(sctp_spec->hdr));
> > +			}
> > +
> > +			filter->input.rule_cfg.proto_stack.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_VOID:
> > +			break;
> > +
> > +		default:
> > +			rte_flow_error_set(error, EINVAL,
> > +					RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +					"Invalid pattern item.");
> > +			return -rte_errno;
> > +		}
> > +	}
> > +
> > +	filter->input_set = input_set;
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_parse(struct iavf_adapter *ad,
> > +		struct iavf_pattern_match_item *array,
> > +		uint32_t array_len,
> > +		const struct rte_flow_item pattern[],
> > +		const struct rte_flow_action actions[],
> > +		void **meta,
> > +		struct rte_flow_error *error)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> > +	struct iavf_fdir_conf *filter = &vf->fdir.conf;
> > +	struct iavf_pattern_match_item *item = NULL;
> > +	uint64_t input_set;
> > +	int ret;
> > +
> > +	memset(filter, 0, sizeof(*filter));
> > +
> > +	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
> > +	if (!item)
> > +		return -rte_errno;
> > +
> > +	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
> > +	if (ret)
> > +		goto error;
> > +
> > +	input_set = filter->input_set;
> > +	if (!input_set || input_set & ~item->input_set_mask) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
> > +				"Invalid input set");
> > +		ret = -rte_errno;
> > +		goto error;
> > +	}
> > +
> > +	ret = iavf_fdir_parse_action(ad, actions, error, filter);
> > +	if (ret)
> > +		goto error;
> > +
> > +	if (meta)
> > +		*meta = filter;
> > +
> > +error:
> > +	rte_free(item);
> > +	return ret;
> > +}
> > +
> > +static struct iavf_flow_parser iavf_fdir_parser = {
> > +	.engine = &iavf_fdir_engine,
> > +	.array = iavf_fdir_pattern,
> > +	.array_len = RTE_DIM(iavf_fdir_pattern),
> > +	.parse_pattern_action = iavf_fdir_parse,
> > +	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
> > +};
> > +
> > +RTE_INIT(iavf_fdir_engine_register)
> > +{
> > +	iavf_register_flow_engine(&iavf_fdir_engine);
> > +}
> > diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
> > index 11c70f5..77bfd1b 100644
> > --- a/drivers/net/iavf/iavf_vchnl.c
> > +++ b/drivers/net/iavf/iavf_vchnl.c
> > @@ -342,7 +342,8 @@
> >
> >  	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
> >  		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
> > -		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
> > +		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
> > +		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
> >
> >  	args.in_args = (uint8_t *)&caps;
> >  	args.in_args_size = sizeof(caps);
> > @@ -867,3 +868,128 @@
> >
> >  	return err;
> >  }
> > +
> > +int
> > +iavf_fdir_add(struct iavf_adapter *adapter,
> > +	struct iavf_fdir_conf *filter)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> > +	struct virtchnl_fdir_status *fdir_status;
> > +
> > +	struct iavf_cmd_info args;
> > +	int err;
> > +
> > +	filter->input.vsi_id = vf->vsi_res->vsi_id;
> > +	filter->input.validate_only = 0;
> > +
> > +	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> > +	args.in_args = (uint8_t *)(&filter->input);
> > +	args.in_args_size = sizeof(*(&filter->input));
> > +	args.out_buffer = vf->aq_resp;
> > +	args.out_size = IAVF_AQ_BUF_SZ;
> > +
> > +	err = iavf_execute_vf_cmd(adapter, &args);
> > +	if (err) {
> > +		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
> > +		return err;
> > +	}
> > +
> > +	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
> > +	filter->flow_id = fdir_status->flow_id;
> > +
> > +	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
> > +		PMD_DRV_LOG(INFO,
> > +			"add rule request is successfully done by PF");
> > +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE)
> > +		PMD_DRV_LOG(INFO,
> > +			"add rule request is failed due to no hw resource");
> > +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT)
> > +		PMD_DRV_LOG(INFO,
> > +			"add rule request is failed due to the rule is already existed");
> > +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID)
> > +		PMD_DRV_LOG(INFO,
> > +			"add rule request is failed due to the hw doesn't support");
> > +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
> > +		PMD_DRV_LOG(INFO,
> > +			"add rule request is failed due to time out for programming");
> > +
> > +	return 0;
> > +};
> > +
> > +int
> > +iavf_fdir_del(struct iavf_adapter *adapter,
> > +	struct iavf_fdir_conf *filter)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> > +	struct virtchnl_fdir_status *fdir_status;
> > +
> > +	struct iavf_cmd_info args;
> > +	int err;
> > +
> > +	filter->input.vsi_id = vf->vsi_res->vsi_id;
> > +	filter->input.flow_id = filter->flow_id;
> > +
> > +	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
> > +	args.in_args = (uint8_t *)(&filter->input);
> > +	args.in_args_size = sizeof(filter->input);
> > +	args.out_buffer = vf->aq_resp;
> > +	args.out_size = IAVF_AQ_BUF_SZ;
> > +
> > +	err = iavf_execute_vf_cmd(adapter, &args);
> > +	if (err) {
> > +		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
> > +		return err;
> > +	}
> > +
> > +	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
> > +
> > +	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
> > +		PMD_DRV_LOG(INFO,
> > +			"delete rule request is successfully done by PF");
> > +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST)
> > +		PMD_DRV_LOG(INFO,
> > +			"delete rule request is failed due to this rule doesn't exist");
> > +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
> > +		PMD_DRV_LOG(INFO,
> > +			"delete rule request is failed due to time out for programming");
> > +
> > +	return 0;
> > +};
> > +
> > +int
> > +iavf_fdir_check(struct iavf_adapter *adapter,
> > +		struct iavf_fdir_conf *filter)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> > +	struct virtchnl_fdir_status *fdir_status;
> > +
> > +	struct iavf_cmd_info args;
> > +	int err;
> > +
> > +	filter->input.vsi_id = vf->vsi_res->vsi_id;
> > +	filter->input.validate_only = 1;
> > +
> > +	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> > +	args.in_args = (uint8_t *)(&filter->input);
> > +	args.in_args_size = sizeof(*(&filter->input));
> > +	args.out_buffer = vf->aq_resp;
> > +	args.out_size = IAVF_AQ_BUF_SZ;
> > +
> > +	err = iavf_execute_vf_cmd(adapter, &args);
> > +	if (err) {
> > +		PMD_DRV_LOG(ERR, "fail to check flow director rule");
> > +		return err;
> > +	}
> > +
> > +	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
> > +
> > +	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
> > +		PMD_DRV_LOG(INFO,
> > +			"check rule request is successfully done by PF");
> > +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID)
> > +		PMD_DRV_LOG(INFO,
> > +			"check rule request is failed due to parameters validation"
> > +			" or HW doesn't support");
> > +
> > +	return 0;
> > +}
> > diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
> > index 32eabca..ce71054 100644
> > --- a/drivers/net/iavf/meson.build
> > +++ b/drivers/net/iavf/meson.build
> > @@ -13,6 +13,7 @@ sources = files(
> >  	'iavf_rxtx.c',
> >  	'iavf_vchnl.c',
> >  	'iavf_generic_flow.c',
> > +	'iavf_fdir.c',
> >  )
> >
> >  if arch_subdir == 'x86'
> > --
> > 1.8.3.1
> 


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v2 0/5] net/iavf: support FDIR capabiltiy
  2020-03-18  5:41 [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capabiltiy Simei Su
                   ` (5 preceding siblings ...)
  2020-03-18  5:56 ` [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capabiltiy Stephen Hemminger
@ 2020-04-02 13:32 ` Simei Su
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 1/5] net/iavf: add support for FDIR basic rule Simei Su
                     ` (5 more replies)
  6 siblings, 6 replies; 43+ messages in thread
From: Simei Su @ 2020-04-02 13:32 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

[PATCH 1/5] support FDIR common patterns and actions.
[PATCH 2/5] support FDIR GTPU pattern.
[PATCH 3/5] support FDIR L2TPv3, ESP, AH and NAT-T pattern.
[PATCH 4/5] support FDIR PFCP node and session pattern.
[PATCH 5/5] support FDIR mark action.

This patchset depends on the following patches on patchwork:
(1)https://patchwork.dpdk.org/patch/67410/
    [v2,1/2] net/iavf: support generic flow
(2)https://patchwork.dpdk.org/patch/67411/
    [v2,2/2] net/iavf: support more patterns
(3)https://patchwork.dpdk.org/patch/67464/
    [v2,07/12] net/iavf: add flow director enabled switch value
(4)https://patchwork.dpdk.org/patch/67465/
    [v2,08/12] net/iavf: support flow mark in normal data path
(5)https://patchwork.dpdk.org/patch/67466/
    [v2,09/12] net/iavf: support flow mark in AVX path
(6)https://patchwork.dpdk.org/patch/67467
    [v2,10/12] net/iavf: support flow mark in SSE path

v2:
* Update pattern and action structures based on the latest virtchnl design.
* Add upper bound checks for the pattern layer and action counts.
* Increase the action count in the mark-only case.
* Handle more PF error return statuses.

Simei Su (5):
  net/iavf: add support for FDIR basic rule
  net/iavf: add support for FDIR GTPU
  net/iavf: add support for FDIR L2TPv3 and IPSec
  net/iavf: add support for FDIR PFCP
  net/iavf: add support for FDIR mark action

 drivers/net/iavf/Makefile     |   1 +
 drivers/net/iavf/iavf.h       |  18 +
 drivers/net/iavf/iavf_fdir.c  | 973 ++++++++++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c | 152 ++++++-
 drivers/net/iavf/meson.build  |   1 +
 5 files changed, 1144 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v2 1/5] net/iavf: add support for FDIR basic rule
  2020-04-02 13:32 ` [dpdk-dev] [PATCH v2 " Simei Su
@ 2020-04-02 13:32   ` Simei Su
  2020-04-10  7:40     ` Cao, Yahui
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 2/5] net/iavf: add support for FDIR GTPU Simei Su
                     ` (4 subsequent siblings)
  5 siblings, 1 reply; 43+ messages in thread
From: Simei Su @ 2020-04-02 13:32 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch adds FDIR create/destroy/validate functions in AVF.
Common pattern and queue/qgroup/passthru/drop actions are supported.
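
For example (illustrative testpmd syntax; addresses and queue numbers
are arbitrary):

  flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions queue index 3 / end

A drop or passthru rule differs only in the action list, e.g.
"actions drop / end", while "actions rss queues 0 1 2 3 end / end"
requests a queue group.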

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/Makefile     |   1 +
 drivers/net/iavf/iavf.h       |  17 +
 drivers/net/iavf/iavf_fdir.c  | 749 ++++++++++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c | 152 ++++++++-
 drivers/net/iavf/meson.build  |   1 +
 5 files changed, 919 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c

diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
index 7b0093a..b2b75d7 100644
--- a/drivers/net/iavf/Makefile
+++ b/drivers/net/iavf/Makefile
@@ -25,6 +25,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
 ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
 endif
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index afec8b2..e2b1d5f 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -106,6 +106,17 @@ struct iavf_vsi {
 struct iavf_flow_parser_node;
 TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
 
+struct iavf_fdir_conf {
+	struct virtchnl_fdir_add add_fltr;
+	struct virtchnl_fdir_del del_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
+struct iavf_fdir_info {
+	struct iavf_fdir_conf conf;
+};
+
 /* TODO: is that correct to assume the max number to be 16 ?*/
 #define IAVF_MAX_MSIX_VECTORS   16
 
@@ -145,6 +156,8 @@ struct iavf_info {
 	struct iavf_flow_list flow_list;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+
+	struct iavf_fdir_info fdir; /* flow director info */
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -270,4 +283,8 @@ int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
 int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
 int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 			 struct virtchnl_rss_cfg *rss_cfg, bool add);
+int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_check(struct iavf_adapter *adapter,
+		struct iavf_fdir_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
new file mode 100644
index 0000000..ea529b6
--- /dev/null
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -0,0 +1,749 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+#include "virtchnl.h"
+
+#define IAVF_FDIR_MAX_QREGION_SIZE 128
+
+#define IAVF_FDIR_IPV6_TC_OFFSET 20
+#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
+
+#define IAVF_FDIR_INSET_ETH (\
+	IAVF_INSET_ETHERTYPE)
+
+#define IAVF_FDIR_INSET_ETH_IPV4 (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6 (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
+	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,		IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,		IAVF_FDIR_INSET_ETH_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_sctp,		IAVF_FDIR_INSET_ETH_IPV4_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+};
+
+static struct iavf_flow_parser iavf_fdir_parser;
+
+static int
+iavf_fdir_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+		parser = &iavf_fdir_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fdir_uninit(struct iavf_adapter *ad)
+{
+	struct iavf_flow_parser *parser;
+
+	parser = &iavf_fdir_parser;
+
+	iavf_unregister_parser(parser, ad);
+}
+
+static int
+iavf_fdir_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter = meta;
+	struct iavf_fdir_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory");
+		return -rte_errno;
+	}
+
+	ret = iavf_fdir_add(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Add filter rule failed.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return 0;
+
+free_entry:
+	rte_free(rule);
+	return -rte_errno;
+}
+
+static int
+iavf_fdir_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fdir_conf *)flow->rule;
+
+	ret = iavf_fdir_del(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Del filter rule failed.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return 0;
+}
+
+static int
+iavf_fdir_validation(struct iavf_adapter *ad,
+		__rte_unused struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter = meta;
+	int ret;
+
+	ret = iavf_fdir_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Validate filter rule failed.");
+		return -rte_errno;
+	}
+
+	return 0;
+};
+
+static struct iavf_flow_engine iavf_fdir_engine = {
+	.init = iavf_fdir_init,
+	.uninit = iavf_fdir_uninit,
+	.create = iavf_fdir_create,
+	.destroy = iavf_fdir_destroy,
+	.validation = iavf_fdir_validation,
+	.type = IAVF_FLOW_ENGINE_FDIR,
+};
+
+static int
+iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
+			struct rte_flow_error *error,
+			const struct rte_flow_action *act,
+			struct virtchnl_filter_action *filter_action)
+{
+	const struct rte_flow_action_rss *rss = act->conf;
+	uint32_t i;
+
+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Invalid action.");
+		return -rte_errno;
+	}
+
+	if (rss->queue_num <= 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Queue region size can't be 0 or 1.");
+		return -rte_errno;
+	}
+
+	/* check if queue index for queue region is continuous */
+	for (i = 0; i < rss->queue_num - 1; i++) {
+		if (rss->queue[i + 1] != rss->queue[i] + 1) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, act,
+					"Discontinuous queue region");
+			return -rte_errno;
+		}
+	}
+
+	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Invalid queue region indexes.");
+		return -rte_errno;
+	}
+
+	if (!(rte_is_power_of_2(rss->queue_num) &&
+		rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"The region size should be any of the following values: "
+				"1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
+				"of queues does not exceed the VSI allocation.");
+		return -rte_errno;
+	}
+
+	filter_action->q_index = rss->queue[0];
+	filter_action->q_region = rte_fls_u32(rss->queue_num) - 1;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse_action(struct iavf_adapter *ad,
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct iavf_fdir_conf *filter)
+{
+	const struct rte_flow_action_queue *act_q;
+	uint32_t dest_num = 0;
+	int ret;
+
+	int number = 0;
+	struct virtchnl_filter_action *filter_action;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_DROP;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+
+			act_q = actions->conf;
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->q_index = act_q->index;
+
+			if (filter_action->q_index >=
+				ad->eth_dev->data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid queue for FDIR.");
+				return -rte_errno;
+			}
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+
+			ret = iavf_fdir_parse_action_qregion(ad,
+						error, actions, filter_action);
+			if (ret)
+				return ret;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, actions,
+					"Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (dest_num == 0 || dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
+			const struct rte_flow_item pattern[],
+			struct rte_flow_error *error,
+			struct iavf_fdir_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	uint64_t input_set = IAVF_INSET_NONE;
+
+	enum rte_flow_item_type next_type;
+	uint16_t ether_type;
+
+	int layer = 0;
+	struct virtchnl_proto_hdr *hdr;
+
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+	};
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Range not supported");
+		}
+
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			next_type = (item + 1)->type;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+
+			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
+				(!eth_spec || !eth_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "NULL eth spec/mask.");
+				return -rte_errno;
+			}
+
+			if (eth_spec && eth_mask) {
+				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid MAC_addr mask.");
+					return -rte_errno;
+				}
+			}
+
+			if (eth_spec && eth_mask && eth_mask->type) {
+				if (eth_mask->type != RTE_BE16(0xffff)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid type mask.");
+					return -rte_errno;
+				}
+
+				ether_type = rte_be_to_cpu_16(eth_spec->type);
+				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+					ether_type == RTE_ETHER_TYPE_IPV6) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Unsupported ether_type.");
+					return -rte_errno;
+				}
+
+				input_set |= IAVF_INSET_ETHERTYPE;
+				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
+
+				rte_memcpy(hdr->buffer,
+					eth_spec, sizeof(*eth_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.fragment_offset ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.type_of_service ==
+								UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TOS;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_PROTO;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TTL;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
+				}
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+				}
+
+				rte_memcpy(hdr->buffer,
+					&ipv4_spec->hdr,
+					sizeof(ipv4_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				if ((ipv6_mask->hdr.vtc_flow &
+					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+					== rte_cpu_to_be_32(
+							IAVF_IPV6_TC_MASK)) {
+					input_set |= IAVF_INSET_IPV6_TC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
+				}
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					ipv6_addr_mask,
+					RTE_DIM(ipv6_mask->hdr.src_addr))) {
+					input_set |= IAVF_INSET_IPV6_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+				}
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					ipv6_addr_mask,
+					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+					input_set |= IAVF_INSET_IPV6_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+				}
+
+				rte_memcpy(hdr->buffer,
+					&ipv6_spec->hdr,
+					sizeof(ipv6_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				if (udp_mask->hdr.dgram_len ||
+					udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_UDP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_UDP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&udp_spec->hdr,
+						sizeof(udp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&udp_spec->hdr,
+						sizeof(udp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_TCP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_TCP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&tcp_spec->hdr,
+						sizeof(tcp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&tcp_spec->hdr,
+						sizeof(tcp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+			if (sctp_spec && sctp_mask) {
+				if (sctp_mask->hdr.cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid SCTP mask");
+					return -rte_errno;
+				}
+
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, item,
+			"Protocol header layers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	filter->input_set = input_set;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_fdir_conf *filter = &vf->fdir.conf;
+	struct iavf_pattern_match_item *item = NULL;
+	uint64_t input_set;
+	int ret;
+
+	memset(filter, 0, sizeof(*filter));
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (!item)
+		return -rte_errno;
+
+	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
+	if (ret)
+		goto error;
+
+	input_set = filter->input_set;
+	if (!input_set || input_set & ~item->input_set_mask) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
+				"Invalid input set");
+		ret = -rte_errno;
+		goto error;
+	}
+
+	ret = iavf_fdir_parse_action(ad, actions, error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(item);
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_fdir_parser = {
+	.engine = &iavf_fdir_engine,
+	.array = iavf_fdir_pattern,
+	.array_len = RTE_DIM(iavf_fdir_pattern),
+	.parse_pattern_action = iavf_fdir_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fdir_engine_register)
+{
+	iavf_register_flow_engine(&iavf_fdir_engine);
+}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 2307969..133e81c 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -339,7 +339,8 @@
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
 		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
-		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -906,3 +907,152 @@
 
 	return err;
 }
+
+int
+iavf_fdir_add(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_add *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->add_fltr.validate_only = 0;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->add_fltr);
+	args.in_args_size = sizeof(*(&filter->add_fltr));
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+	filter->flow_id = fdir_ret->flow_id;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"add rule request is successfully done by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed due to no hw resource");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed because the rule already exists");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed because the hw doesn't support it");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed due to a programming timeout");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed due to other reasons");
+		return -1;
+	}
+
+	return 0;
+};
+
+int
+iavf_fdir_del(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_del *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->del_fltr.flow_id = filter->flow_id;
+
+	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->del_fltr);
+	args.in_args_size = sizeof(filter->del_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"delete rule request is successfully done by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR,
+			"delete rule request failed because this rule doesn't exist");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+		PMD_DRV_LOG(ERR,
+			"delete rule request failed due to a programming timeout");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"delete rule request failed due to other reasons");
+		return -1;
+	}
+
+	return 0;
+};
+
+int
+iavf_fdir_check(struct iavf_adapter *adapter,
+		struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_add *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->add_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->add_fltr);
+	args.in_args_size = sizeof(*(&filter->add_fltr));
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to check flow director rule");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"check rule request is successfully done by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR,
+			"check rule request failed due to parameter validation"
+			" or lack of HW support");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"check rule request failed due to other reasons");
+		return -1;
+	}
+
+	return 0;
+}
+
+
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 5a5cdd5..f875b72 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -14,6 +14,7 @@ sources = files(
 	'iavf_vchnl.c',
 	'iavf_generic_flow.c',
 	'iavf_hash.c',
+	'iavf_fdir.c',
 )
 
 if arch_subdir == 'x86'
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v2 2/5] net/iavf: add support for FDIR GTPU
  2020-04-02 13:32 ` [dpdk-dev] [PATCH v2 " Simei Su
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 1/5] net/iavf: add support for FDIR basic rule Simei Su
@ 2020-04-02 13:32   ` Simei Su
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-02 13:32 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables GTPU with TEID and QFI for the flow director filter.
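
For example (illustrative testpmd syntax; TEID/QFI values are arbitrary):

  flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions queue index 1 / end
  flow create 0 ingress pattern eth / ipv4 / udp / gtpu / gtp_psc qfi is 0x34 / end actions queue index 2 / end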

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 63 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index ea529b6..6a915ba 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -67,6 +67,14 @@
 	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
 	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
 
+#define IAVF_FDIR_INSET_GTPU (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID)
+
+#define IAVF_FDIR_INSET_GTPU_EH (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -77,6 +85,8 @@
 	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -363,6 +373,8 @@
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -666,6 +678,57 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec = item->spec;
+			gtp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+					gtp_mask->msg_type ||
+					gtp_mask->msg_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				if (gtp_mask->teid == UINT32_MAX) {
+					input_set |= IAVF_INSET_GTPU_TEID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
+				}
+
+				rte_memcpy(hdr->buffer,
+					gtp_spec, sizeof(*gtp_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+			gtp_psc_spec = item->spec;
+			gtp_psc_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+
+			if (gtp_psc_spec && gtp_psc_mask) {
+				if (gtp_psc_mask->qfi == UINT8_MAX) {
+					input_set |= IAVF_INSET_GTPU_QFI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
+				}
+
+				rte_memcpy(hdr->buffer, gtp_psc_spec,
+					sizeof(*gtp_psc_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v2 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec
  2020-04-02 13:32 ` [dpdk-dev] [PATCH v2 " Simei Su
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 1/5] net/iavf: add support for FDIR basic rule Simei Su
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 2/5] net/iavf: add support for FDIR GTPU Simei Su
@ 2020-04-02 13:32   ` Simei Su
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 4/5] net/iavf: add support for FDIR PFCP Simei Su
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-02 13:32 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables L2TPv3 with SESSION_ID, ESP/AH with SPI, and NAT-T
with SPI plus IP src/dst for the flow director filter.
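
For example (illustrative testpmd syntax; SPI and session ID values
are arbitrary):

  flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 1 / end actions queue index 1 / end
  flow create 0 ingress pattern eth / ipv4 / esp spi is 1 / end actions queue index 2 / end
  flow create 0 ingress pattern eth / ipv4 / udp / esp spi is 1 / end actions queue index 3 / end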

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 91 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 6a915ba..e0f4941 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -75,6 +75,23 @@
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
 
+#define IAVF_FDIR_INSET_L2TPV3OIP (\
+	IAVF_L2TPV3OIP_SESSION_ID)
+
+#define IAVF_FDIR_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -87,6 +104,14 @@
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -375,6 +400,9 @@
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
 	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
+	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
+	const struct rte_flow_item_esp *esp_spec, *esp_mask;
+	const struct rte_flow_item_ah *ah_spec, *ah_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -729,6 +757,69 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+			l2tpv3oip_spec = item->spec;
+			l2tpv3oip_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
+
+			if (l2tpv3oip_spec && l2tpv3oip_mask) {
+				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
+					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
+				}
+
+				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
+					sizeof(*l2tpv3oip_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_ESP:
+			esp_spec = item->spec;
+			esp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+			if (esp_spec && esp_mask) {
+				if (esp_mask->hdr.spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_ESP_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, &esp_spec->hdr,
+					sizeof(esp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_AH:
+			ah_spec = item->spec;
+			ah_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+			if (ah_spec && ah_mask) {
+				if (ah_mask->spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_AH_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, ah_spec,
+					sizeof(*ah_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v2 4/5] net/iavf: add support for FDIR PFCP
  2020-04-02 13:32 ` [dpdk-dev] [PATCH v2 " Simei Su
                     ` (2 preceding siblings ...)
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
@ 2020-04-02 13:32   ` Simei Su
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 5/5] net/iavf: add support for FDIR mark action Simei Su
  2020-04-10 10:18   ` [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capabiltiy Simei Su
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-02 13:32 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables PFCP node and session packets with S_FIELD
for the flow director filter.
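
For example (illustrative testpmd syntax; s_field 0 matches node
packets and 1 matches session packets):

  flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions queue index 1 / end
  flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 1 / end actions queue index 2 / end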

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index e0f4941..ca0ccd0 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -92,6 +92,9 @@
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_ESP_SPI)
 
+#define IAVF_FDIR_INSET_PFCP (\
+	IAVF_INSET_PFCP_S_FIELD)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -112,6 +115,8 @@
 	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -403,6 +408,7 @@
 	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
 	const struct rte_flow_item_esp *esp_spec, *esp_mask;
 	const struct rte_flow_item_ah *ah_spec, *ah_mask;
+	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -820,6 +826,27 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_PFCP:
+			pfcp_spec = item->spec;
+			pfcp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
+
+			if (pfcp_spec && pfcp_mask) {
+				if (pfcp_mask->s_field == UINT8_MAX) {
+					input_set |= IAVF_INSET_PFCP_S_FIELD;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
+				}
+
+				rte_memcpy(hdr->buffer, pfcp_spec,
+					sizeof(*pfcp_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v2 5/5] net/iavf: add support for FDIR mark action
  2020-04-02 13:32 ` [dpdk-dev] [PATCH v2 " Simei Su
                     ` (3 preceding siblings ...)
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 4/5] net/iavf: add support for FDIR PFCP Simei Su
@ 2020-04-02 13:32   ` Simei Su
  2020-04-10 10:18   ` [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capabiltiy Simei Su
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-02 13:32 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables mark action support and takes the mark-only case
into consideration.
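
For example (illustrative testpmd syntax; mark IDs are arbitrary),
mark combined with a queue, and mark only, which the driver completes
with an implicit passthru action:

  flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 / end actions mark id 0x1234 / queue index 1 / end
  flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 / end actions mark id 0x1234 / end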

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf.h      |  1 +
 drivers/net/iavf/iavf_fdir.c | 45 +++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index e2b1d5f..0f77945 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -111,6 +111,7 @@ struct iavf_fdir_conf {
 	struct virtchnl_fdir_del del_fltr;
 	uint64_t input_set;
 	uint32_t flow_id;
+	uint32_t mark_flag;
 };
 
 struct iavf_fdir_info {
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index ca0ccd0..98f792b 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -18,6 +18,7 @@
 #include "iavf.h"
 #include "iavf_generic_flow.h"
 #include "virtchnl.h"
+#include "iavf_rxtx.h"
 
 #define IAVF_FDIR_MAX_QREGION_SIZE 128
 
@@ -171,6 +172,9 @@
 		goto free_entry;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 1);
+
 	rte_memcpy(rule, filter, sizeof(*rule));
 	flow->rule = rule;
 
@@ -199,6 +203,9 @@
 		return -rte_errno;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 0);
+
 	flow->rule = NULL;
 	rte_free(filter);
 
@@ -297,7 +304,9 @@
 			struct iavf_fdir_conf *filter)
 {
 	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark_spec = NULL;
 	uint32_t dest_num = 0;
+	uint32_t mark_num = 0;
 	int ret;
 
 	int number = 0;
@@ -363,6 +372,19 @@
 			filter->add_fltr.rule_cfg.action_set.count = ++number;
 			break;
 
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			mark_num++;
+
+			filter->mark_flag = 1;
+			mark_spec = actions->conf;
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_MARK;
+			filter_action->mark_id = mark_spec->id;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
 		default:
 			rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -378,13 +400,34 @@
 		return -rte_errno;
 	}
 
-	if (dest_num == 0 || dest_num >= 2) {
+	if (dest_num >= 2) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
 			"Unsupported action combination");
 		return -rte_errno;
 	}
 
+	if (mark_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Too many mark actions");
+		return -rte_errno;
+	}
+
+	if (dest_num + mark_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Empty action");
+		return -rte_errno;
+	}
+
+	/* Mark only is equal to mark + passthru. */
+	if (dest_num == 0) {
+		filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+		filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+		filter->add_fltr.rule_cfg.action_set.count = ++number;
+	}
+
 	return 0;
 }
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/5] net/iavf: add support for FDIR basic rule
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 1/5] net/iavf: add support for FDIR basic rule Simei Su
@ 2020-04-10  7:40     ` Cao, Yahui
  2020-04-10  8:00       ` Su, Simei
  0 siblings, 1 reply; 43+ messages in thread
From: Cao, Yahui @ 2020-04-10  7:40 UTC (permalink / raw)
  To: Su, Simei, Zhang, Qi Z, Ye, Xiaolong, Wu, Jingjing; +Cc: dev



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Thursday, April 2, 2020 9:33 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Ye, Xiaolong <xiaolong.ye@intel.com>;
> Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH v2 1/5] net/iavf: add support for FDIR basic rule
> 
> This patch adds FDIR create/destroy/validate function in AVF.
> Common pattern and queue/qgroup/passthru/drop actions are supported.
> 
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
>  drivers/net/iavf/Makefile     |   1 +
>  drivers/net/iavf/iavf.h       |  17 +
>  drivers/net/iavf/iavf_fdir.c  | 749
> ++++++++++++++++++++++++++++++++++++++++++
>  drivers/net/iavf/iavf_vchnl.c | 152 ++++++++-
>  drivers/net/iavf/meson.build  |   1 +
>  5 files changed, 919 insertions(+), 1 deletion(-)  create mode 100644
> drivers/net/iavf/iavf_fdir.c
> 
> diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile index
> 7b0093a..b2b75d7 100644
> --- a/drivers/net/iavf/Makefile
> +++ b/drivers/net/iavf/Makefile
> @@ -25,6 +25,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
>  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
>  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
>  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
> +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
>  ifeq ($(CONFIG_RTE_ARCH_X86), y)
>  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c  endif diff --git
> a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index afec8b2..e2b1d5f
> 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -106,6 +106,17 @@ struct iavf_vsi {
>  struct iavf_flow_parser_node;
>  TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
> 
> +struct iavf_fdir_conf {
> +	struct virtchnl_fdir_add add_fltr;
> +	struct virtchnl_fdir_del del_fltr;
> +	uint64_t input_set;
> +	uint32_t flow_id;
> +};
> +
> +struct iavf_fdir_info {
> +	struct iavf_fdir_conf conf;
> +};
> +
>  /* TODO: is that correct to assume the max number to be 16 ?*/
>  #define IAVF_MAX_MSIX_VECTORS   16
> 
> @@ -145,6 +156,8 @@ struct iavf_info {
>  	struct iavf_flow_list flow_list;
>  	struct iavf_parser_list rss_parser_list;
>  	struct iavf_parser_list dist_parser_list;
> +
> +	struct iavf_fdir_info fdir; /* flow director info */
>  };
> 
>  #define IAVF_MAX_PKT_TYPE 1024
> @@ -270,4 +283,8 @@ int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
> int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
> int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
>  			 struct virtchnl_rss_cfg *rss_cfg, bool add);
> +int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf
> +*filter); int iavf_fdir_del(struct iavf_adapter *adapter, struct
> +iavf_fdir_conf *filter); int iavf_fdir_check(struct iavf_adapter *adapter,
> +		struct iavf_fdir_conf *filter);
>  #endif /* _IAVF_ETHDEV_H_ */
> diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c new file
> mode 100644 index 0000000..ea529b6
> --- /dev/null
> +++ b/drivers/net/iavf/iavf_fdir.c
> @@ -0,0 +1,749 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2019 Intel Corporation
> + */
> +
> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +
> +#include <rte_ether.h>
> +#include <rte_ethdev_driver.h>
> +#include <rte_malloc.h>
> +#include <rte_tailq.h>
> +
> +#include "iavf.h"
> +#include "iavf_generic_flow.h"
> +#include "virtchnl.h"
> +
> +#define IAVF_FDIR_MAX_QREGION_SIZE 128
> +
> +#define IAVF_FDIR_IPV6_TC_OFFSET 20
> +#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
> +
> +#define IAVF_FDIR_INSET_ETH (\
> +	IAVF_INSET_ETHERTYPE)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4 (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
> +	IAVF_INSET_IPV4_TTL)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> +	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> +	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> +	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6 (\
> +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> +	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
> +	IAVF_INSET_IPV6_HOP_LIMIT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
> +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> +	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> +	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
> +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> +	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> +	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
> +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> +	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> +	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> +
> +static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
> +	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,
> 		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,
> 		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_udp,
> 	IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_tcp,
> 	IAVF_FDIR_INSET_ETH_IPV4_TCP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_sctp,
> 	IAVF_FDIR_INSET_ETH_IPV4_SCTP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,
> 		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6_udp,
> 	IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6_tcp,
> 	IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6_sctp,
> 	IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
> +};
> +
> +static struct iavf_flow_parser iavf_fdir_parser;
> +
> +static int
> +iavf_fdir_init(struct iavf_adapter *ad) {
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> +	struct iavf_flow_parser *parser;
> +
> +	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
> +		parser = &iavf_fdir_parser;
> +	else
> +		return -ENOTSUP;
> +
> +	return iavf_register_parser(parser, ad); }
> +
> +static void
> +iavf_fdir_uninit(struct iavf_adapter *ad) {
> +	struct iavf_flow_parser *parser;
> +
> +	parser = &iavf_fdir_parser;
> +
> +	iavf_unregister_parser(parser, ad);
> +}
> +
> +static int
> +iavf_fdir_create(struct iavf_adapter *ad,
> +		struct rte_flow *flow,
> +		void *meta,
> +		struct rte_flow_error *error)
> +{
> +	struct iavf_fdir_conf *filter = meta;
> +	struct iavf_fdir_conf *rule;
> +	int ret;
> +
> +	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
> +	if (!rule) {
> +		rte_flow_error_set(error, ENOMEM,
> +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				"Failed to allocate memory");
> +		return -rte_errno;
> +	}
> +
> +	ret = iavf_fdir_add(ad, filter);
> +	if (ret) {
> +		rte_flow_error_set(error, -ret,
> +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				"Add filter rule failed.");
> +		goto free_entry;
> +	}
> +
> +	rte_memcpy(rule, filter, sizeof(*rule));
> +	flow->rule = rule;
> +
> +	return 0;
> +
> +free_entry:
> +	rte_free(rule);
> +	return -rte_errno;
> +}
> +
> +static int
> +iavf_fdir_destroy(struct iavf_adapter *ad,
> +		struct rte_flow *flow,
> +		struct rte_flow_error *error)
> +{
> +	struct iavf_fdir_conf *filter;
> +	int ret;
> +
> +	filter = (struct iavf_fdir_conf *)flow->rule;
> +
> +	ret = iavf_fdir_del(ad, filter);
> +	if (ret) {
> +		rte_flow_error_set(error, -ret,
> +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				"Del filter rule failed.");
> +		return -rte_errno;
> +	}
> +
> +	flow->rule = NULL;
> +	rte_free(filter);
> +
> +	return 0;
> +}
> +
> +static int
> +iavf_fdir_validation(struct iavf_adapter *ad,
> +		__rte_unused struct rte_flow *flow,
> +		void *meta,
> +		struct rte_flow_error *error)
> +{
> +	struct iavf_fdir_conf *filter = meta;
> +	int ret;
> +
> +	ret = iavf_fdir_check(ad, filter);
> +	if (ret) {
> +		rte_flow_error_set(error, -ret,
> +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				"Validate filter rule failed.");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +};
> +
> +static struct iavf_flow_engine iavf_fdir_engine = {
> +	.init = iavf_fdir_init,
> +	.uninit = iavf_fdir_uninit,
> +	.create = iavf_fdir_create,
> +	.destroy = iavf_fdir_destroy,
> +	.validation = iavf_fdir_validation,
> +	.type = IAVF_FLOW_ENGINE_FDIR,
> +};
> +
> +static int
> +iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
> +			struct rte_flow_error *error,
> +			const struct rte_flow_action *act,
> +			struct virtchnl_filter_action *filter_action) {
> +	const struct rte_flow_action_rss *rss = act->conf;
> +	uint32_t i;
> +
> +	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION, act,
> +				"Invalid action.");
> +		return -rte_errno;
> +	}
> +
> +	if (rss->queue_num <= 1) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION, act,
> +				"Queue region size can't be 0 or 1.");
> +		return -rte_errno;
> +	}
> +
> +	/* check if queue index for queue region is continuous */
> +	for (i = 0; i < rss->queue_num - 1; i++) {
> +		if (rss->queue[i + 1] != rss->queue[i] + 1) {
> +			rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ACTION, act,
> +					"Discontinuous queue region");
> +			return -rte_errno;
> +		}
> +	}
> +
> +	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data-
> >nb_rx_queues) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION, act,
> +				"Invalid queue region indexes.");
> +		return -rte_errno;
> +	}
> +
> +	if (!(rte_is_power_of_2(rss->queue_num) &&
> +		rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION, act,
> +				"The region size should be any of the following values:"
> +				"1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
> +				"of queues do not exceed the VSI allocation.");
> +		return -rte_errno;
> +	}
> +
> +	filter_action->q_index = rss->queue[0];
> +	filter_action->q_region = rte_fls_u32(rss->queue_num) - 1;
> +
> +	return 0;
> +}
> +
> +static int
> +iavf_fdir_parse_action(struct iavf_adapter *ad,
> +			const struct rte_flow_action actions[],
> +			struct rte_flow_error *error,
> +			struct iavf_fdir_conf *filter)
> +{
> +	const struct rte_flow_action_queue *act_q;
> +	uint32_t dest_num = 0;
> +	int ret;
> +
> +	int number = 0;
> +	struct virtchnl_filter_action *filter_action;
> +
> +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
> +		switch (actions->type) {
> +		case RTE_FLOW_ACTION_TYPE_VOID:
> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
> +			dest_num++;
> +
> +			filter_action =
> +&filter->add_fltr.rule_cfg.action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
> +
> +			filter->add_fltr.rule_cfg.action_set.count = ++number;
> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_DROP:
> +			dest_num++;
> +
> +			filter_action =
> +&filter->add_fltr.rule_cfg.action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_ACTION_DROP;
> +
> +			filter->add_fltr.rule_cfg.action_set.count = ++number;
> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_QUEUE:
> +			dest_num++;
> +
> +			act_q = actions->conf;
> +			filter_action =
> +&filter->add_fltr.rule_cfg.action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_ACTION_QUEUE;
> +			filter_action->q_index = act_q->index;
> +
> +			if (filter_action->q_index >=
> +				ad->eth_dev->data->nb_rx_queues) {
> +				rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ACTION,
> +					actions, "Invalid queue for FDIR.");
> +				return -rte_errno;
> +			}
> +
> +			filter->add_fltr.rule_cfg.action_set.count = ++number;
> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_RSS:
> +			dest_num++;
> +
> +			filter_action =
> +&filter->add_fltr.rule_cfg.action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
> +
> +			ret = iavf_fdir_parse_action_qregion(ad,
> +						error, actions, filter_action);
> +			if (ret)
> +				return ret;
> +
> +			filter->add_fltr.rule_cfg.action_set.count = ++number;
> +			break;
> +
> +		default:
> +			rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ACTION,
> actions,
> +					"Invalid action.");
> +			return -rte_errno;
> +		}
> +	}
> +
> +	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +			"Action numbers exceed the maximum value");
> +		return -rte_errno;
> +	}
> +
> +	if (dest_num == 0 || dest_num >= 2) {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +			"Unsupported action combination");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
> +			const struct rte_flow_item pattern[],
> +			struct rte_flow_error *error,
> +			struct iavf_fdir_conf *filter)
> +{
> +	const struct rte_flow_item *item = pattern;
> +	enum rte_flow_item_type item_type;
> +	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
> +	const struct rte_flow_item_eth *eth_spec, *eth_mask;
> +	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> +	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> +	const struct rte_flow_item_udp *udp_spec, *udp_mask;
> +	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> +	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> +	uint64_t input_set = IAVF_INSET_NONE;
> +
> +	enum rte_flow_item_type next_type;
> +	uint16_t ether_type;
> +
> +	int layer = 0;
> +	struct virtchnl_proto_hdr *hdr;
> +
> +	uint8_t  ipv6_addr_mask[16] = {
> +		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
> +		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
> +	};
> +
> +	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++)
> {
> +		if (item->last) {
> +			rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM, item,
> +					"Not support range");
> +		}
> +
> +		item_type = item->type;
> +
> +		switch (item_type) {
> +		case RTE_FLOW_ITEM_TYPE_ETH:
> +			eth_spec = item->spec;
> +			eth_mask = item->mask;
> +			next_type = (item + 1)->type;
> +
> +			hdr = &filter-
> >add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
> +
> +			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
> +				(!eth_spec || !eth_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "NULL eth spec/mask.");
> +				return -rte_errno;
> +			}
> +
> +			if (eth_spec && eth_mask) {
> +				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
> +				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid MAC_addr mask.");
> +					return -rte_errno;
> +				}
> +			}
> +
> +			if (eth_spec && eth_mask && eth_mask->type) {
> +				if (eth_mask->type != RTE_BE16(0xffff)) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid type mask.");
> +					return -rte_errno;
> +				}
> +
> +				ether_type = rte_be_to_cpu_16(eth_spec-
> >type);
> +				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
> +					ether_type == RTE_ETHER_TYPE_IPV6)
> {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM,
> +						item,
> +						"Unsupported ether_type.");
> +					return -rte_errno;
> +				}
> +
> +				input_set |= IAVF_INSET_ETHERTYPE;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> ETH, ETHERTYPE);
> +
> +				rte_memcpy(hdr->buffer,
> +					eth_spec, sizeof(*eth_spec));
> +			}
> +
> +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
> +			ipv4_spec = item->spec;
> +			ipv4_mask = item->mask;
> +
> +			hdr = &filter-
> >add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
> +
> +			if (ipv4_spec && ipv4_mask) {
> +				if (ipv4_mask->hdr.version_ihl ||
> +					ipv4_mask->hdr.total_length ||
> +					ipv4_mask->hdr.packet_id ||
> +					ipv4_mask->hdr.fragment_offset ||
> +					ipv4_mask->hdr.hdr_checksum) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid IPv4 mask.");
> +					return -rte_errno;
> +				}
> +
> +				if (ipv4_mask->hdr.type_of_service ==
> +								UINT8_MAX) {
> +					input_set |= IAVF_INSET_IPV4_TOS;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
> +				}
> +				if (ipv4_mask->hdr.next_proto_id ==
> UINT8_MAX) {
> +					input_set |= IAVF_INSET_IPV4_PROTO;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
> +				}
> +				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> {
> +					input_set |= IAVF_INSET_IPV4_TTL;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
> +				}
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
> +					input_set |= IAVF_INSET_IPV4_SRC;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
> +				}
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
> +					input_set |= IAVF_INSET_IPV4_DST;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
> +				}
> +
> +				rte_memcpy(hdr->buffer,
> +					&ipv4_spec->hdr,
> +					sizeof(ipv4_spec->hdr));
> +			}
> +
> +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_IPV6:
> +			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
> +			ipv6_spec = item->spec;
> +			ipv6_mask = item->mask;
> +
> +			hdr = &filter-
> >add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
> +
> +			if (ipv6_spec && ipv6_mask) {
> +				if (ipv6_mask->hdr.payload_len) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid IPv6 mask");
> +					return -rte_errno;
> +				}
> +
> +				if ((ipv6_mask->hdr.vtc_flow &
> +
> 	rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
> +					== rte_cpu_to_be_32(
> +							IAVF_IPV6_TC_MASK))
> {
> +					input_set |= IAVF_INSET_IPV6_TC;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
> +				}
> +				if (ipv6_mask->hdr.proto == UINT8_MAX) {
> +					input_set |=
> IAVF_INSET_IPV6_NEXT_HDR;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
> +				}
> +				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
> +					input_set |=
> IAVF_INSET_IPV6_HOP_LIMIT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
> +				}
> +				if (!memcmp(ipv6_mask->hdr.src_addr,
> +					ipv6_addr_mask,
> +					RTE_DIM(ipv6_mask->hdr.src_addr))) {
> +					input_set |= IAVF_INSET_IPV6_SRC;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
> +				}
> +				if (!memcmp(ipv6_mask->hdr.dst_addr,
> +					ipv6_addr_mask,
> +					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
> +					input_set |= IAVF_INSET_IPV6_DST;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
> +				}
> +
> +				rte_memcpy(hdr->buffer,
> +					&ipv6_spec->hdr,
> +					sizeof(ipv6_spec->hdr));
> +			}
> +
> +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_UDP:
> +			udp_spec = item->spec;
> +			udp_mask = item->mask;
> +
> +			hdr = &filter-
> >add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
> +
> +			if (udp_spec && udp_mask) {
> +				if (udp_mask->hdr.dgram_len ||
> +					udp_mask->hdr.dgram_cksum) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid UDP mask");
> +					return -rte_errno;
> +				}
> +
> +				if (udp_mask->hdr.src_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_UDP_SRC_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
> +				}
> +				if (udp_mask->hdr.dst_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_UDP_DST_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
> +				}
> +
> +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> +					rte_memcpy(hdr->buffer,
> +						&udp_spec->hdr,
> +						sizeof(udp_spec->hdr));
> +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> +					rte_memcpy(hdr->buffer,
> +						&udp_spec->hdr,
> +						sizeof(udp_spec->hdr));
> +			}
> +
> +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_TCP:
> +			tcp_spec = item->spec;
> +			tcp_mask = item->mask;
> +
> +			hdr = &filter-
> >add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
> +
> +			if (tcp_spec && tcp_mask) {
> +				if (tcp_mask->hdr.sent_seq ||
> +					tcp_mask->hdr.recv_ack ||
> +					tcp_mask->hdr.data_off ||
> +					tcp_mask->hdr.tcp_flags ||
> +					tcp_mask->hdr.rx_win ||
> +					tcp_mask->hdr.cksum ||
> +					tcp_mask->hdr.tcp_urp) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid TCP mask");
> +					return -rte_errno;
> +				}
> +
> +				if (tcp_mask->hdr.src_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_TCP_SRC_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
> +				}
> +				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_TCP_DST_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
> +				}
> +
> +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> +					rte_memcpy(hdr->buffer,
> +						&tcp_spec->hdr,
> +						sizeof(tcp_spec->hdr));
> +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> +					rte_memcpy(hdr->buffer,
> +						&tcp_spec->hdr,
> +						sizeof(tcp_spec->hdr));
> +			}
> +
> +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_SCTP:
> +			sctp_spec = item->spec;
> +			sctp_mask = item->mask;
> +
> +			hdr = &filter-
> >add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
> +
> +			if (sctp_spec && sctp_mask) {
> +				if (sctp_mask->hdr.cksum) {
> +					rte_flow_error_set(error, EINVAL,
> +
> 	RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid SCTP mask");
> +					return -rte_errno;
> +				}
> +
> +				if (sctp_mask->hdr.src_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_SCTP_SRC_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
> +				}
> +				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
> +					input_set |=
> IAVF_INSET_SCTP_DST_PORT;
> +
> 	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
> +				}
> +
> +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> +					rte_memcpy(hdr->buffer,
> +						&sctp_spec->hdr,
> +						sizeof(sctp_spec->hdr));
> +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> +					rte_memcpy(hdr->buffer,
> +						&sctp_spec->hdr,
> +						sizeof(sctp_spec->hdr));
> +			}
> +
> +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_VOID:
> +			break;
> +
> +		default:
> +			rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM, item,
> +					"Invalid pattern item.");
> +			return -rte_errno;
> +		}
> +	}
> +
> +	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> +			"Protocol header layers exceed the maximum value");
> +		return -rte_errno;
> +	}
> +
> +	filter->input_set = input_set;
> +
> +	return 0;
> +}
> +
> +static int
> +iavf_fdir_parse(struct iavf_adapter *ad,
> +		struct iavf_pattern_match_item *array,
> +		uint32_t array_len,
> +		const struct rte_flow_item pattern[],
> +		const struct rte_flow_action actions[],
> +		void **meta,
> +		struct rte_flow_error *error)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> +	struct iavf_fdir_conf *filter = &vf->fdir.conf;
> +	struct iavf_pattern_match_item *item = NULL;
> +	uint64_t input_set;
> +	int ret;
> +
> +	memset(filter, 0, sizeof(*filter));
> +
> +	item = iavf_search_pattern_match_item(pattern, array, array_len,
> error);
> +	if (!item)
> +		return -rte_errno;
> +
> +	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
> +	if (ret)
> +		goto error;
> +
> +	input_set = filter->input_set;
> +	if (!input_set || input_set & ~item->input_set_mask) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
> +				"Invalid input set");
> +		ret = -rte_errno;
> +		goto error;
> +	}
> +
> +	ret = iavf_fdir_parse_action(ad, actions, error, filter);
> +	if (ret)
> +		goto error;
> +
> +	if (meta)
> +		*meta = filter;
> +
> +error:
> +	rte_free(item);
> +	return ret;
> +}
> +
> +static struct iavf_flow_parser iavf_fdir_parser = {
> +	.engine = &iavf_fdir_engine,
> +	.array = iavf_fdir_pattern,
> +	.array_len = RTE_DIM(iavf_fdir_pattern),
> +	.parse_pattern_action = iavf_fdir_parse,
> +	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
> +};
> +
> +RTE_INIT(iavf_fdir_engine_register)
> +{
> +	iavf_register_flow_engine(&iavf_fdir_engine);
> +}
> diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index
> 2307969..133e81c 100644
> --- a/drivers/net/iavf/iavf_vchnl.c
> +++ b/drivers/net/iavf/iavf_vchnl.c
> @@ -339,7 +339,8 @@
>  	caps = IAVF_BASIC_OFFLOAD_CAPS |
> VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
>  		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
>  		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
> -		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
> +		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
> +		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
> 
>  	args.in_args = (uint8_t *)&caps;
>  	args.in_args_size = sizeof(caps);
> @@ -906,3 +907,152 @@
> 
>  	return err;
>  }
> +
> +int
> +iavf_fdir_add(struct iavf_adapter *adapter,
> +	struct iavf_fdir_conf *filter)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> +	struct virtchnl_fdir_add *fdir_ret;
> +
> +	struct iavf_cmd_info args;
> +	int err;
> +
> +	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
> +	filter->add_fltr.validate_only = 0;
> +
> +	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> +	args.in_args = (uint8_t *)(&filter->add_fltr);
> +	args.in_args_size = sizeof(*(&filter->add_fltr));
> +	args.out_buffer = vf->aq_resp;
> +	args.out_size = IAVF_AQ_BUF_SZ;
> +
> +	err = iavf_execute_vf_cmd(adapter, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR, "fail to execute command
> OP_ADD_FDIR_FILTER");
> +		return err;
> +	}
> +
> +	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
> +	filter->flow_id = fdir_ret->flow_id;
> +
> +	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
> +		PMD_DRV_LOG(INFO,
> +			"add rule request is successfully done by PF");
> +	} else if (fdir_ret->status ==
> VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
> +		PMD_DRV_LOG(ERR,
> +			"add rule request is failed due to no hw resource");
> +		return -1;
> +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT)
> {
> +		PMD_DRV_LOG(ERR,
> +			"add rule request is failed due to the rule is already
> existed");
> +		return -1;
[Cao, Yahui] 
The logic here is: when VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT is found, it prints "already existed".
But according to the virtchnl definition:
 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
* OP_ADD_FDIR_FILTER request is failed due to the rule is already existed.
*
* VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
* OP_ADD_FDIR_FILTER request is failed due to conflict with existing rule.
RULE_CONFLICT means a conflict with an existing rule, while
RULE_EXIST means the rule already exists.
So I think you missed the VIRTCHNL_FDIR_FAILURE_RULE_EXIST condition and may have mismatched the error log.
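
Something like the sketch below (illustrative only, built from the two
status codes quoted above) would keep the logs consistent:

	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) {
		PMD_DRV_LOG(ERR,
			"add rule request is failed due to the rule is already existed");
		return -1;
	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
		PMD_DRV_LOG(ERR,
			"add rule request is failed due to conflict with existing rule");
		return -1;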

> +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
> +		PMD_DRV_LOG(ERR,
> +			"add rule request is failed due to the hw doesn't
> support");
> +		return -1;
> +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
> {
> +		PMD_DRV_LOG(ERR,
> +			"add rule request is failed due to time out for
> programming");
> +		return -1;
> +	} else {
> +		PMD_DRV_LOG(ERR,
> +			"add rule request is failed due to other reasons");
> +		return -1;
> +	}
> +
> +	return 0;
> +};
> +
> +int
> +iavf_fdir_del(struct iavf_adapter *adapter,
> +	struct iavf_fdir_conf *filter)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> +	struct virtchnl_fdir_del *fdir_ret;
> +
> +	struct iavf_cmd_info args;
> +	int err;
> +
> +	filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
> +	filter->del_fltr.flow_id = filter->flow_id;
> +
> +	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
> +	args.in_args = (uint8_t *)(&filter->del_fltr);
> +	args.in_args_size = sizeof(filter->del_fltr);
> +	args.out_buffer = vf->aq_resp;
> +	args.out_size = IAVF_AQ_BUF_SZ;
> +
> +	err = iavf_execute_vf_cmd(adapter, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR, "fail to execute command
> OP_DEL_FDIR_FILTER");
> +		return err;
> +	}
> +
> +	fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
> +
> +	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
> +		PMD_DRV_LOG(INFO,
> +			"delete rule request is successfully done by PF");
> +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST)
> {
> +		PMD_DRV_LOG(ERR,
> +			"delete rule request is failed due to this rule doesn't
> exist");
> +		return -1;
> +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
> {
> +		PMD_DRV_LOG(ERR,
> +			"delete rule request is failed due to time out for
> programming");
> +		return -1;
> +	} else {
> +		PMD_DRV_LOG(ERR,
> +			"delete rule request is failed due to other reasons");
> +		return -1;
> +	}
> +
> +	return 0;
> +};
> +
> +int
> +iavf_fdir_check(struct iavf_adapter *adapter,
> +		struct iavf_fdir_conf *filter)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> +	struct virtchnl_fdir_add *fdir_ret;
> +
> +	struct iavf_cmd_info args;
> +	int err;
> +
> +	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
> +	filter->add_fltr.validate_only = 1;
> +
> +	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> +	args.in_args = (uint8_t *)(&filter->add_fltr);
> +	args.in_args_size = sizeof(*(&filter->add_fltr));
> +	args.out_buffer = vf->aq_resp;
> +	args.out_size = IAVF_AQ_BUF_SZ;
> +
> +	err = iavf_execute_vf_cmd(adapter, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR, "fail to check flow director rule");
> +		return err;
> +	}
> +
> +	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
> +
> +	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
> +		PMD_DRV_LOG(INFO,
> +			"check rule request is successfully done by PF");
> +	}  else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
> +		PMD_DRV_LOG(ERR,
> +			"check rule request is failed due to parameters
> validation"
> +			" or HW doesn't support");
> +		return -1;
> +	} else {
> +		PMD_DRV_LOG(ERR,
> +			"check rule request is failed due to other reasons");
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +
> diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build index
> 5a5cdd5..f875b72 100644
> --- a/drivers/net/iavf/meson.build
> +++ b/drivers/net/iavf/meson.build
> @@ -14,6 +14,7 @@ sources = files(
>  	'iavf_vchnl.c',
>  	'iavf_generic_flow.c',
>  	'iavf_hash.c',
> +	'iavf_fdir.c',
>  )
> 
>  if arch_subdir == 'x86'
> --
> 1.8.3.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/5] net/iavf: add support for FDIR basic rule
  2020-04-10  7:40     ` Cao, Yahui
@ 2020-04-10  8:00       ` Su, Simei
  0 siblings, 0 replies; 43+ messages in thread
From: Su, Simei @ 2020-04-10  8:00 UTC (permalink / raw)
  To: Cao, Yahui, Zhang, Qi Z, Ye, Xiaolong, Wu, Jingjing; +Cc: dev

Hi, Yahui

> -----Original Message-----
> From: Cao, Yahui <yahui.cao@intel.com>
> Sent: Friday, April 10, 2020 3:40 PM
> To: Su, Simei <simei.su@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Ye,
> Xiaolong <xiaolong.ye@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 1/5] net/iavf: add support for FDIR basic rule
> 
> 
> 
> > -----Original Message-----
> > From: Su, Simei <simei.su@intel.com>
> > Sent: Thursday, April 2, 2020 9:33 PM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Ye, Xiaolong
> > <xiaolong.ye@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>; Su, Simei
> > <simei.su@intel.com>
> > Subject: [PATCH v2 1/5] net/iavf: add support for FDIR basic rule
> >
> > This patch adds FDIR create/destroy/validate function in AVF.
> > Common pattern and queue/qgroup/passthru/drop actions are supported.
> >
> > Signed-off-by: Simei Su <simei.su@intel.com>
> > ---
> >  drivers/net/iavf/Makefile     |   1 +
> >  drivers/net/iavf/iavf.h       |  17 +
> >  drivers/net/iavf/iavf_fdir.c  | 749
> > ++++++++++++++++++++++++++++++++++++++++++
> >  drivers/net/iavf/iavf_vchnl.c | 152 ++++++++-
> >  drivers/net/iavf/meson.build  |   1 +
> >  5 files changed, 919 insertions(+), 1 deletion(-)  create mode 100644
> > drivers/net/iavf/iavf_fdir.c
> >
> > diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
> > index
> > 7b0093a..b2b75d7 100644
> > --- a/drivers/net/iavf/Makefile
> > +++ b/drivers/net/iavf/Makefile
> > @@ -25,6 +25,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) +=
> iavf_vchnl.c
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
> > +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
> >  ifeq ($(CONFIG_RTE_ARCH_X86), y)
> >  SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c  endif
> diff
> > --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index
> > afec8b2..e2b1d5f
> > 100644
> > --- a/drivers/net/iavf/iavf.h
> > +++ b/drivers/net/iavf/iavf.h
> > @@ -106,6 +106,17 @@ struct iavf_vsi {  struct iavf_flow_parser_node;
> > TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
> >
> > +struct iavf_fdir_conf {
> > +struct virtchnl_fdir_add add_fltr;
> > +struct virtchnl_fdir_del del_fltr;
> > +uint64_t input_set;
> > +uint32_t flow_id;
> > +};
> > +
> > +struct iavf_fdir_info {
> > +struct iavf_fdir_conf conf;
> > +};
> > +
> >  /* TODO: is that correct to assume the max number to be 16 ?*/
> >  #define IAVF_MAX_MSIX_VECTORS   16
> >
> > @@ -145,6 +156,8 @@ struct iavf_info {  struct iavf_flow_list
> > flow_list;  struct iavf_parser_list rss_parser_list;  struct
> > iavf_parser_list dist_parser_list;
> > +
> > +struct iavf_fdir_info fdir; /* flow director info */
> >  };
> >
> >  #define IAVF_MAX_PKT_TYPE 1024
> > @@ -270,4 +283,8 @@ int iavf_add_del_eth_addr(struct iavf_adapter
> > *adapter, int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t
> > vlanid, bool add); int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
> >   struct virtchnl_rss_cfg *rss_cfg, bool add);
> > +int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf
> > +*filter); int iavf_fdir_del(struct iavf_adapter *adapter, struct
> > +iavf_fdir_conf *filter); int iavf_fdir_check(struct iavf_adapter
> > +*adapter, struct iavf_fdir_conf *filter);
> >  #endif /* _IAVF_ETHDEV_H_ */
> > diff --git a/drivers/net/iavf/iavf_fdir.c
> > b/drivers/net/iavf/iavf_fdir.c new file mode 100644 index
> > 0000000..ea529b6
> > --- /dev/null
> > +++ b/drivers/net/iavf/iavf_fdir.c
> > @@ -0,0 +1,749 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2019 Intel Corporation  */
> > +
> > +#include <sys/queue.h>
> > +#include <stdio.h>
> > +#include <errno.h>
> > +#include <stdint.h>
> > +#include <string.h>
> > +#include <unistd.h>
> > +#include <stdarg.h>
> > +
> > +#include <rte_ether.h>
> > +#include <rte_ethdev_driver.h>
> > +#include <rte_malloc.h>
> > +#include <rte_tailq.h>
> > +
> > +#include "iavf.h"
> > +#include "iavf_generic_flow.h"
> > +#include "virtchnl.h"
> > +
> > +#define IAVF_FDIR_MAX_QREGION_SIZE 128
> > +
> > +#define IAVF_FDIR_IPV6_TC_OFFSET 20
> > +#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
> > +
> > +#define IAVF_FDIR_INSET_ETH (\
> > +IAVF_INSET_ETHERTYPE)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4 (\
> > +IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> IAVF_INSET_IPV4_PROTO |
> > +IAVF_INSET_IPV4_TOS | \
> > +IAVF_INSET_IPV4_TTL)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\ IAVF_INSET_IPV4_SRC |
> > +IAVF_INSET_IPV4_DST | \ IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL
> | \
> > +IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\ IAVF_INSET_IPV4_SRC |
> > +IAVF_INSET_IPV4_DST | \ IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL
> | \
> > +IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\ IAVF_INSET_IPV4_SRC |
> > +IAVF_INSET_IPV4_DST | \ IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL
> | \
> > +IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6 (\
> > +IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> > +IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
> > +IAVF_INSET_IPV6_HOP_LIMIT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\ IAVF_INSET_IPV6_SRC |
> > +IAVF_INSET_IPV6_DST | \ IAVF_INSET_IPV6_TC |
> > +IAVF_INSET_IPV6_HOP_LIMIT | \ IAVF_INSET_UDP_SRC_PORT |
> > +IAVF_INSET_UDP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\ IAVF_INSET_IPV6_SRC |
> > +IAVF_INSET_IPV6_DST | \ IAVF_INSET_IPV6_TC |
> > +IAVF_INSET_IPV6_HOP_LIMIT | \ IAVF_INSET_TCP_SRC_PORT |
> > +IAVF_INSET_TCP_DST_PORT)
> > +
> > +#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\ IAVF_INSET_IPV6_SRC |
> > +IAVF_INSET_IPV6_DST | \ IAVF_INSET_IPV6_TC |
> > +IAVF_INSET_IPV6_HOP_LIMIT | \ IAVF_INSET_SCTP_SRC_PORT |
> > +IAVF_INSET_SCTP_DST_PORT)
> > +
> > +static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
> > +{iavf_pattern_ethertype,IAVF_FDIR_INSET_ETH,
> > IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv4,IAVF_FDIR_INSET_ETH_IPV4,
> > IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv4_udp,
> > IAVF_FDIR_INSET_ETH_IPV4_UDP,IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv4_tcp,
> > IAVF_FDIR_INSET_ETH_IPV4_TCP,IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv4_sctp,
> > IAVF_FDIR_INSET_ETH_IPV4_SCTP,IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv6,IAVF_FDIR_INSET_ETH_IPV6,
> > IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv6_udp,
> > IAVF_FDIR_INSET_ETH_IPV6_UDP,IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv6_tcp,
> > IAVF_FDIR_INSET_ETH_IPV6_TCP,IAVF_INSET_NONE},
> > +{iavf_pattern_eth_ipv6_sctp,
> > IAVF_FDIR_INSET_ETH_IPV6_SCTP,IAVF_INSET_NONE},
> > +};
> > +
> > +static struct iavf_flow_parser iavf_fdir_parser;
> > +
> > +static int
> > +iavf_fdir_init(struct iavf_adapter *ad) { struct iavf_info *vf =
> > +IAVF_DEV_PRIVATE_TO_VF(ad); struct iavf_flow_parser *parser;
> > +
> > +if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) parser =
> > +&iavf_fdir_parser; else return -ENOTSUP;
> > +
> > +return iavf_register_parser(parser, ad); }
> > +
> > +static void
> > +iavf_fdir_uninit(struct iavf_adapter *ad) { struct iavf_flow_parser
> > +*parser;
> > +
> > +parser = &iavf_fdir_parser;
> > +
> > +iavf_unregister_parser(parser, ad);
> > +}
> > +
> > +static int
> > +iavf_fdir_create(struct iavf_adapter *ad, struct rte_flow *flow, void
> > +*meta, struct rte_flow_error *error) { struct iavf_fdir_conf *filter
> > += meta; struct iavf_fdir_conf *rule; int ret;
> > +
> > +rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0); if (!rule) {
> > +rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
> NULL,
> > +"Failed to allocate memory"); return -rte_errno; }
> > +
> > +ret = iavf_fdir_add(ad, filter);
> > +if (ret) {
> > +rte_flow_error_set(error, -ret,
> > +RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +"Add filter rule failed.");
> > +goto free_entry;
> > +}
> > +
> > +rte_memcpy(rule, filter, sizeof(*rule));
> > +flow->rule = rule;
> > +
> > +return 0;
> > +
> > +free_entry:
> > +rte_free(rule);
> > +return -rte_errno;
> > +}
> > +
> > +static int
> > +iavf_fdir_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
> > +struct rte_flow_error *error) { struct iavf_fdir_conf *filter; int
> > +ret;
> > +
> > +filter = (struct iavf_fdir_conf *)flow->rule;
> > +
> > +ret = iavf_fdir_del(ad, filter);
> > +if (ret) {
> > +rte_flow_error_set(error, -ret,
> > +RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +"Del filter rule failed.");
> > +return -rte_errno;
> > +}
> > +
> > +flow->rule = NULL;
> > +rte_free(filter);
> > +
> > +return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_validation(struct iavf_adapter *ad, __rte_unused struct
> > +rte_flow *flow, void *meta, struct rte_flow_error *error) { struct
> > +iavf_fdir_conf *filter = meta; int ret;
> > +
> > +ret = iavf_fdir_check(ad, filter);
> > +if (ret) {
> > +rte_flow_error_set(error, -ret,
> > +RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +"Validate filter rule failed.");
> > +return -rte_errno;
> > +}
> > +
> > +return 0;
> > +};
> > +
> > +static struct iavf_flow_engine iavf_fdir_engine = { .init =
> > +iavf_fdir_init, .uninit = iavf_fdir_uninit, .create =
> > +iavf_fdir_create, .destroy = iavf_fdir_destroy, .validation =
> > +iavf_fdir_validation, .type = IAVF_FLOW_ENGINE_FDIR, };
> > +
> > +static int
> > +iavf_fdir_parse_action_qregion(struct iavf_adapter *ad, struct
> > +rte_flow_error *error, const struct rte_flow_action *act, struct
> > +virtchnl_filter_action *filter_action) { const struct
> > +rte_flow_action_rss *rss = act->conf; uint32_t i;
> > +
> > +if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> > +rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"Invalid action."); return -rte_errno; }
> > +
> > +if (rss->queue_num <= 1) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"Queue region size can't be 0 or 1."); return -rte_errno; }
> > +
> > +/* check if queue index for queue region is continuous */ for (i = 0;
> > +i < rss->queue_num - 1; i++) { if (rss->queue[i + 1] != rss->queue[i]
> > ++ 1) { rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ACTION,
> > +act, "Discontinuous queue region"); return -rte_errno; } }
> > +
> > +if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data-
> > >nb_rx_queues) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"Invalid queue region indexes.");
> > +return -rte_errno;
> > +}
> > +
> > +if (!(rte_is_power_of_2(rss->queue_num) &&
> > +rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION, act,
> > +"The region size should be any of the following
> > values:"
> > +"1, 2, 4, 8, 16, 32, 64, 128 as long as the total
> > number "
> > +"of queues do not exceed the VSI allocation."); return -rte_errno; }
> > +
> > +filter_action->q_index = rss->queue[0]; filter_action->q_region =
> > +rte_fls_u32(rss->queue_num) - 1;
> > +
> > +return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_parse_action(struct iavf_adapter *ad, const struct
> > +rte_flow_action actions[], struct rte_flow_error *error, struct
> > +iavf_fdir_conf *filter) { const struct rte_flow_action_queue *act_q;
> > +uint32_t dest_num = 0; int ret;
> > +
> > +int number = 0;
> > +struct virtchnl_filter_action *filter_action;
> > +
> > +for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch
> > +(actions->type) { case RTE_FLOW_ACTION_TYPE_VOID:
> > +break;
> > +
> > +case RTE_FLOW_ACTION_TYPE_PASSTHRU:
> > +dest_num++;
> > +
> > +filter_action =
> > +&filter->add_fltr.rule_cfg.action_set.actions[number];
> > +
> > +filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
> > +
> > +filter->add_fltr.rule_cfg.action_set.count = ++number;
> > +break;
> > +
> > +case RTE_FLOW_ACTION_TYPE_DROP:
> > +dest_num++;
> > +
> > +filter_action =
> > +&filter->add_fltr.rule_cfg.action_set.actions[number];
> > +
> > +filter_action->type = VIRTCHNL_ACTION_DROP;
> > +
> > +filter->add_fltr.rule_cfg.action_set.count = ++number;
> > +break;
> > +
> > +case RTE_FLOW_ACTION_TYPE_QUEUE:
> > +dest_num++;
> > +
> > +act_q = actions->conf;
> > +filter_action =
> > +&filter->add_fltr.rule_cfg.action_set.actions[number];
> > +
> > +filter_action->type = VIRTCHNL_ACTION_QUEUE; filter_action->q_index =
> > +act_q->index;
> > +
> > +if (filter_action->q_index >=
> > +ad->eth_dev->data->nb_rx_queues) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION,
> > +actions, "Invalid queue for FDIR.");
> > +return -rte_errno;
> > +}
> > +
> > +filter->add_fltr.rule_cfg.action_set.count = ++number;
> > +break;
> > +
> > +case RTE_FLOW_ACTION_TYPE_RSS:
> > +dest_num++;
> > +
> > +filter_action =
> > +&filter->add_fltr.rule_cfg.action_set.actions[number];
> > +
> > +filter_action->type = VIRTCHNL_ACTION_Q_REGION;
> > +
> > +ret = iavf_fdir_parse_action_qregion(ad,
> > +error, actions, filter_action);
> > +if (ret)
> > +return ret;
> > +
> > +filter->add_fltr.rule_cfg.action_set.count = ++number;
> > +break;
> > +
> > +default:
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ACTION,
> > actions,
> > +"Invalid action.");
> > +return -rte_errno;
> > +}
> > +}
> > +
> > +if (number > VIRTCHNL_MAX_NUM_ACTIONS) { rte_flow_error_set(error,
> > +EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "Action numbers
> exceed
> > +the maximum value"); return -rte_errno; }
> > +
> > +if (dest_num == 0 || dest_num >= 2) { rte_flow_error_set(error,
> > +EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "Unsupported action
> > +combination"); return -rte_errno; }
> > +
> > +return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad, const
> > +struct rte_flow_item pattern[], struct rte_flow_error *error, struct
> > +iavf_fdir_conf *filter) { const struct rte_flow_item *item = pattern;
> > +enum rte_flow_item_type item_type; enum rte_flow_item_type l3 =
> > +RTE_FLOW_ITEM_TYPE_END; const struct rte_flow_item_eth *eth_spec,
> > +*eth_mask; const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> > +const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; const struct
> > +rte_flow_item_udp *udp_spec, *udp_mask; const struct
> > +rte_flow_item_tcp *tcp_spec, *tcp_mask; const struct
> > +rte_flow_item_sctp *sctp_spec, *sctp_mask; uint64_t input_set =
> > +IAVF_INSET_NONE;
> > +
> > +enum rte_flow_item_type next_type;
> > +uint16_t ether_type;
> > +
> > +int layer = 0;
> > +struct virtchnl_proto_hdr *hdr;
> > +
> > +uint8_t  ipv6_addr_mask[16] = {
> > +0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
> > +0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
> > +
> > +for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++)
> > {
> > +if (item->last) {
> > +rte_flow_error_set(error, EINVAL,
> > +RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +"Not support range");
> > +}
> > +
> > +item_type = item->type;
> > +
> > +switch (item_type) {
> > +case RTE_FLOW_ITEM_TYPE_ETH:
> > +eth_spec = item->spec;
> > +eth_mask = item->mask;
> > +next_type = (item + 1)->type;
> > +
> > +hdr = &filter-
> > >add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
> > +
> > +if (next_type == RTE_FLOW_ITEM_TYPE_END && (!eth_spec
> || !eth_mask))
> > +{ rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > +item, "NULL eth spec/mask.");
> > +return -rte_errno;
> > +}
> > +
> > +if (eth_spec && eth_mask) {
> > +if (!rte_is_zero_ether_addr(&eth_mask->src) ||
> > +    !rte_is_zero_ether_addr(&eth_mask->dst)) {
> > +rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +"Invalid MAC_addr mask.");
> > +return -rte_errno;
> > +}
> > +}
> > +
> > +if (eth_spec && eth_mask && eth_mask->type) { if (eth_mask->type !=
> > +RTE_BE16(0xffff)) { rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > +item, "Invalid type mask.");
> > +return -rte_errno;
> > +}
> > +
> > +ether_type = rte_be_to_cpu_16(eth_spec-
> > >type);
> > +if (ether_type == RTE_ETHER_TYPE_IPV4 || ether_type ==
> > +RTE_ETHER_TYPE_IPV6)
> > {
> > +rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > +item,
> > +"Unsupported ether_type.");
> > +return -rte_errno;
> > +}
> > +
> > +input_set |= IAVF_INSET_ETHERTYPE;
> > +VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> > ETH, ETHERTYPE);
> > +
> > +rte_memcpy(hdr->buffer,
> > +eth_spec, sizeof(*eth_spec));
> > +}
> > +
> > +filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +break;
> > +
> > +case RTE_FLOW_ITEM_TYPE_IPV4:
> > +l3 = RTE_FLOW_ITEM_TYPE_IPV4;
> > +ipv4_spec = item->spec;
> > +ipv4_mask = item->mask;
> > +
> > +hdr = &filter-
> > >add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
> > +
> > +if (ipv4_spec && ipv4_mask) {
> > +if (ipv4_mask->hdr.version_ihl ||
> > +ipv4_mask->hdr.total_length ||
> > +ipv4_mask->hdr.packet_id ||
> > +ipv4_mask->hdr.fragment_offset ||
> > +ipv4_mask->hdr.hdr_checksum) {
> > +rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > +item, "Invalid IPv4 mask.");
> > +return -rte_errno;
> > +}
> > +
> > +if (ipv4_mask->hdr.type_of_service ==
> > +UINT8_MAX) {
> > +input_set |= IAVF_INSET_IPV4_TOS;
> > +
> > VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
> > +}
> > +if (ipv4_mask->hdr.next_proto_id ==
> > UINT8_MAX) {
> > +input_set |= IAVF_INSET_IPV4_PROTO;
> > +
> > VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
> > +}
> > +if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> > {
> > +input_set |= IAVF_INSET_IPV4_TTL;
> > +
> > VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
> > +}
> > +if (ipv4_mask->hdr.src_addr == UINT32_MAX) { input_set |=
> > +IAVF_INSET_IPV4_SRC;
> > +
> > VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
> > +}
> > +if (ipv4_mask->hdr.dst_addr == UINT32_MAX) { input_set |=
> > +IAVF_INSET_IPV4_DST;
> > +
> > VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
> > +}
> > +
> > +rte_memcpy(hdr->buffer,
> > +&ipv4_spec->hdr,
> > +sizeof(ipv4_spec->hdr));
> > +}
> > +
> > +filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +break;
> > +
> > +case RTE_FLOW_ITEM_TYPE_IPV6:
> > +l3 = RTE_FLOW_ITEM_TYPE_IPV6;
> > +ipv6_spec = item->spec;
> > +ipv6_mask = item->mask;
> > +
> > +hdr = &filter-
> > >add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
> > +
> > +if (ipv6_spec && ipv6_mask) {
> > +if (ipv6_mask->hdr.payload_len) {
> > +rte_flow_error_set(error, EINVAL,
> > +
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item, "Invalid IPv6 mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if ((ipv6_mask->hdr.vtc_flow &
> > +					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
> > +					== rte_cpu_to_be_32(
> > +							IAVF_IPV6_TC_MASK)) {
> > +					input_set |= IAVF_INSET_IPV6_TC;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
> > +				}
> > +				if (ipv6_mask->hdr.proto == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
> > +				}
> > +				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
> > +					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
> > +				}
> > +				if (!memcmp(ipv6_mask->hdr.src_addr,
> > +					ipv6_addr_mask,
> > +					RTE_DIM(ipv6_mask->hdr.src_addr))) {
> > +					input_set |= IAVF_INSET_IPV6_SRC;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
> > +				}
> > +				if (!memcmp(ipv6_mask->hdr.dst_addr,
> > +					ipv6_addr_mask,
> > +					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
> > +					input_set |= IAVF_INSET_IPV6_DST;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
> > +				}
> > +
> > +				rte_memcpy(hdr->buffer,
> > +					&ipv6_spec->hdr,
> > +					sizeof(ipv6_spec->hdr));
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_UDP:
> > +			udp_spec = item->spec;
> > +			udp_mask = item->mask;
> > +
> > +			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
> > +
> > +			if (udp_spec && udp_mask) {
> > +				if (udp_mask->hdr.dgram_len ||
> > +					udp_mask->hdr.dgram_cksum) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +						"Invalid UDP mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (udp_mask->hdr.src_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_UDP_SRC_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
> > +				}
> > +				if (udp_mask->hdr.dst_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_UDP_DST_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
> > +				}
> > +
> > +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> > +					rte_memcpy(hdr->buffer,
> > +						&udp_spec->hdr,
> > +						sizeof(udp_spec->hdr));
> > +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> > +					rte_memcpy(hdr->buffer,
> > +						&udp_spec->hdr,
> > +						sizeof(udp_spec->hdr));
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_TCP:
> > +			tcp_spec = item->spec;
> > +			tcp_mask = item->mask;
> > +
> > +			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
> > +
> > +			if (tcp_spec && tcp_mask) {
> > +				if (tcp_mask->hdr.sent_seq ||
> > +					tcp_mask->hdr.recv_ack ||
> > +					tcp_mask->hdr.data_off ||
> > +					tcp_mask->hdr.tcp_flags ||
> > +					tcp_mask->hdr.rx_win ||
> > +					tcp_mask->hdr.cksum ||
> > +					tcp_mask->hdr.tcp_urp) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +						"Invalid TCP mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (tcp_mask->hdr.src_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_TCP_SRC_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
> > +				}
> > +				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_TCP_DST_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
> > +				}
> > +
> > +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> > +					rte_memcpy(hdr->buffer,
> > +						&tcp_spec->hdr,
> > +						sizeof(tcp_spec->hdr));
> > +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> > +					rte_memcpy(hdr->buffer,
> > +						&tcp_spec->hdr,
> > +						sizeof(tcp_spec->hdr));
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_SCTP:
> > +			sctp_spec = item->spec;
> > +			sctp_mask = item->mask;
> > +
> > +			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> > +
> > +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
> > +
> > +			if (sctp_spec && sctp_mask) {
> > +				if (sctp_mask->hdr.cksum) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +						"Invalid UDP mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (sctp_mask->hdr.src_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_SCTP_SRC_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
> > +				}
> > +				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
> > +					input_set |= IAVF_INSET_SCTP_DST_PORT;
> > +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
> > +				}
> > +
> > +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> > +					rte_memcpy(hdr->buffer,
> > +						&sctp_spec->hdr,
> > +						sizeof(sctp_spec->hdr));
> > +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> > +					rte_memcpy(hdr->buffer,
> > +						&sctp_spec->hdr,
> > +						sizeof(sctp_spec->hdr));
> > +			}
> > +
> > +			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_VOID:
> > +			break;
> > +
> > +		default:
> > +			rte_flow_error_set(error, EINVAL,
> > +					RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +					"Invalid pattern item.");
> > +			return -rte_errno;
> > +		}
> > +	}
> > +
> > +	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> > +			"Protocol header layers exceed the maximum value");
> > +		return -rte_errno;
> > +	}
> > +
> > +	filter->input_set = input_set;
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +iavf_fdir_parse(struct iavf_adapter *ad,
> > +		struct iavf_pattern_match_item *array,
> > +		uint32_t array_len,
> > +		const struct rte_flow_item pattern[],
> > +		const struct rte_flow_action actions[],
> > +		void **meta,
> > +		struct rte_flow_error *error)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> > +	struct iavf_fdir_conf *filter = &vf->fdir.conf;
> > +	struct iavf_pattern_match_item *item = NULL;
> > +	uint64_t input_set;
> > +	int ret;
> > +
> > +	memset(filter, 0, sizeof(*filter));
> > +
> > +	item = iavf_search_pattern_match_item(pattern, array, array_len,
> > error);
> > +	if (!item)
> > +		return -rte_errno;
> > +
> > +	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
> > +	if (ret)
> > +		goto error;
> > +
> > +	input_set = filter->input_set;
> > +	if (!input_set || input_set & ~item->input_set_mask) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
> > +				"Invalid input set");
> > +		ret = -rte_errno;
> > +		goto error;
> > +	}
> > +
> > +	ret = iavf_fdir_parse_action(ad, actions, error, filter);
> > +	if (ret)
> > +		goto error;
> > +
> > +	if (meta)
> > +		*meta = filter;
> > +
> > +error:
> > +	rte_free(item);
> > +	return ret;
> > +}
> > +
> > +static struct iavf_flow_parser iavf_fdir_parser = {
> > +	.engine = &iavf_fdir_engine,
> > +	.array = iavf_fdir_pattern,
> > +	.array_len = RTE_DIM(iavf_fdir_pattern),
> > +	.parse_pattern_action = iavf_fdir_parse,
> > +	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
> > +};
> > +
> > +RTE_INIT(iavf_fdir_engine_register)
> > +{
> > +	iavf_register_flow_engine(&iavf_fdir_engine);
> > +}
> > diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
> > index 2307969..133e81c 100644
> > --- a/drivers/net/iavf/iavf_vchnl.c
> > +++ b/drivers/net/iavf/iavf_vchnl.c
> > @@ -339,7 +339,8 @@
> >  	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
> >  		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
> >  		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
> > -		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
> > +		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
> > +		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
> >
> >  	args.in_args = (uint8_t *)&caps;
> >  	args.in_args_size = sizeof(caps);
> > @@ -906,3 +907,152 @@
> >
> >  	return err;
> >  }
> > +
> > +int
> > +iavf_fdir_add(struct iavf_adapter *adapter,
> > +	struct iavf_fdir_conf *filter)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> > +	struct virtchnl_fdir_add *fdir_ret;
> > +
> > +	struct iavf_cmd_info args;
> > +	int err;
> > +
> > +	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
> > +	filter->add_fltr.validate_only = 0;
> > +
> > +	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> > +	args.in_args = (uint8_t *)(&filter->add_fltr);
> > +	args.in_args_size = sizeof(*(&filter->add_fltr));
> > +	args.out_buffer = vf->aq_resp;
> > +	args.out_size = IAVF_AQ_BUF_SZ;
> > +
> > +	err = iavf_execute_vf_cmd(adapter, &args);
> > +	if (err) {
> > +		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
> > +		return err;
> > +	}
> > +
> > +	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
> > +	filter->flow_id = fdir_ret->flow_id;
> > +
> > +	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
> > +		PMD_DRV_LOG(INFO,
> > +			"add rule request is successfully done by PF");
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
> > +		PMD_DRV_LOG(ERR,
> > +			"add rule request is failed due to no hw resource");
> > +		return -1;
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
> > +		PMD_DRV_LOG(ERR,
> > +			"add rule request is failed due to the rule is already existed");
> > +		return -1;
> [Cao, Yahui]
> The logic here prints "already existed" when
> VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT is found. But per the virtchnl
> definition:
>  * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
>  * OP_ADD_FDIR_FILTER request is failed due to the rule is already existed.
>  *
>  * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
>  * OP_ADD_FDIR_FILTER request is failed due to conflict with existing rule.
> RULE_CONFLICT means a conflict with an existing rule, while RULE_EXIST means
> the rule already exists. So I think you missed the
> VIRTCHNL_FDIR_FAILURE_RULE_EXIST condition and may have mismatched the
> error log.
> 

 Yes, you are right: I missed the VIRTCHNL_FDIR_FAILURE_RULE_EXIST condition
 here and mismatched the corresponding error log.
 Thanks.

Br
Simei
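
(For reference, the corrected handling adopted in v3 below can be sketched as
a switch over the virtchnl status; the log wording here is illustrative, not
the exact driver text:)

    /* Distinguish RULE_EXIST (duplicate rule) from RULE_CONFLICT
     * (clash with an existing rule) with their own log messages.
     */
    switch (fdir_ret->status) {
    case VIRTCHNL_FDIR_SUCCESS:
            break;
    case VIRTCHNL_FDIR_FAILURE_RULE_EXIST:
            PMD_DRV_LOG(ERR, "add rule failed: rule already exists");
            return -1;
    case VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT:
            PMD_DRV_LOG(ERR, "add rule failed: conflict with an existing rule");
            return -1;
    default:
            PMD_DRV_LOG(ERR, "add rule failed: status %d", fdir_ret->status);
            return -1;
    }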

> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
> > +		PMD_DRV_LOG(ERR,
> > +			"add rule request is failed due to the hw doesn't support");
> > +		return -1;
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
> > +		PMD_DRV_LOG(ERR,
> > +			"add rule request is failed due to time out for programming");
> > +		return -1;
> > +	} else {
> > +		PMD_DRV_LOG(ERR,
> > +			"add rule request is failed due to other reasons");
> > +		return -1;
> > +	}
> > +
> > +	return 0;
> > +};
> > +
> > +int
> > +iavf_fdir_del(struct iavf_adapter *adapter,
> > +	struct iavf_fdir_conf *filter)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> > +	struct virtchnl_fdir_del *fdir_ret;
> > +
> > +	struct iavf_cmd_info args;
> > +	int err;
> > +
> > +	filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
> > +	filter->del_fltr.flow_id = filter->flow_id;
> > +
> > +	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
> > +	args.in_args = (uint8_t *)(&filter->del_fltr);
> > +	args.in_args_size = sizeof(filter->del_fltr);
> > +	args.out_buffer = vf->aq_resp;
> > +	args.out_size = IAVF_AQ_BUF_SZ;
> > +
> > +	err = iavf_execute_vf_cmd(adapter, &args);
> > +	if (err) {
> > +		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
> > +		return err;
> > +	}
> > +
> > +	fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
> > +
> > +	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
> > +		PMD_DRV_LOG(INFO,
> > +			"delete rule request is successfully done by PF");
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
> > +		PMD_DRV_LOG(ERR,
> > +			"delete rule request is failed due to this rule doesn't exist");
> > +		return -1;
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
> > +		PMD_DRV_LOG(ERR,
> > +			"delete rule request is failed due to time out for programming");
> > +		return -1;
> > +	} else {
> > +		PMD_DRV_LOG(ERR,
> > +			"delete rule request is failed due to other reasons");
> > +		return -1;
> > +	}
> > +
> > +	return 0;
> > +};
> > +
> > +int
> > +iavf_fdir_check(struct iavf_adapter *adapter,
> > +		struct iavf_fdir_conf *filter)
> > +{
> > +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> > +	struct virtchnl_fdir_add *fdir_ret;
> > +
> > +	struct iavf_cmd_info args;
> > +	int err;
> > +
> > +	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
> > +	filter->add_fltr.validate_only = 1;
> > +
> > +	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> > +	args.in_args = (uint8_t *)(&filter->add_fltr);
> > +	args.in_args_size = sizeof(*(&filter->add_fltr));
> > +	args.out_buffer = vf->aq_resp;
> > +	args.out_size = IAVF_AQ_BUF_SZ;
> > +
> > +	err = iavf_execute_vf_cmd(adapter, &args);
> > +	if (err) {
> > +		PMD_DRV_LOG(ERR, "fail to check flow director rule");
> > +		return err;
> > +	}
> > +
> > +	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
> > +
> > +	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
> > +		PMD_DRV_LOG(INFO,
> > +			"check rule request is successfully done by PF");
> > +	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
> > +		PMD_DRV_LOG(ERR,
> > +			"check rule request is failed due to parameters validation"
> > +			" or HW doesn't support");
> > +		return -1;
> > +	} else {
> > +		PMD_DRV_LOG(ERR,
> > +			"check rule request is failed due to other reasons");
> > +		return -1;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +
> > diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
> > index 5a5cdd5..f875b72 100644
> > --- a/drivers/net/iavf/meson.build
> > +++ b/drivers/net/iavf/meson.build
> > @@ -14,6 +14,7 @@ sources = files(
> >  	'iavf_vchnl.c',
> >  	'iavf_generic_flow.c',
> > +	'iavf_fdir.c',
> >  )
> >
> >  if arch_subdir == 'x86'
> > --
> > 1.8.3.1
> 


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capability
  2020-04-02 13:32 ` [dpdk-dev] [PATCH v2 " Simei Su
                     ` (4 preceding siblings ...)
  2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 5/5] net/iavf: add support for FDIR mark action Simei Su
@ 2020-04-10 10:18   ` Simei Su
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 1/5] net/iavf: add support for FDIR basic rule Simei Su
                       ` (5 more replies)
  5 siblings, 6 replies; 43+ messages in thread
From: Simei Su @ 2020-04-10 10:18 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

[PATCH 1/5] support FDIR common patterns and actions.
[PATCH 2/5] support FDIR GTPU pattern.
[PATCH 3/5] support FDIR L2TPv3, ESP, AH and NAT-T pattern.
[PATCH 4/5] support FDIR PFCP node and session pattern.
[PATCH 5/5] support FDIR mark action.

This patchset depends on the following patches on patchwork:
https://patchwork.dpdk.org/cover/67953/
    [v3,00/11] framework for advanced iAVF PMD

v3:
* Add release notes.
* Update action structures based on virtchnl update.
* Add VIRTCHNL_FDIR_FAILURE_RULE_EXIST condition check and modify
  the error log.

v2:
* Update pattern and action structures based on latest virtchnl design.
* Add upper bound check for pattern layers and action numbers.
* Increase action number in mark only case.
* Consider more circumstances about PF error return status.

Simei Su (5):
  net/iavf: add support for FDIR basic rule
  net/iavf: add support for FDIR GTPU
  net/iavf: add support for FDIR L2TPv3 and IPSec
  net/iavf: add support for FDIR PFCP
  net/iavf: add support for FDIR mark action

 doc/guides/rel_notes/release_20_05.rst |   1 +
 drivers/net/iavf/Makefile              |   1 +
 drivers/net/iavf/iavf.h                |  18 +
 drivers/net/iavf/iavf_fdir.c           | 973 +++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c          | 154 +++++-
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 1147 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v3 1/5] net/iavf: add support for FDIR basic rule
  2020-04-10 10:18   ` [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capability Simei Su
@ 2020-04-10 10:18     ` Simei Su
  2020-04-14  7:37       ` Ye Xiaolong
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 2/5] net/iavf: add support for FDIR GTPU Simei Su
                       ` (4 subsequent siblings)
  5 siblings, 1 reply; 43+ messages in thread
From: Simei Su @ 2020-04-10 10:18 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch adds FDIR create/destroy/validate function in AVF.
Common pattern and queue/qgroup/passthru/drop actions are supported.

Signed-off-by: Simei Su <simei.su@intel.com>
---
 doc/guides/rel_notes/release_20_05.rst |   1 +
 drivers/net/iavf/Makefile              |   1 +
 drivers/net/iavf/iavf.h                |  17 +
 drivers/net/iavf/iavf_fdir.c           | 749 +++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c          | 154 ++++++-
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 922 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c
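
For illustration (not part of the patch), a rule accepted by this parser can
be created through the generic rte_flow API. This minimal sketch steers
IPv4/UDP traffic with destination port 5000 to Rx queue 3; port_id and the
queue index are placeholders:

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Sketch only; error handling trimmed. iavf_fdir_parse() maps this
     * pattern/action set to a VIRTCHNL_OP_ADD_FDIR_FILTER request.
     */
    static struct rte_flow *
    fdir_udp_to_queue(uint16_t port_id)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_udp udp_spec = {
                    .hdr = { .dst_port = RTE_BE16(5000) },
            };
            struct rte_flow_item_udp udp_mask = {
                    .hdr = { .dst_port = RTE_BE16(0xffff) },
            };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP,
                      .spec = &udp_spec, .mask = &udp_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 3 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error err;

            return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }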

diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
index b5962d8..17299ef 100644
--- a/doc/guides/rel_notes/release_20_05.rst
+++ b/doc/guides/rel_notes/release_20_05.rst
@@ -99,6 +99,7 @@ New Features
 
   * Added generic filter support.
   * Added advanced RSS configuration for VFs.
+  * Added FDIR capability to the advanced iavf PMD.
 
 
 Removed Items
diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
index 7b0093a..b2b75d7 100644
--- a/drivers/net/iavf/Makefile
+++ b/drivers/net/iavf/Makefile
@@ -25,6 +25,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
 ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
 endif
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index d813296..2f84a1f 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -92,6 +92,17 @@ struct iavf_vsi {
 struct iavf_flow_parser_node;
 TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
 
+struct iavf_fdir_conf {
+	struct virtchnl_fdir_add add_fltr;
+	struct virtchnl_fdir_del del_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
+struct iavf_fdir_info {
+	struct iavf_fdir_conf conf;
+};
+
 /* TODO: is that correct to assume the max number to be 16 ?*/
 #define IAVF_MAX_MSIX_VECTORS   16
 
@@ -131,6 +142,8 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+
+	struct iavf_fdir_info fdir; /* flow director info */
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -254,4 +267,8 @@ int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
 int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
 int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 			 struct virtchnl_rss_cfg *rss_cfg, bool add);
+int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_check(struct iavf_adapter *adapter,
+		struct iavf_fdir_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
new file mode 100644
index 0000000..f2b10d7
--- /dev/null
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -0,0 +1,749 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+#include "virtchnl.h"
+
+#define IAVF_FDIR_MAX_QREGION_SIZE 128
+
+#define IAVF_FDIR_IPV6_TC_OFFSET 20
+#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
+
+#define IAVF_FDIR_INSET_ETH (\
+	IAVF_INSET_ETHERTYPE)
+
+#define IAVF_FDIR_INSET_ETH_IPV4 (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6 (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
+	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,		IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,		IAVF_FDIR_INSET_ETH_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_sctp,		IAVF_FDIR_INSET_ETH_IPV4_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+};
+
+static struct iavf_flow_parser iavf_fdir_parser;
+
+static int
+iavf_fdir_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+		parser = &iavf_fdir_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fdir_uninit(struct iavf_adapter *ad)
+{
+	struct iavf_flow_parser *parser;
+
+	parser = &iavf_fdir_parser;
+
+	iavf_unregister_parser(parser, ad);
+}
+
+static int
+iavf_fdir_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter = meta;
+	struct iavf_fdir_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory");
+		return -rte_errno;
+	}
+
+	ret = iavf_fdir_add(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Add filter rule failed.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return 0;
+
+free_entry:
+	rte_free(rule);
+	return -rte_errno;
+}
+
+static int
+iavf_fdir_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fdir_conf *)flow->rule;
+
+	ret = iavf_fdir_del(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Del filter rule failed.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return 0;
+}
+
+static int
+iavf_fdir_validation(struct iavf_adapter *ad,
+		__rte_unused struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter = meta;
+	int ret;
+
+	ret = iavf_fdir_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Validate filter rule failed.");
+		return -rte_errno;
+	}
+
+	return 0;
+};
+
+static struct iavf_flow_engine iavf_fdir_engine = {
+	.init = iavf_fdir_init,
+	.uninit = iavf_fdir_uninit,
+	.create = iavf_fdir_create,
+	.destroy = iavf_fdir_destroy,
+	.validation = iavf_fdir_validation,
+	.type = IAVF_FLOW_ENGINE_FDIR,
+};
+
+static int
+iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
+			struct rte_flow_error *error,
+			const struct rte_flow_action *act,
+			struct virtchnl_filter_action *filter_action)
+{
+	const struct rte_flow_action_rss *rss = act->conf;
+	uint32_t i;
+
+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Invalid action.");
+		return -rte_errno;
+	}
+
+	if (rss->queue_num <= 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Queue region size can't be 0 or 1.");
+		return -rte_errno;
+	}
+
+	/* check if queue index for queue region is continuous */
+	for (i = 0; i < rss->queue_num - 1; i++) {
+		if (rss->queue[i + 1] != rss->queue[i] + 1) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, act,
+					"Discontinuous queue region");
+			return -rte_errno;
+		}
+	}
+
+	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Invalid queue region indexes.");
+		return -rte_errno;
+	}
+
+	if (!(rte_is_power_of_2(rss->queue_num) &&
+		(rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE))) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"The region size should be any of the following values:"
+				"1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
+				"of queues do not exceed the VSI allocation.");
+		return -rte_errno;
+	}
+
+	filter_action->act_conf.queue.index = rss->queue[0];
+	filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse_action(struct iavf_adapter *ad,
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct iavf_fdir_conf *filter)
+{
+	const struct rte_flow_action_queue *act_q;
+	uint32_t dest_num = 0;
+	int ret;
+
+	int number = 0;
+	struct virtchnl_filter_action *filter_action;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_DROP;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+
+			act_q = actions->conf;
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+
+			if (filter_action->act_conf.queue.index >=
+				ad->eth_dev->data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid queue for FDIR.");
+				return -rte_errno;
+			}
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+
+			ret = iavf_fdir_parse_action_qregion(ad,
+						error, actions, filter_action);
+			if (ret)
+				return ret;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, actions,
+					"Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (dest_num == 0 || dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
+			const struct rte_flow_item pattern[],
+			struct rte_flow_error *error,
+			struct iavf_fdir_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	uint64_t input_set = IAVF_INSET_NONE;
+
+	enum rte_flow_item_type next_type;
+	uint16_t ether_type;
+
+	int layer = 0;
+	struct virtchnl_proto_hdr *hdr;
+
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+	};
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Not support range");
+		}
+
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			next_type = (item + 1)->type;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+
+			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
+				(!eth_spec || !eth_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "NULL eth spec/mask.");
+				return -rte_errno;
+			}
+
+			if (eth_spec && eth_mask) {
+				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid MAC_addr mask.");
+					return -rte_errno;
+				}
+			}
+
+			if (eth_spec && eth_mask && eth_mask->type) {
+				if (eth_mask->type != RTE_BE16(0xffff)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid type mask.");
+					return -rte_errno;
+				}
+
+				ether_type = rte_be_to_cpu_16(eth_spec->type);
+				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+					ether_type == RTE_ETHER_TYPE_IPV6) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Unsupported ether_type.");
+					return -rte_errno;
+				}
+
+				input_set |= IAVF_INSET_ETHERTYPE;
+				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
+
+				rte_memcpy(hdr->buffer,
+					eth_spec, sizeof(*eth_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.fragment_offset ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.type_of_service ==
+								UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TOS;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_PROTO;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TTL;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
+				}
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+				}
+
+				rte_memcpy(hdr->buffer,
+					&ipv4_spec->hdr,
+					sizeof(ipv4_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				if ((ipv6_mask->hdr.vtc_flow &
+					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+					== rte_cpu_to_be_32(
+							IAVF_IPV6_TC_MASK)) {
+					input_set |= IAVF_INSET_IPV6_TC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
+				}
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					ipv6_addr_mask,
+					RTE_DIM(ipv6_mask->hdr.src_addr))) {
+					input_set |= IAVF_INSET_IPV6_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+				}
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					ipv6_addr_mask,
+					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+					input_set |= IAVF_INSET_IPV6_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+				}
+
+				rte_memcpy(hdr->buffer,
+					&ipv6_spec->hdr,
+					sizeof(ipv6_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				if (udp_mask->hdr.dgram_len ||
+					udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_UDP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_UDP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&udp_spec->hdr,
+						sizeof(udp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&udp_spec->hdr,
+						sizeof(udp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_TCP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_TCP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&tcp_spec->hdr,
+						sizeof(tcp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&tcp_spec->hdr,
+						sizeof(tcp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+			if (sctp_spec && sctp_mask) {
+				if (sctp_mask->hdr.cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, item,
+			"Protocol header layers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	filter->input_set = input_set;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_fdir_conf *filter = &vf->fdir.conf;
+	struct iavf_pattern_match_item *item = NULL;
+	uint64_t input_set;
+	int ret;
+
+	memset(filter, 0, sizeof(*filter));
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (!item)
+		return -rte_errno;
+
+	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
+	if (ret)
+		goto error;
+
+	input_set = filter->input_set;
+	if (!input_set || input_set & ~item->input_set_mask) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
+				"Invalid input set");
+		ret = -rte_errno;
+		goto error;
+	}
+
+	ret = iavf_fdir_parse_action(ad, actions, error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(item);
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_fdir_parser = {
+	.engine = &iavf_fdir_engine,
+	.array = iavf_fdir_pattern,
+	.array_len = RTE_DIM(iavf_fdir_pattern),
+	.parse_pattern_action = iavf_fdir_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fdir_engine_register)
+{
+	iavf_register_flow_engine(&iavf_fdir_engine);
+}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index b97326f..fc9d54a 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -337,7 +337,8 @@
 
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
-		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -865,3 +866,154 @@
 
 	return err;
 }
+
+int
+iavf_fdir_add(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_add *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->add_fltr.validate_only = 0;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->add_fltr);
+	args.in_args_size = sizeof(*(&filter->add_fltr));
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+	filter->flow_id = fdir_ret->flow_id;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"add rule request is successfully done by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed due to no HW resource");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed because the rule already exists");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed because the rule conflicts with an existing rule");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed because the HW doesn't support it");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed due to a programming timeout");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"add rule request failed due to other reasons");
+		return -1;
+	}
+
+	return 0;
+};
+
+int
+iavf_fdir_del(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_del *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->del_fltr.flow_id = filter->flow_id;
+
+	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->del_fltr);
+	args.in_args_size = sizeof(filter->del_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"delete rule request is successfully done by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR,
+			"delete rule request failed because this rule doesn't exist");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+		PMD_DRV_LOG(ERR,
+			"delete rule request failed due to a programming timeout");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"delete rule request failed due to other reasons");
+		return -1;
+	}
+
+	return 0;
+};
+
+int
+iavf_fdir_check(struct iavf_adapter *adapter,
+		struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_add *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->add_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->add_fltr);
+	args.in_args_size = sizeof(*(&filter->add_fltr));
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"check rule request is successfully done by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR,
+			"check rule request failed due to parameter validation"
+			" or unsupported HW");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"check rule request failed due to other reasons");
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 5a5cdd5..f875b72 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -14,6 +14,7 @@ sources = files(
 	'iavf_vchnl.c',
 	'iavf_generic_flow.c',
 	'iavf_hash.c',
+	'iavf_fdir.c',
 )
 
 if arch_subdir == 'x86'
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v3 2/5] net/iavf: add support for FDIR GTPU
  2020-04-10 10:18   ` [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capability Simei Su
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 1/5] net/iavf: add support for FDIR basic rule Simei Su
@ 2020-04-10 10:18     ` Simei Su
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
                       ` (3 subsequent siblings)
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-10 10:18 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables GTPU with TEID and QFI for flow director filter.

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 63 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)
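
As a usage illustration (not part of the patch), a TEID match could be
expressed as below; whether an explicit UDP item is required between IPv4
and GTPU follows the iavf_pattern_eth_ipv4_gtpu definition in
iavf_generic_flow.c:

    /* Sketch only: match GTP-U TEID 0x12345678. A QFI match would add a
     * GTP_PSC item after the GTPU item in the same way.
     */
    struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x12345678) };
    struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_GTPU,
              .spec = &gtp_spec, .mask = &gtp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };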

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index f2b10d7..20d3854 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -67,6 +67,14 @@
 	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
 	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
 
+#define IAVF_FDIR_INSET_GTPU (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID)
+
+#define IAVF_FDIR_INSET_GTPU_EH (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -77,6 +85,8 @@
 	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -363,6 +373,8 @@
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -666,6 +678,57 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec = item->spec;
+			gtp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+					gtp_mask->msg_type ||
+					gtp_mask->msg_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				if (gtp_mask->teid == UINT32_MAX) {
+					input_set |= IAVF_INSET_GTPU_TEID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
+				}
+
+				rte_memcpy(hdr->buffer,
+					gtp_spec, sizeof(*gtp_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+			gtp_psc_spec = item->spec;
+			gtp_psc_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+
+			if (gtp_psc_spec && gtp_psc_mask) {
+				if (gtp_psc_mask->qfi == UINT8_MAX) {
+					input_set |= IAVF_INSET_GTPU_QFI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
+				}
+
+				rte_memcpy(hdr->buffer, gtp_psc_spec,
+					sizeof(*gtp_psc_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v3 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec
  2020-04-10 10:18   ` [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capability Simei Su
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 1/5] net/iavf: add support for FDIR basic rule Simei Su
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 2/5] net/iavf: add support for FDIR GTPU Simei Su
@ 2020-04-10 10:18     ` Simei Su
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 4/5] net/iavf: add support for FDIR PFCP Simei Su
                       ` (2 subsequent siblings)
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-10 10:18 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables L2TPv3 with SESSION_ID, ESP/AH with SPI, NAT-T
with SPI and IP src/dst for flow director filter.

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 91 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)
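
For illustration (not part of the patch), an ESP SPI match over IPv4 could
be written as below; the NAT-T variants differ only in carrying a UDP item
before ESP:

    /* Sketch only: match ESP SPI 0x100 over IPv4. */
    struct rte_flow_item_esp esp_spec = {
            .hdr = { .spi = RTE_BE32(0x100) },
    };
    struct rte_flow_item_esp esp_mask = {
            .hdr = { .spi = RTE_BE32(0xffffffff) },
    };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_ESP,
              .spec = &esp_spec, .mask = &esp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };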

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 20d3854..58d1821 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -75,6 +75,23 @@
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
 
+#define IAVF_FDIR_INSET_L2TPV3OIP (\
+	IAVF_L2TPV3OIP_SESSION_ID)
+
+#define IAVF_FDIR_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -87,6 +104,14 @@
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -375,6 +400,9 @@
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
 	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
+	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
+	const struct rte_flow_item_esp *esp_spec, *esp_mask;
+	const struct rte_flow_item_ah *ah_spec, *ah_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -729,6 +757,69 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+			l2tpv3oip_spec = item->spec;
+			l2tpv3oip_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
+
+			if (l2tpv3oip_spec && l2tpv3oip_mask) {
+				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
+					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
+				}
+
+				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
+					sizeof(*l2tpv3oip_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_ESP:
+			esp_spec = item->spec;
+			esp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+			if (esp_spec && esp_mask) {
+				if (esp_mask->hdr.spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_ESP_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, &esp_spec->hdr,
+					sizeof(esp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_AH:
+			ah_spec = item->spec;
+			ah_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+			if (ah_spec && ah_mask) {
+				if (ah_mask->spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_AH_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, ah_spec,
+					sizeof(*ah_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v3 4/5] net/iavf: add support for FDIR PFCP
  2020-04-10 10:18   ` [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capability Simei Su
                       ` (2 preceding siblings ...)
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
@ 2020-04-10 10:18     ` Simei Su
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 5/5] net/iavf: add support for FDIR mark action Simei Su
  2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Simei Su
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-10 10:18 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables PFCP node and session packets with S_FIELD
for flow director filter.

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
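
For illustration (not part of the patch), matching PFCP session packets
reduces to a spec/mask pair on s_field; the full item sequence follows the
iavf_pattern_eth_ipv4_pfcp definition in iavf_generic_flow.c:

    /* Sketch only: S field == 1 selects PFCP session packets,
     * S field == 0 selects node packets.
     */
    struct rte_flow_item_pfcp pfcp_spec = { .s_field = 1 };
    struct rte_flow_item_pfcp pfcp_mask = { .s_field = 0xff };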

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 58d1821..d57bbf9 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -92,6 +92,9 @@
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_ESP_SPI)
 
+#define IAVF_FDIR_INSET_PFCP (\
+	IAVF_INSET_PFCP_S_FIELD)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -112,6 +115,8 @@
 	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -403,6 +408,7 @@
 	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
 	const struct rte_flow_item_esp *esp_spec, *esp_mask;
 	const struct rte_flow_item_ah *ah_spec, *ah_mask;
+	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -820,6 +826,27 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_PFCP:
+			pfcp_spec = item->spec;
+			pfcp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
+
+			if (pfcp_spec && pfcp_mask) {
+				if (pfcp_mask->s_field == UINT8_MAX) {
+					input_set |= IAVF_INSET_PFCP_S_FIELD;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
+				}
+
+				rte_memcpy(hdr->buffer, pfcp_spec,
+					sizeof(*pfcp_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v3 5/5] net/iavf: add support for FDIR mark action
  2020-04-10 10:18   ` [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capability Simei Su
                       ` (3 preceding siblings ...)
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 4/5] net/iavf: add support for FDIR PFCP Simei Su
@ 2020-04-10 10:18     ` Simei Su
  2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Simei Su
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-10 10:18 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables mark action support and takes mark only case
into consideration.

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf.h      |  1 +
 drivers/net/iavf/iavf_fdir.c | 45 +++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 45 insertions(+), 1 deletion(-)
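
For illustration (not part of the patch), a mark-only rule looks as below;
because no destination action is given, the parser appends an implicit
PASSTHRU, as handled at the end of iavf_fdir_parse_action():

    /* Sketch only: tag matching packets with FDIR ID 0x1234. */
    struct rte_flow_action_mark mark = { .id = 0x1234 };
    struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    /* On receive, matched mbufs carry PKT_RX_FDIR_ID and
     * mbuf->hash.fdir.hi == 0x1234.
     */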

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 2f84a1f..b28a65c 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -97,6 +97,7 @@ struct iavf_fdir_conf {
 	struct virtchnl_fdir_del del_fltr;
 	uint64_t input_set;
 	uint32_t flow_id;
+	uint32_t mark_flag;
 };
 
 struct iavf_fdir_info {
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index d57bbf9..6dd35a8 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -18,6 +18,7 @@
 #include "iavf.h"
 #include "iavf_generic_flow.h"
 #include "virtchnl.h"
+#include "iavf_rxtx.h"
 
 #define IAVF_FDIR_MAX_QREGION_SIZE 128
 
@@ -171,6 +172,9 @@
 		goto free_entry;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 1);
+
 	rte_memcpy(rule, filter, sizeof(*rule));
 	flow->rule = rule;
 
@@ -199,6 +203,9 @@
 		return -rte_errno;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 0);
+
 	flow->rule = NULL;
 	rte_free(filter);
 
@@ -297,7 +304,9 @@
 			struct iavf_fdir_conf *filter)
 {
 	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark_spec = NULL;
 	uint32_t dest_num = 0;
+	uint32_t mark_num = 0;
 	int ret;
 
 	int number = 0;
@@ -363,6 +372,19 @@
 			filter->add_fltr.rule_cfg.action_set.count = ++number;
 			break;
 
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			mark_num++;
+
+			filter->mark_flag = 1;
+			mark_spec = actions->conf;
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_MARK;
+			filter_action->act_conf.mark_id = mark_spec->id;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
 		default:
 			rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -378,13 +400,34 @@
 		return -rte_errno;
 	}
 
-	if (dest_num == 0 || dest_num >= 2) {
+	if (dest_num >= 2) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
 			"Unsupported action combination");
 		return -rte_errno;
 	}
 
+	if (mark_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Too many mark actions");
+		return -rte_errno;
+	}
+
+	if (dest_num + mark_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Emtpy action");
+		return -rte_errno;
+	}
+
+	/* Mark only is equal to mark + passthru. */
+	if (dest_num == 0) {
+		filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+		filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+		filter->add_fltr.rule_cfg.action_set.count = ++number;
+	}
+
 	return 0;
 }
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* Re: [dpdk-dev] [PATCH v3 1/5] net/iavf: add support for FDIR basic rule
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 1/5] net/iavf: add support for FDIR basic rule Simei Su
@ 2020-04-14  7:37       ` Ye Xiaolong
  2020-04-14  8:31         ` Su, Simei
  0 siblings, 1 reply; 43+ messages in thread
From: Ye Xiaolong @ 2020-04-14  7:37 UTC (permalink / raw)
  To: Simei Su; +Cc: qi.z.zhang, jingjing.wu, dev, yahui.cao

On 04/10, Simei Su wrote:
>This patch adds FDIR create/destroy/validate function in AVF.
>Common pattern and queue/qgroup/passthru/drop actions are supported.
>
>Signed-off-by: Simei Su <simei.su@intel.com>
>---
> doc/guides/rel_notes/release_20_05.rst |   1 +
> drivers/net/iavf/Makefile              |   1 +
> drivers/net/iavf/iavf.h                |  17 +
> drivers/net/iavf/iavf_fdir.c           | 749 +++++++++++++++++++++++++++++++++
> drivers/net/iavf/iavf_vchnl.c          | 154 ++++++-
> drivers/net/iavf/meson.build           |   1 +
> 6 files changed, 922 insertions(+), 1 deletion(-)
> create mode 100644 drivers/net/iavf/iavf_fdir.c
>
>diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
>index b5962d8..17299ef 100644
>--- a/doc/guides/rel_notes/release_20_05.rst
>+++ b/doc/guides/rel_notes/release_20_05.rst
>@@ -99,6 +99,7 @@ New Features
> 
>   * Added generic filter support.
>   * Added advanced RSS configuration for VFs.
>+  * Added FDIR capability for advanced iavf.
> 
> 
> Removed Items
>diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
>index 7b0093a..b2b75d7 100644
>--- a/drivers/net/iavf/Makefile
>+++ b/drivers/net/iavf/Makefile
>@@ -25,6 +25,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
> SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
> SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
> SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
>+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
> ifeq ($(CONFIG_RTE_ARCH_X86), y)
> SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
> endif
>diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
>index d813296..2f84a1f 100644
>--- a/drivers/net/iavf/iavf.h
>+++ b/drivers/net/iavf/iavf.h
>@@ -92,6 +92,17 @@ struct iavf_vsi {
> struct iavf_flow_parser_node;
> TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
> 
>+struct iavf_fdir_conf {
>+	struct virtchnl_fdir_add add_fltr;
>+	struct virtchnl_fdir_del del_fltr;
>+	uint64_t input_set;
>+	uint32_t flow_id;
>+};
>+
>+struct iavf_fdir_info {
>+	struct iavf_fdir_conf conf;
>+};

Why we need struct iavf_fdir_info since it has only one member?

>+
> /* TODO: is that correct to assume the max number to be 16 ?*/
> #define IAVF_MAX_MSIX_VECTORS   16
> 
>@@ -131,6 +142,8 @@ struct iavf_info {
> 	rte_spinlock_t flow_ops_lock;
> 	struct iavf_parser_list rss_parser_list;
> 	struct iavf_parser_list dist_parser_list;
>+
>+	struct iavf_fdir_info fdir; /* flow director info */
> };
> 
> #define IAVF_MAX_PKT_TYPE 1024
>@@ -254,4 +267,8 @@ int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
> int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
> int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
> 			 struct virtchnl_rss_cfg *rss_cfg, bool add);
>+int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
>+int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
>+int iavf_fdir_check(struct iavf_adapter *adapter,
>+		struct iavf_fdir_conf *filter);
> #endif /* _IAVF_ETHDEV_H_ */
>diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
>new file mode 100644
>index 0000000..f2b10d7
>--- /dev/null
>+++ b/drivers/net/iavf/iavf_fdir.c
>@@ -0,0 +1,749 @@
>+/* SPDX-License-Identifier: BSD-3-Clause
>+ * Copyright(c) 2019 Intel Corporation

Should be 2020.

>+ */
>+
>+#include <sys/queue.h>
>+#include <stdio.h>
>+#include <errno.h>
>+#include <stdint.h>
>+#include <string.h>
>+#include <unistd.h>
>+#include <stdarg.h>
>+
>+#include <rte_ether.h>
>+#include <rte_ethdev_driver.h>
>+#include <rte_malloc.h>
>+#include <rte_tailq.h>
>+
>+#include "iavf.h"
>+#include "iavf_generic_flow.h"
>+#include "virtchnl.h"
>+
>+#define IAVF_FDIR_MAX_QREGION_SIZE 128
>+
>+#define IAVF_FDIR_IPV6_TC_OFFSET 20
>+#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
>+
>+#define IAVF_FDIR_INSET_ETH (\
>+	IAVF_INSET_ETHERTYPE)
>+
>+#define IAVF_FDIR_INSET_ETH_IPV4 (\
>+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
>+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
>+	IAVF_INSET_IPV4_TTL)
>+
>+#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
>+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
>+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
>+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
>+
>+#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
>+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
>+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
>+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
>+
>+#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
>+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
>+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
>+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
>+
>+#define IAVF_FDIR_INSET_ETH_IPV6 (\
>+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
>+	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
>+	IAVF_INSET_IPV6_HOP_LIMIT)
>+
>+#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
>+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
>+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
>+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
>+
>+#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
>+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
>+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
>+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
>+
>+#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
>+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
>+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
>+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
>+
>+static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
>+	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
>+	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
>+	{iavf_pattern_eth_ipv4_udp,		IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
>+	{iavf_pattern_eth_ipv4_tcp,		IAVF_FDIR_INSET_ETH_IPV4_TCP,		IAVF_INSET_NONE},
>+	{iavf_pattern_eth_ipv4_sctp,		IAVF_FDIR_INSET_ETH_IPV4_SCTP,		IAVF_INSET_NONE},
>+	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,		IAVF_INSET_NONE},
>+	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
>+	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
>+	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
>+};
>+
>+static struct iavf_flow_parser iavf_fdir_parser;
>+
>+static int
>+iavf_fdir_init(struct iavf_adapter *ad)
>+{
>+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
>+	struct iavf_flow_parser *parser;
>+
>+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)

Need to check whether vf->vf_res is NULL, otherwise it may cause segfault.

>+		parser = &iavf_fdir_parser;
>+	else
>+		return -ENOTSUP;
>+
>+	return iavf_register_parser(parser, ad);
>+}
>+
>+static void
>+iavf_fdir_uninit(struct iavf_adapter *ad)
>+{
>+	struct iavf_flow_parser *parser;
>+
>+	parser = &iavf_fdir_parser;
>+
>+	iavf_unregister_parser(parser, ad);

Simplify to iavf_unregister_parser(&iavf_fdir_parser, ad) ?

>+}
>+
>+static int
>+iavf_fdir_create(struct iavf_adapter *ad,
>+		struct rte_flow *flow,
>+		void *meta,
>+		struct rte_flow_error *error)
>+{
>+	struct iavf_fdir_conf *filter = meta;
>+	struct iavf_fdir_conf *rule;
>+	int ret;
>+
>+	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
>+	if (!rule) {
>+		rte_flow_error_set(error, ENOMEM,
>+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>+				"Failed to allocate memory");

Better to be more specific, like "Failed to allocate memory for fdir rule."

>+		return -rte_errno;
>+	}
>+
>+	ret = iavf_fdir_add(ad, filter);
>+	if (ret) {
>+		rte_flow_error_set(error, -ret,
>+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>+				"Add filter rule failed.");

What about "Failed to add filter rule" to make it consistent with other error log.
And same for other error logs below.


Thanks,
Xiaolong


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [dpdk-dev] [PATCH v3 1/5] net/iavf: add support for FDIR basic rule
  2020-04-14  7:37       ` Ye Xiaolong
@ 2020-04-14  8:31         ` Su, Simei
  0 siblings, 0 replies; 43+ messages in thread
From: Su, Simei @ 2020-04-14  8:31 UTC (permalink / raw)
  To: Ye, Xiaolong; +Cc: Zhang, Qi Z, Wu, Jingjing, dev, Cao, Yahui

Hi, Xiaolong

> -----Original Message-----
> From: Ye, Xiaolong <xiaolong.ye@intel.com>
> Sent: Tuesday, April 14, 2020 3:37 PM
> To: Su, Simei <simei.su@intel.com>
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>
> Subject: Re: [PATCH v3 1/5] net/iavf: add support for FDIR basic rule
> 
> On 04/10, Simei Su wrote:
> >This patch adds FDIR create/destroy/validate function in AVF.
> >Common pattern and queue/qgroup/passthru/drop actions are supported.
> >
> >Signed-off-by: Simei Su <simei.su@intel.com>
> >---
> > doc/guides/rel_notes/release_20_05.rst |   1 +
> > drivers/net/iavf/Makefile              |   1 +
> > drivers/net/iavf/iavf.h                |  17 +
> > drivers/net/iavf/iavf_fdir.c           | 749
> +++++++++++++++++++++++++++++++++
> > drivers/net/iavf/iavf_vchnl.c          | 154 ++++++-
> > drivers/net/iavf/meson.build           |   1 +
> > 6 files changed, 922 insertions(+), 1 deletion(-)  create mode 100644
> >drivers/net/iavf/iavf_fdir.c
> >
> >diff --git a/doc/guides/rel_notes/release_20_05.rst
> >b/doc/guides/rel_notes/release_20_05.rst
> >index b5962d8..17299ef 100644
> >--- a/doc/guides/rel_notes/release_20_05.rst
> >+++ b/doc/guides/rel_notes/release_20_05.rst
> >@@ -99,6 +99,7 @@ New Features
> >
> >   * Added generic filter support.
> >   * Added advanced RSS configuration for VFs.
> >+  * Added FDIR capability for advanced iavf.
> >
> >
> > Removed Items
> >diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
> >index 7b0093a..b2b75d7 100644
> >--- a/drivers/net/iavf/Makefile
> >+++ b/drivers/net/iavf/Makefile
> >@@ -25,6 +25,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) +=
> iavf_vchnl.c
> > SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
> > SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
> > SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
> >+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
> > ifeq ($(CONFIG_RTE_ARCH_X86), y)
> > SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c  endif
> diff
> >--git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index
> >d813296..2f84a1f 100644
> >--- a/drivers/net/iavf/iavf.h
> >+++ b/drivers/net/iavf/iavf.h
> >@@ -92,6 +92,17 @@ struct iavf_vsi {
> > struct iavf_flow_parser_node;
> > TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
> >
> >+struct iavf_fdir_conf {
> >+	struct virtchnl_fdir_add add_fltr;
> >+	struct virtchnl_fdir_del del_fltr;
> >+	uint64_t input_set;
> >+	uint32_t flow_id;
> >+};
> >+
> >+struct iavf_fdir_info {
> >+	struct iavf_fdir_conf conf;
> >+};
> 
> Why we need struct iavf_fdir_info since it has only one member?

  In 20.05, the flow director query counter is not supported. The query counter feature will be supported later,
which may need related counter config. So I wrote it in this form in case other configuration needs to be added in the future.
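
To make the intent concrete, here is a purely hypothetical sketch of the kind of extension meant (none of these names exist in the patchset):

  #include <stdint.h>

  /* Hypothetical future extension -- illustration only. */
  struct iavf_fdir_counter_cfg {            /* made-up type */
  	uint32_t counter_id;
  	uint32_t counter_shared;
  };

  struct iavf_fdir_info {
  	struct iavf_fdir_conf conf;
  	struct iavf_fdir_counter_cfg counter; /* future query-counter config */
  };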

> 
> >+
> > /* TODO: is that correct to assume the max number to be 16 ?*/
> > #define IAVF_MAX_MSIX_VECTORS   16
> >
> >@@ -131,6 +142,8 @@ struct iavf_info {
> > 	rte_spinlock_t flow_ops_lock;
> > 	struct iavf_parser_list rss_parser_list;
> > 	struct iavf_parser_list dist_parser_list;
> >+
> >+	struct iavf_fdir_info fdir; /* flow director info */
> > };
> >
> > #define IAVF_MAX_PKT_TYPE 1024
> >@@ -254,4 +267,8 @@ int iavf_add_del_eth_addr(struct iavf_adapter
> >*adapter,  int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t
> >vlanid, bool add);  int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
> > 			 struct virtchnl_rss_cfg *rss_cfg, bool add);
> >+int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf
> >+*filter); int iavf_fdir_del(struct iavf_adapter *adapter, struct
> >+iavf_fdir_conf *filter); int iavf_fdir_check(struct iavf_adapter *adapter,
> >+		struct iavf_fdir_conf *filter);
> > #endif /* _IAVF_ETHDEV_H_ */
> >diff --git a/drivers/net/iavf/iavf_fdir.c
> >b/drivers/net/iavf/iavf_fdir.c new file mode 100644 index
> >0000000..f2b10d7
> >--- /dev/null
> >+++ b/drivers/net/iavf/iavf_fdir.c
> >@@ -0,0 +1,749 @@
> >+/* SPDX-License-Identifier: BSD-3-Clause
> >+ * Copyright(c) 2019 Intel Corporation
> 
> Should be 2020.

  Yes, wrote it wrong.

> 
> >+ */
> >+
> >+#include <sys/queue.h>
> >+#include <stdio.h>
> >+#include <errno.h>
> >+#include <stdint.h>
> >+#include <string.h>
> >+#include <unistd.h>
> >+#include <stdarg.h>
> >+
> >+#include <rte_ether.h>
> >+#include <rte_ethdev_driver.h>
> >+#include <rte_malloc.h>
> >+#include <rte_tailq.h>
> >+
> >+#include "iavf.h"
> >+#include "iavf_generic_flow.h"
> >+#include "virtchnl.h"
> >+
> >+#define IAVF_FDIR_MAX_QREGION_SIZE 128
> >+
> >+#define IAVF_FDIR_IPV6_TC_OFFSET 20
> >+#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
> >+
> >+#define IAVF_FDIR_INSET_ETH (\
> >+	IAVF_INSET_ETHERTYPE)
> >+
> >+#define IAVF_FDIR_INSET_ETH_IPV4 (\
> >+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> >+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
> >+	IAVF_INSET_IPV4_TTL)
> >+
> >+#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
> >+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> >+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> >+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> >+
> >+#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
> >+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> >+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> >+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> >+
> >+#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
> >+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> >+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> >+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> >+
> >+#define IAVF_FDIR_INSET_ETH_IPV6 (\
> >+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> >+	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
> >+	IAVF_INSET_IPV6_HOP_LIMIT)
> >+
> >+#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
> >+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> >+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> >+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> >+
> >+#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
> >+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> >+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> >+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> >+
> >+#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
> >+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> >+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> >+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> >+
> >+static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
> >+	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,
> 	IAVF_INSET_NONE},
> >+	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,
> 	IAVF_INSET_NONE},
> >+	{iavf_pattern_eth_ipv4_udp,		IAVF_FDIR_INSET_ETH_IPV4_UDP,
> 	IAVF_INSET_NONE},
> >+	{iavf_pattern_eth_ipv4_tcp,		IAVF_FDIR_INSET_ETH_IPV4_TCP,
> 	IAVF_INSET_NONE},
> >+	{iavf_pattern_eth_ipv4_sctp,		IAVF_FDIR_INSET_ETH_IPV4_SCTP,
> 	IAVF_INSET_NONE},
> >+	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,
> 	IAVF_INSET_NONE},
> >+	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,
> 	IAVF_INSET_NONE},
> >+	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,
> 	IAVF_INSET_NONE},
> >+	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,
> 	IAVF_INSET_NONE},
> >+};
> >+
> >+static struct iavf_flow_parser iavf_fdir_parser;
> >+
> >+static int
> >+iavf_fdir_init(struct iavf_adapter *ad) {
> >+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> >+	struct iavf_flow_parser *parser;
> >+
> >+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
> 
> Need to check whether vf->vf_res is NULL, otherwise it may cause segfault.
> 
   Ok, I will think it over.

> >+		parser = &iavf_fdir_parser;
> >+	else
> >+		return -ENOTSUP;
> >+
> >+	return iavf_register_parser(parser, ad); }
> >+
> >+static void
> >+iavf_fdir_uninit(struct iavf_adapter *ad) {
> >+	struct iavf_flow_parser *parser;
> >+
> >+	parser = &iavf_fdir_parser;
> >+
> >+	iavf_unregister_parser(parser, ad);
> 
> Simplify to iavf_unregister_parser(&iavf_fdir_parser, ad) ?

  Ok, will simplify it.

> 
> >+}
> >+
> >+static int
> >+iavf_fdir_create(struct iavf_adapter *ad,
> >+		struct rte_flow *flow,
> >+		void *meta,
> >+		struct rte_flow_error *error)
> >+{
> >+	struct iavf_fdir_conf *filter = meta;
> >+	struct iavf_fdir_conf *rule;
> >+	int ret;
> >+
> >+	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
> >+	if (!rule) {
> >+		rte_flow_error_set(error, ENOMEM,
> >+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> >+				"Failed to allocate memory");
> 
> Better to be more specific, like "Failed to allocate memory for fdir rule."

   Ok, it makes sense.

> 
> >+		return -rte_errno;
> >+	}
> >+
> >+	ret = iavf_fdir_add(ad, filter);
> >+	if (ret) {
> >+		rte_flow_error_set(error, -ret,
> >+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> >+				"Add filter rule failed.");
> 
> What about "Failed to add filter rule" to make it consistent with other error
> log.
> And same for other error logs below.

  Ok, I will modify it. Thanks.

> 
> 
> Thanks,
> Xiaolong

Br
Simei

^ permalink raw reply	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability
  2020-04-10 10:18   ` [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capability Simei Su
                       ` (4 preceding siblings ...)
  2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 5/5] net/iavf: add support for FDIR mark action Simei Su
@ 2020-04-15  2:55     ` Simei Su
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 1/5] net/iavf: add support for FDIR basic rule Simei Su
                         ` (6 more replies)
  5 siblings, 7 replies; 43+ messages in thread
From: Simei Su @ 2020-04-15  2:55 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

[PATCH v4 1/5] support FDIR common patterns and actions.
[PATCH v4 2/5] support FDIR GTPU pattern.
[PATCH v4 3/5] support FDIR L2TPv3, ESP, AH and NAT-T pattern.
[PATCH v4 4/5] support FDIR PFCP node and session pattern.
[PATCH v4 5/5] support FDIR mark action.

This patchset depends on the following patches on patchwork:
https://patchwork.dpdk.org/cover/68372/
    [v4,00/11] framework for advanced iAVF PMD

v4:
* Add a NULL check for vf->vf_res.
* Simplify some code.
* Specify and refine some error logs.

v3:
* Add release notes.
* Update action structures based on virtchnl update.
* Add VIRTCHNL_FDIR_FAILURE_RULE_EXIST condition check and modify
  the error log.

v2:
* Update pattern and action structures based on latest virtchnl design.
* Add upper bound check for pattern layers and action numbers.
* Increase the action number in the mark-only case.
* Handle more PF error return status cases.

Simei Su (5):
  net/iavf: add support for FDIR basic rule
  net/iavf: add support for FDIR GTPU
  net/iavf: add support for FDIR L2TPv3 and IPSec
  net/iavf: add support for FDIR PFCP
  net/iavf: add support for FDIR mark action

 doc/guides/rel_notes/release_20_05.rst |   1 +
 drivers/net/iavf/Makefile              |   1 +
 drivers/net/iavf/iavf.h                |  18 +
 drivers/net/iavf/iavf_fdir.c           | 971 +++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c          | 154 +++++-
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 1145 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v4 1/5] net/iavf: add support for FDIR basic rule
  2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Simei Su
@ 2020-04-15  2:55       ` Simei Su
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 2/5] net/iavf: add support for FDIR GTPU Simei Su
                         ` (5 subsequent siblings)
  6 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-15  2:55 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch adds FDIR create/destroy/validate functions in AVF.
Common pattern and queue/qgroup/passthru/drop actions are supported.
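
As a usage illustration, a minimal sketch of creating such a rule
through the generic rte_flow API (not part of this patch; the helper
name, UDP port and queue index are invented):

  #include <stdint.h>
  #include <rte_byteorder.h>
  #include <rte_flow.h>

  /* Sketch: steer IPv4/UDP packets with dst port 5000 to Rx queue 4. */
  static struct rte_flow *
  fdir_udp_to_queue(uint16_t port_id, struct rte_flow_error *err)
  {
  	struct rte_flow_attr attr = { .ingress = 1 };
  	struct rte_flow_item_udp udp_spec = {
  		.hdr.dst_port = RTE_BE16(5000),
  	};
  	struct rte_flow_item_udp udp_mask = {
  		.hdr.dst_port = RTE_BE16(0xffff),
  	};
  	struct rte_flow_item pattern[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
  		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
  		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
  		  .spec = &udp_spec, .mask = &udp_mask },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action_queue queue = { .index = 4 };
  	struct rte_flow_action actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};

  	return rte_flow_create(port_id, &attr, pattern, actions, err);
  }

Calling rte_flow_validate() with the same arguments exercises the
validate_only path (iavf_fdir_check) instead of programming the rule.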

Signed-off-by: Simei Su <simei.su@intel.com>
---
 doc/guides/rel_notes/release_20_05.rst |   1 +
 drivers/net/iavf/Makefile              |   1 +
 drivers/net/iavf/iavf.h                |  17 +
 drivers/net/iavf/iavf_fdir.c           | 747 +++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c          | 154 ++++++-
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 920 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c

diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
index b5962d8..17299ef 100644
--- a/doc/guides/rel_notes/release_20_05.rst
+++ b/doc/guides/rel_notes/release_20_05.rst
@@ -99,6 +99,7 @@ New Features
 
   * Added generic filter support.
   * Added advanced RSS configuration for VFs.
+  * Added FDIR capability for advanced iavf.
 
 
 Removed Items
diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
index 7b0093a..b2b75d7 100644
--- a/drivers/net/iavf/Makefile
+++ b/drivers/net/iavf/Makefile
@@ -25,6 +25,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
 ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
 endif
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index d813296..2f84a1f 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -92,6 +92,17 @@ struct iavf_vsi {
 struct iavf_flow_parser_node;
 TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
 
+struct iavf_fdir_conf {
+	struct virtchnl_fdir_add add_fltr;
+	struct virtchnl_fdir_del del_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
+struct iavf_fdir_info {
+	struct iavf_fdir_conf conf;
+};
+
 /* TODO: is that correct to assume the max number to be 16 ?*/
 #define IAVF_MAX_MSIX_VECTORS   16
 
@@ -131,6 +142,8 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+
+	struct iavf_fdir_info fdir; /* flow director info */
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -254,4 +267,8 @@ int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
 int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
 int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 			 struct virtchnl_rss_cfg *rss_cfg, bool add);
+int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_check(struct iavf_adapter *adapter,
+		struct iavf_fdir_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
new file mode 100644
index 0000000..9b03d29
--- /dev/null
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -0,0 +1,747 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+#include "virtchnl.h"
+
+#define IAVF_FDIR_MAX_QREGION_SIZE 128
+
+#define IAVF_FDIR_IPV6_TC_OFFSET 20
+#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
+
+#define IAVF_FDIR_INSET_ETH (\
+	IAVF_INSET_ETHERTYPE)
+
+#define IAVF_FDIR_INSET_ETH_IPV4 (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6 (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
+	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,		IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,		IAVF_FDIR_INSET_ETH_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_sctp,		IAVF_FDIR_INSET_ETH_IPV4_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+};
+
+static struct iavf_flow_parser iavf_fdir_parser;
+
+static int
+iavf_fdir_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+		parser = &iavf_fdir_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fdir_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_fdir_parser, ad);
+}
+
+static int
+iavf_fdir_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter = meta;
+	struct iavf_fdir_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory for fdir rule");
+		return -rte_errno;
+	}
+
+	ret = iavf_fdir_add(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to add filter rule.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return 0;
+
+free_entry:
+	rte_free(rule);
+	return -rte_errno;
+}
+
+static int
+iavf_fdir_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fdir_conf *)flow->rule;
+
+	ret = iavf_fdir_del(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to delete filter rule.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return 0;
+}
+
+static int
+iavf_fdir_validation(struct iavf_adapter *ad,
+		__rte_unused struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter = meta;
+	int ret;
+
+	ret = iavf_fdir_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to validate filter rule.");
+		return -rte_errno;
+	}
+
+	return 0;
+};
+
+static struct iavf_flow_engine iavf_fdir_engine = {
+	.init = iavf_fdir_init,
+	.uninit = iavf_fdir_uninit,
+	.create = iavf_fdir_create,
+	.destroy = iavf_fdir_destroy,
+	.validation = iavf_fdir_validation,
+	.type = IAVF_FLOW_ENGINE_FDIR,
+};
+
+static int
+iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
+			struct rte_flow_error *error,
+			const struct rte_flow_action *act,
+			struct virtchnl_filter_action *filter_action)
+{
+	const struct rte_flow_action_rss *rss = act->conf;
+	uint32_t i;
+
+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Invalid action.");
+		return -rte_errno;
+	}
+
+	if (rss->queue_num <= 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Queue region size can't be 0 or 1.");
+		return -rte_errno;
+	}
+
+	/* check if queue index for queue region is continuous */
+	for (i = 0; i < rss->queue_num - 1; i++) {
+		if (rss->queue[i + 1] != rss->queue[i] + 1) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, act,
+					"Discontinuous queue region");
+			return -rte_errno;
+		}
+	}
+
+	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Invalid queue region indexes.");
+		return -rte_errno;
+	}
+
+	if (!(rte_is_power_of_2(rss->queue_num) &&
+		rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"The region size should be any of the following values:"
+				"1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
+				"of queues do not exceed the VSI allocation.");
+		return -rte_errno;
+	}
+
+	filter_action->act_conf.queue.index = rss->queue[0];
+	filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse_action(struct iavf_adapter *ad,
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct iavf_fdir_conf *filter)
+{
+	const struct rte_flow_action_queue *act_q;
+	uint32_t dest_num = 0;
+	int ret;
+
+	int number = 0;
+	struct virtchnl_filter_action *filter_action;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_DROP;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+
+			act_q = actions->conf;
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+
+			if (filter_action->act_conf.queue.index >=
+				ad->eth_dev->data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid queue for FDIR.");
+				return -rte_errno;
+			}
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+
+			ret = iavf_fdir_parse_action_qregion(ad,
+						error, actions, filter_action);
+			if (ret)
+				return ret;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, actions,
+					"Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (dest_num == 0 || dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
+			const struct rte_flow_item pattern[],
+			struct rte_flow_error *error,
+			struct iavf_fdir_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	uint64_t input_set = IAVF_INSET_NONE;
+
+	enum rte_flow_item_type next_type;
+	uint16_t ether_type;
+
+	int layer = 0;
+	struct virtchnl_proto_hdr *hdr;
+
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+	};
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item, "Range not supported");
+			return -rte_errno;
+		}
+
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			next_type = (item + 1)->type;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+
+			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
+				(!eth_spec || !eth_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "NULL eth spec/mask.");
+				return -rte_errno;
+			}
+
+			if (eth_spec && eth_mask) {
+				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid MAC_addr mask.");
+					return -rte_errno;
+				}
+			}
+
+			if (eth_spec && eth_mask && eth_mask->type) {
+				if (eth_mask->type != RTE_BE16(0xffff)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid type mask.");
+					return -rte_errno;
+				}
+
+				ether_type = rte_be_to_cpu_16(eth_spec->type);
+				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+					ether_type == RTE_ETHER_TYPE_IPV6) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Unsupported ether_type.");
+					return -rte_errno;
+				}
+
+				input_set |= IAVF_INSET_ETHERTYPE;
+				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
+
+				rte_memcpy(hdr->buffer,
+					eth_spec, sizeof(*eth_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.fragment_offset ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.type_of_service ==
+								UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TOS;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_PROTO;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TTL;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
+				}
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+				}
+
+				rte_memcpy(hdr->buffer,
+					&ipv4_spec->hdr,
+					sizeof(ipv4_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				if ((ipv6_mask->hdr.vtc_flow &
+					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+					== rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
+					input_set |= IAVF_INSET_IPV6_TC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
+				}
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					ipv6_addr_mask,
+					RTE_DIM(ipv6_mask->hdr.src_addr))) {
+					input_set |= IAVF_INSET_IPV6_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+				}
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					ipv6_addr_mask,
+					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+					input_set |= IAVF_INSET_IPV6_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+				}
+
+				rte_memcpy(hdr->buffer,
+					&ipv6_spec->hdr,
+					sizeof(ipv6_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				if (udp_mask->hdr.dgram_len ||
+					udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_UDP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_UDP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&udp_spec->hdr,
+						sizeof(udp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&udp_spec->hdr,
+						sizeof(udp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_TCP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_TCP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&tcp_spec->hdr,
+						sizeof(tcp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&tcp_spec->hdr,
+						sizeof(tcp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+			if (sctp_spec && sctp_mask) {
+				if (sctp_mask->hdr.cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, item,
+			"Protocol header layers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	filter->input_set = input_set;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_fdir_conf *filter = &vf->fdir.conf;
+	struct iavf_pattern_match_item *item = NULL;
+	uint64_t input_set;
+	int ret;
+
+	memset(filter, 0, sizeof(*filter));
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (!item)
+		return -rte_errno;
+
+	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
+	if (ret)
+		goto error;
+
+	input_set = filter->input_set;
+	if (!input_set || input_set & ~item->input_set_mask) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
+				"Invalid input set");
+		ret = -rte_errno;
+		goto error;
+	}
+
+	ret = iavf_fdir_parse_action(ad, actions, error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(item);
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_fdir_parser = {
+	.engine = &iavf_fdir_engine,
+	.array = iavf_fdir_pattern,
+	.array_len = RTE_DIM(iavf_fdir_pattern),
+	.parse_pattern_action = iavf_fdir_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fdir_engine_register)
+{
+	iavf_register_flow_engine(&iavf_fdir_engine);
+}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index b97326f..6974b8b 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -337,7 +337,8 @@
 
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
-		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -865,3 +866,154 @@
 
 	return err;
 }
+
+int
+iavf_fdir_add(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_add *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->add_fltr.validate_only = 0;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->add_fltr);
+	args.in_args_size = sizeof(filter->add_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+	filter->flow_id = fdir_ret->flow_id;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"Succeed in adding rule request by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request due to no hw resource");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request due to the rule is already existed");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request due to the rule is conflict with existing rule");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request due to the hw doesn't support");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request due to time out for programming");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request due to other reasons");
+		return -1;
+	}
+
+	return 0;
+};
+
+int
+iavf_fdir_del(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_del *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->del_fltr.flow_id = filter->flow_id;
+
+	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->del_fltr);
+	args.in_args_size = sizeof(filter->del_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"Succeed in deleting rule request by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR,
+			"Failed to delete rule request due to this rule doesn't exist");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+		PMD_DRV_LOG(ERR,
+			"Failed to delete rule request due to time out for programming");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"Failed to delete rule request due to other reasons");
+		return -1;
+	}
+
+	return 0;
+};
+
+int
+iavf_fdir_check(struct iavf_adapter *adapter,
+		struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_add *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->add_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->add_fltr);
+	args.in_args_size = sizeof(filter->add_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"Succeed in checking rule request by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR,
+			"Failed to check rule request due to parameters validation"
+			" or HW doesn't support");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"Failed to check rule request due to other reasons");
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 5a5cdd5..f875b72 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -14,6 +14,7 @@ sources = files(
 	'iavf_vchnl.c',
 	'iavf_generic_flow.c',
 	'iavf_hash.c',
+	'iavf_fdir.c',
 )
 
 if arch_subdir == 'x86'
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v4 2/5] net/iavf: add support for FDIR GTPU
  2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Simei Su
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 1/5] net/iavf: add support for FDIR basic rule Simei Su
@ 2020-04-15  2:55       ` Simei Su
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
                         ` (4 subsequent siblings)
  6 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-15  2:55 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables GTPU with TEID and QFI for flow director filter.
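
As a usage illustration, a minimal sketch (not part of this patch; the
helper name, TEID and queue index are invented):

  #include <stdint.h>
  #include <rte_byteorder.h>
  #include <rte_flow.h>

  /* Sketch: match GTP-U packets with TEID 0x12345678, steer to queue 2. */
  static struct rte_flow *
  fdir_gtpu_teid(uint16_t port_id, struct rte_flow_error *err)
  {
  	struct rte_flow_attr attr = { .ingress = 1 };
  	struct rte_flow_item_gtp gtp_spec = {
  		.teid = RTE_BE32(0x12345678),
  	};
  	struct rte_flow_item_gtp gtp_mask = {
  		.teid = RTE_BE32(0xffffffff),
  	};
  	struct rte_flow_item pattern[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
  		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
  		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
  		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
  		  .spec = &gtp_spec, .mask = &gtp_mask },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action_queue queue = { .index = 2 };
  	struct rte_flow_action actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};

  	return rte_flow_create(port_id, &attr, pattern, actions, err);
  }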

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 63 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 9b03d29..7c0bb14 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -67,6 +67,14 @@
 	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
 	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
 
+#define IAVF_FDIR_INSET_GTPU (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID)
+
+#define IAVF_FDIR_INSET_GTPU_EH (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -77,6 +85,8 @@
 	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -362,6 +372,8 @@
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -664,6 +676,57 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec = item->spec;
+			gtp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+					gtp_mask->msg_type ||
+					gtp_mask->msg_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				if (gtp_mask->teid == UINT32_MAX) {
+					input_set |= IAVF_INSET_GTPU_TEID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
+				}
+
+				rte_memcpy(hdr->buffer,
+					gtp_spec, sizeof(*gtp_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+			gtp_psc_spec = item->spec;
+			gtp_psc_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+
+			if (gtp_psc_spec && gtp_psc_mask) {
+				if (gtp_psc_mask->qfi == UINT8_MAX) {
+					input_set |= IAVF_INSET_GTPU_QFI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
+				}
+
+				rte_memcpy(hdr->buffer, gtp_psc_spec,
+					sizeof(*gtp_psc_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v4 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec
  2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Simei Su
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 1/5] net/iavf: add support for FDIR basic rule Simei Su
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 2/5] net/iavf: add support for FDIR GTPU Simei Su
@ 2020-04-15  2:55       ` Simei Su
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 4/5] net/iavf: add support for FDIR PFCP Simei Su
                         ` (3 subsequent siblings)
  6 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-15  2:55 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables L2TPv3 with SESSION_ID, ESP/AH with SPI, NAT-T
with SPI and IP src/dst for flow director filter.
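
As a usage illustration for the ESP case, a minimal sketch (not part of
this patch; the helper name and SPI value are invented):

  #include <stdint.h>
  #include <rte_byteorder.h>
  #include <rte_flow.h>

  /* Sketch: drop IPv4/ESP packets whose SPI is 0x12345678. */
  static struct rte_flow *
  fdir_esp_drop(uint16_t port_id, struct rte_flow_error *err)
  {
  	struct rte_flow_attr attr = { .ingress = 1 };
  	struct rte_flow_item_esp esp_spec = {
  		.hdr.spi = RTE_BE32(0x12345678),
  	};
  	struct rte_flow_item_esp esp_mask = {
  		.hdr.spi = RTE_BE32(0xffffffff),
  	};
  	struct rte_flow_item pattern[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
  		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
  		{ .type = RTE_FLOW_ITEM_TYPE_ESP,
  		  .spec = &esp_spec, .mask = &esp_mask },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};

  	return rte_flow_create(port_id, &attr, pattern, actions, err);
  }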

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 91 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 7c0bb14..1e59c7b 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -75,6 +75,23 @@
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
 
+#define IAVF_FDIR_INSET_L2TPV3OIP (\
+	IAVF_L2TPV3OIP_SESSION_ID)
+
+#define IAVF_FDIR_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -87,6 +104,14 @@
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -374,6 +399,9 @@
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
 	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
+	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
+	const struct rte_flow_item_esp *esp_spec, *esp_mask;
+	const struct rte_flow_item_ah *ah_spec, *ah_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -727,6 +755,69 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+			l2tpv3oip_spec = item->spec;
+			l2tpv3oip_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
+
+			if (l2tpv3oip_spec && l2tpv3oip_mask) {
+				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
+					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
+				}
+
+				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
+					sizeof(*l2tpv3oip_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_ESP:
+			esp_spec = item->spec;
+			esp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+			if (esp_spec && esp_mask) {
+				if (esp_mask->hdr.spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_ESP_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, &esp_spec->hdr,
+					sizeof(esp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_AH:
+			ah_spec = item->spec;
+			ah_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+			if (ah_spec && ah_mask) {
+				if (ah_mask->spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_AH_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, ah_spec,
+					sizeof(*ah_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v4 4/5] net/iavf: add support for FDIR PFCP
  2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Simei Su
                         ` (2 preceding siblings ...)
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
@ 2020-04-15  2:55       ` Simei Su
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 5/5] net/iavf: add support for FDIR mark action Simei Su
                         ` (2 subsequent siblings)
  6 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-15  2:55 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables PFCP node and session packets with S_FIELD
for flow director filter.
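
As a hedged sketch (assuming iavf_pattern_eth_ipv4_pfcp expands to
eth / ipv4 / udp / pfcp; queue index and values are illustrative), a
PFCP session filter could be composed as:

/* Match PFCP session packets (S field set) over IPv4 and send them to
 * Rx queue 2; the parser accepts the s_field only when fully masked.
 */
struct rte_flow_item_pfcp spec = { .s_field = 1 };	/* 1 = session, 0 = node */
struct rte_flow_item_pfcp mask = { .s_field = 0xff };
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_PFCP, .spec = &spec, .mask = &mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
struct rte_flow_action_queue queue = { .index = 2 };
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
/* pattern/actions then go to rte_flow_validate()/rte_flow_create(). */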

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 1e59c7b..1e50a07 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -92,6 +92,9 @@
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_ESP_SPI)
 
+#define IAVF_FDIR_INSET_PFCP (\
+	IAVF_INSET_PFCP_S_FIELD)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -112,6 +115,8 @@
 	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -402,6 +407,7 @@
 	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
 	const struct rte_flow_item_esp *esp_spec, *esp_mask;
 	const struct rte_flow_item_ah *ah_spec, *ah_mask;
+	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -818,6 +824,27 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_PFCP:
+			pfcp_spec = item->spec;
+			pfcp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
+
+			if (pfcp_spec && pfcp_mask) {
+				if (pfcp_mask->s_field == UINT8_MAX) {
+					input_set |= IAVF_INSET_PFCP_S_FIELD;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
+				}
+
+				rte_memcpy(hdr->buffer, pfcp_spec,
+					sizeof(*pfcp_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v4 5/5] net/iavf: add support for FDIR mark action
  2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Simei Su
                         ` (3 preceding siblings ...)
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 4/5] net/iavf: add support for FDIR PFCP Simei Su
@ 2020-04-15  2:55       ` Simei Su
  2020-04-15  3:17       ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Zhang, Qi Z
  2020-04-21  6:19       ` [dpdk-dev] [PATCH v5 " Simei Su
  6 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-15  2:55 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables mark action support and takes mark only case
into consideration.
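
A minimal sketch of the mark-only case this patch describes (the mark
id is illustrative): the application supplies only a MARK action, and
the dest_num == 0 branch added below then appends a PASSTHRU action so
the packet still reaches its normal RSS destination.

struct rte_flow_action_mark mark = { .id = 0x1234 };
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};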

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf.h      |  1 +
 drivers/net/iavf/iavf_fdir.c | 45 +++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 2f84a1f..b28a65c 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -97,6 +97,7 @@ struct iavf_fdir_conf {
 	struct virtchnl_fdir_del del_fltr;
 	uint64_t input_set;
 	uint32_t flow_id;
+	uint32_t mark_flag;
 };
 
 struct iavf_fdir_info {
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 1e50a07..406622c 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -18,6 +18,7 @@
 #include "iavf.h"
 #include "iavf_generic_flow.h"
 #include "virtchnl.h"
+#include "iavf_rxtx.h"
 
 #define IAVF_FDIR_MAX_QREGION_SIZE 128
 
@@ -170,6 +171,9 @@
 		goto free_entry;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 1);
+
 	rte_memcpy(rule, filter, sizeof(*rule));
 	flow->rule = rule;
 
@@ -198,6 +202,9 @@
 		return -rte_errno;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 0);
+
 	flow->rule = NULL;
 	rte_free(filter);
 
@@ -296,7 +303,9 @@
 			struct iavf_fdir_conf *filter)
 {
 	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark_spec = NULL;
 	uint32_t dest_num = 0;
+	uint32_t mark_num = 0;
 	int ret;
 
 	int number = 0;
@@ -362,6 +371,19 @@
 			filter->add_fltr.rule_cfg.action_set.count = ++number;
 			break;
 
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			mark_num++;
+
+			filter->mark_flag = 1;
+			mark_spec = actions->conf;
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_MARK;
+			filter_action->act_conf.mark_id = mark_spec->id;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
 		default:
 			rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -377,13 +399,34 @@
 		return -rte_errno;
 	}
 
-	if (dest_num == 0 || dest_num >= 2) {
+	if (dest_num >= 2) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
 			"Unsupported action combination");
 		return -rte_errno;
 	}
 
+	if (mark_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Too many mark actions");
+		return -rte_errno;
+	}
+
+	if (dest_num + mark_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Empty action");
+		return -rte_errno;
+	}
+
+	/* Mark only is equal to mark + passthru. */
+	if (dest_num == 0) {
+		filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+		filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+		filter->add_fltr.rule_cfg.action_set.count = ++number;
+	}
+
 	return 0;
 }
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability
  2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Simei Su
                         ` (4 preceding siblings ...)
  2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 5/5] net/iavf: add support for FDIR mark action Simei Su
@ 2020-04-15  3:17       ` Zhang, Qi Z
  2020-04-21  6:19       ` [dpdk-dev] [PATCH v5 " Simei Su
  6 siblings, 0 replies; 43+ messages in thread
From: Zhang, Qi Z @ 2020-04-15  3:17 UTC (permalink / raw)
  To: Su, Simei, Ye, Xiaolong, Wu, Jingjing; +Cc: dev, Cao, Yahui



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Wednesday, April 15, 2020 10:55 AM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Ye, Xiaolong
> <xiaolong.ye@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Cao, Yahui <yahui.cao@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH v4 0/5] net/iavf: support FDIR capability
> 
> [PATCH v4 1/5] support FDIR common patterns and actions.
> [PATCH v4 2/5] support FDIR GTPU pattern.
> [PATCH v4 3/5] support FDIR L2TPv3, ESP, AH and NAT-T pattern.
> [PATCH v4 4/5] support FDIR PFCP node and session pattern.
> [PATCH v4 5/5] support FDIR mark action.
> 
> This patchset depend on the following patches on patchwork:
> https://patchwork.dpdk.org/cover/68372/
>     [v4,00/11] framework for advanced iAVF PMD
> 
> v4:
> * Add to check vf->vf_res.
> * Simplify some codes.
> * Specify and refine some error logs.
> 
> v3:
> * Add release notes.
> * Update action structures based on virtchnl update.
> * Add VIRTCHNL_FDIR_FAILURE_RULE_EXIST condition check and modify
>   the error log.
> 
> v2:
> * Update pattern and action structures based on latest virtchnl design.
> * Add upper bound check for pattern layers and action numbers.
> * Increase action number in mark only case.
> * Consider more circumstances about PF error return status.
> 
> Simei Su (5):
>   net/iavf: add support for FDIR basic rule
>   net/iavf: add support for FDIR GTPU
>   net/iavf: add support for FDIR L2TPv3 and IPSec
>   net/iavf: add support for FDIR PFCP
>   net/iavf: add support for FDIR mark action
> 
>  doc/guides/rel_notes/release_20_05.rst |   1 +
>  drivers/net/iavf/Makefile              |   1 +
>  drivers/net/iavf/iavf.h                |  18 +
>  drivers/net/iavf/iavf_fdir.c           | 971
> +++++++++++++++++++++++++++++++++
>  drivers/net/iavf/iavf_vchnl.c          | 154 +++++-
>  drivers/net/iavf/meson.build           |   1 +
>  6 files changed, 1145 insertions(+), 1 deletion(-)  create mode 100644
> drivers/net/iavf/iavf_fdir.c
> 
> --
> 1.8.3.1

Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>



^ permalink raw reply	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v5 0/5] net/iavf: support FDIR capability
  2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Simei Su
                         ` (5 preceding siblings ...)
  2020-04-15  3:17       ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capability Zhang, Qi Z
@ 2020-04-21  6:19       ` Simei Su
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 1/5] net/iavf: add support for FDIR basic rule Simei Su
                           ` (5 more replies)
  6 siblings, 6 replies; 43+ messages in thread
From: Simei Su @ 2020-04-21  6:19 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

[PATCH v5 1/5] support FDIR common patterns and actions.
[PATCH v5 2/5] support FDIR GTPU pattern.
[PATCH v5 3/5] support FDIR L2TPv3, ESP, AH and NAT-T pattern.
[PATCH v5 4/5] support FDIR PFCP node and session pattern.
[PATCH v5 5/5] support FDIR mark action.

v5:
* Do rebase on the newest codes.

v4:
* Add to check vf->vf_res.
* Simplify some codes.
* Specify and refine some error logs.

v3:
* Add release notes.
* Update action structures based on virtchnl update.
* Add VIRTCHNL_FDIR_FAILURE_RULE_EXIST condition check and modify
  the error log.

v2:
* Update pattern and action structures based on latest virtchnl design.
* Add upper bound check for pattern layers and action numbers.
* Increase action number in mark only case.
* Consider more circumstances about PF error return status.

Simei Su (5):
  net/iavf: add support for FDIR basic rule
  net/iavf: add support for FDIR GTPU
  net/iavf: add support for FDIR L2TPv3 and IPSec
  net/iavf: add support for FDIR PFCP
  net/iavf: add support for FDIR mark action

 doc/guides/rel_notes/release_20_05.rst |   1 +
 drivers/net/iavf/Makefile              |   1 +
 drivers/net/iavf/iavf.h                |  18 +
 drivers/net/iavf/iavf_fdir.c           | 971 +++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c          | 154 +++++-
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 1145 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v5 1/5] net/iavf: add support for FDIR basic rule
  2020-04-21  6:19       ` [dpdk-dev] [PATCH v5 " Simei Su
@ 2020-04-21  6:19         ` Simei Su
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 2/5] net/iavf: add support for FDIR GTPU Simei Su
                           ` (4 subsequent siblings)
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-21  6:19 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch adds FDIR create/destroy/validate function in AVF.
Common pattern and queue/qgroup/passthru/drop actions are supported.
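
As a rough application-side sketch of the validate/create flow added
here (the udp53_to_queue helper name, port id, queue index and UDP port
are illustrative assumptions, not part of the patch):

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Sketch: validate, then create, a rule that sends IPv4/UDP packets
 * with destination port 53 to Rx queue 3.
 */
static struct rte_flow *
udp53_to_queue(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_udp spec = { .hdr.dst_port = RTE_BE16(53) };
	struct rte_flow_item_udp mask = {
		.hdr.dst_port = RTE_BE16(0xffff), /* only full masks are accepted */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return NULL;	/* rejected by the validate path */
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}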

Signed-off-by: Simei Su <simei.su@intel.com>
Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>
---
 doc/guides/rel_notes/release_20_05.rst |   1 +
 drivers/net/iavf/Makefile              |   1 +
 drivers/net/iavf/iavf.h                |  17 +
 drivers/net/iavf/iavf_fdir.c           | 747 +++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c          | 154 ++++++-
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 920 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c

diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
index bacd4c6..ea0d092 100644
--- a/doc/guides/rel_notes/release_20_05.rst
+++ b/doc/guides/rel_notes/release_20_05.rst
@@ -102,6 +102,7 @@ New Features
   Update the Intel iavf driver with new features and improvements, including:
 
   * Added generic filter support.
+  * Added advanced iavf with FDIR capability.
 
 * **Added a new driver for Intel Foxville I225 devices.**
 
diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
index a809180..fabe510 100644
--- a/drivers/net/iavf/Makefile
+++ b/drivers/net/iavf/Makefile
@@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
 ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
 endif
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 78bdaff..30208d4 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -92,6 +92,17 @@ struct iavf_vsi {
 struct iavf_flow_parser_node;
 TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
 
+struct iavf_fdir_conf {
+	struct virtchnl_fdir_add add_fltr;
+	struct virtchnl_fdir_del del_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
+struct iavf_fdir_info {
+	struct iavf_fdir_conf conf;
+};
+
 /* TODO: is that correct to assume the max number to be 16 ?*/
 #define IAVF_MAX_MSIX_VECTORS   16
 
@@ -131,6 +142,8 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+
+	struct iavf_fdir_info fdir; /* flow director info */
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -252,4 +265,8 @@ int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast,
 int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
 			 struct rte_ether_addr *addr, bool add);
 int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
+int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_check(struct iavf_adapter *adapter,
+		struct iavf_fdir_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
new file mode 100644
index 0000000..9b03d29
--- /dev/null
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -0,0 +1,747 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+#include "virtchnl.h"
+
+#define IAVF_FDIR_MAX_QREGION_SIZE 128
+
+#define IAVF_FDIR_IPV6_TC_OFFSET 20
+#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
+
+#define IAVF_FDIR_INSET_ETH (\
+	IAVF_INSET_ETHERTYPE)
+
+#define IAVF_FDIR_INSET_ETH_IPV4 (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6 (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
+	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,		IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,		IAVF_FDIR_INSET_ETH_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_sctp,		IAVF_FDIR_INSET_ETH_IPV4_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+};
+
+static struct iavf_flow_parser iavf_fdir_parser;
+
+static int
+iavf_fdir_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+		parser = &iavf_fdir_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fdir_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_fdir_parser, ad);
+}
+
+static int
+iavf_fdir_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter = meta;
+	struct iavf_fdir_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory for fdir rule");
+		return -rte_errno;
+	}
+
+	ret = iavf_fdir_add(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to add filter rule.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return 0;
+
+free_entry:
+	rte_free(rule);
+	return -rte_errno;
+}
+
+static int
+iavf_fdir_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fdir_conf *)flow->rule;
+
+	ret = iavf_fdir_del(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to delete filter rule.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return 0;
+}
+
+static int
+iavf_fdir_validation(struct iavf_adapter *ad,
+		__rte_unused struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fdir_conf *filter = meta;
+	int ret;
+
+	ret = iavf_fdir_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to validate filter rule.");
+		return -rte_errno;
+	}
+
+	return 0;
+};
+
+static struct iavf_flow_engine iavf_fdir_engine = {
+	.init = iavf_fdir_init,
+	.uninit = iavf_fdir_uninit,
+	.create = iavf_fdir_create,
+	.destroy = iavf_fdir_destroy,
+	.validation = iavf_fdir_validation,
+	.type = IAVF_FLOW_ENGINE_FDIR,
+};
+
+static int
+iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
+			struct rte_flow_error *error,
+			const struct rte_flow_action *act,
+			struct virtchnl_filter_action *filter_action)
+{
+	const struct rte_flow_action_rss *rss = act->conf;
+	uint32_t i;
+
+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Invalid action.");
+		return -rte_errno;
+	}
+
+	if (rss->queue_num <= 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Queue region size can't be 0 or 1.");
+		return -rte_errno;
+	}
+
+	/* check if queue index for queue region is continuous */
+	for (i = 0; i < rss->queue_num - 1; i++) {
+		if (rss->queue[i + 1] != rss->queue[i] + 1) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, act,
+					"Discontinuous queue region");
+			return -rte_errno;
+		}
+	}
+
+	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"Invalid queue region indexes.");
+		return -rte_errno;
+	}
+
+	if (!(rte_is_power_of_2(rss->queue_num) &&
+		rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"The region size should be any of the following values:"
+				"1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
+				"of queues does not exceed the VSI allocation.");
+		return -rte_errno;
+	}
+
+	filter_action->act_conf.queue.index = rss->queue[0];
+	filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse_action(struct iavf_adapter *ad,
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct iavf_fdir_conf *filter)
+{
+	const struct rte_flow_action_queue *act_q;
+	uint32_t dest_num = 0;
+	int ret;
+
+	int number = 0;
+	struct virtchnl_filter_action *filter_action;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_DROP;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+
+			act_q = actions->conf;
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+
+			if (filter_action->act_conf.queue.index >=
+				ad->eth_dev->data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid queue for FDIR.");
+				return -rte_errno;
+			}
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+
+			ret = iavf_fdir_parse_action_qregion(ad,
+						error, actions, filter_action);
+			if (ret)
+				return ret;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, actions,
+					"Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (dest_num == 0 || dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
+			const struct rte_flow_item pattern[],
+			struct rte_flow_error *error,
+			struct iavf_fdir_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	uint64_t input_set = IAVF_INSET_NONE;
+
+	enum rte_flow_item_type next_type;
+	uint16_t ether_type;
+
+	int layer = 0;
+	struct virtchnl_proto_hdr *hdr;
+
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+	};
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Range is not supported");
+		}
+
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			next_type = (item + 1)->type;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+
+			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
+				(!eth_spec || !eth_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "NULL eth spec/mask.");
+				return -rte_errno;
+			}
+
+			if (eth_spec && eth_mask) {
+				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid MAC_addr mask.");
+					return -rte_errno;
+				}
+			}
+
+			if (eth_spec && eth_mask && eth_mask->type) {
+				if (eth_mask->type != RTE_BE16(0xffff)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid type mask.");
+					return -rte_errno;
+				}
+
+				ether_type = rte_be_to_cpu_16(eth_spec->type);
+				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+					ether_type == RTE_ETHER_TYPE_IPV6) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Unsupported ether_type.");
+					return -rte_errno;
+				}
+
+				input_set |= IAVF_INSET_ETHERTYPE;
+				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
+
+				rte_memcpy(hdr->buffer,
+					eth_spec, sizeof(*eth_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.fragment_offset ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.type_of_service ==
+								UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TOS;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_PROTO;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV4_TTL;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
+				}
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					input_set |= IAVF_INSET_IPV4_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+				}
+
+				rte_memcpy(hdr->buffer,
+					&ipv4_spec->hdr,
+					sizeof(ipv4_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				if ((ipv6_mask->hdr.vtc_flow &
+					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+					== rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
+					input_set |= IAVF_INSET_IPV6_TC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
+				}
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					ipv6_addr_mask,
+					RTE_DIM(ipv6_mask->hdr.src_addr))) {
+					input_set |= IAVF_INSET_IPV6_SRC;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+				}
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					ipv6_addr_mask,
+					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+					input_set |= IAVF_INSET_IPV6_DST;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+				}
+
+				rte_memcpy(hdr->buffer,
+					&ipv6_spec->hdr,
+					sizeof(ipv6_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				if (udp_mask->hdr.dgram_len ||
+					udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_UDP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_UDP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&udp_spec->hdr,
+						sizeof(udp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&udp_spec->hdr,
+						sizeof(udp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_TCP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_TCP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&tcp_spec->hdr,
+						sizeof(tcp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&tcp_spec->hdr,
+						sizeof(tcp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+			if (sctp_spec && sctp_mask) {
+				if (sctp_mask->hdr.cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid SCTP mask");
+					return -rte_errno;
+				}
+
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, item,
+			"Protocol header layers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	filter->input_set = input_set;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_fdir_conf *filter = &vf->fdir.conf;
+	struct iavf_pattern_match_item *item = NULL;
+	uint64_t input_set;
+	int ret;
+
+	memset(filter, 0, sizeof(*filter));
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (!item)
+		return -rte_errno;
+
+	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
+	if (ret)
+		goto error;
+
+	input_set = filter->input_set;
+	if (!input_set || input_set & ~item->input_set_mask) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
+				"Invalid input set");
+		ret = -rte_errno;
+		goto error;
+	}
+
+	ret = iavf_fdir_parse_action(ad, actions, error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(item);
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_fdir_parser = {
+	.engine = &iavf_fdir_engine,
+	.array = iavf_fdir_pattern,
+	.array_len = RTE_DIM(iavf_fdir_pattern),
+	.parse_pattern_action = iavf_fdir_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fdir_engine_register)
+{
+	iavf_register_flow_engine(&iavf_fdir_engine);
+}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index b913f06..805d308 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -340,7 +340,8 @@
 	 */
 
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
-		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -855,3 +856,154 @@
 
 	return err;
 }
+
+int
+iavf_fdir_add(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_add *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->add_fltr.validate_only = 0;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->add_fltr);
+	args.in_args_size = sizeof(*(&filter->add_fltr));
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+	filter->flow_id = fdir_ret->flow_id;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"Succeeded in adding rule request by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request due to no hw resource");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request because the rule already exists");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request because the rule conflicts with an existing rule");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request because the hardware doesn't support it");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request due to a programming timeout");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"Failed to add rule request due to other reasons");
+		return -1;
+	}
+
+	return 0;
+};
+
+int
+iavf_fdir_del(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_del *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->del_fltr.flow_id = filter->flow_id;
+
+	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->del_fltr);
+	args.in_args_size = sizeof(filter->del_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"Succeeded in deleting rule request by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR,
+			"Failed to delete rule request because this rule doesn't exist");
+		return -1;
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+		PMD_DRV_LOG(ERR,
+			"Failed to delete rule request due to a programming timeout");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"Failed to delete rule request due to other reasons");
+		return -1;
+	}
+
+	return 0;
+};
+
+int
+iavf_fdir_check(struct iavf_adapter *adapter,
+		struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_add *fdir_ret;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->add_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->add_fltr);
+	args.in_args_size = sizeof(*(&filter->add_fltr));
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to check flow director rule");
+		return err;
+	}
+
+	fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+
+	if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+		PMD_DRV_LOG(INFO,
+			"Succeeded in checking rule request by PF");
+	} else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR,
+			"Failed to check rule request due to parameter validation"
+			" failure or lack of HW support");
+		return -1;
+	} else {
+		PMD_DRV_LOG(ERR,
+			"Failed to check rule request due to other reasons");
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 99a33e9..2cc772a 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -11,6 +11,7 @@ sources = files(
 	'iavf_rxtx.c',
 	'iavf_vchnl.c',
 	'iavf_generic_flow.c',
+	'iavf_fdir.c',
 )
 
 if arch_subdir == 'x86'
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v5 2/5] net/iavf: add support for FDIR GTPU
  2020-04-21  6:19       ` [dpdk-dev] [PATCH v5 " Simei Su
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 1/5] net/iavf: add support for FDIR basic rule Simei Su
@ 2020-04-21  6:19         ` Simei Su
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
                           ` (3 subsequent siblings)
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-21  6:19 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables GTPU with TEID and QFI for flow director filter.
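
A hedged sketch of a matching rule (assuming iavf_pattern_eth_ipv4_gtpu
expands to eth / ipv4 / udp / gtpu; the TEID value is illustrative):

/* Drop GTP-U packets carrying TEID 0x12345678. */
struct rte_flow_item_gtp spec = {
	.teid = rte_cpu_to_be_32(0x12345678),
};
struct rte_flow_item_gtp mask = {
	.teid = RTE_BE32(0xffffffff),	/* parser requires a full TEID mask */
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_GTPU, .spec = &spec, .mask = &mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};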

Signed-off-by: Simei Su <simei.su@intel.com>
Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 63 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 9b03d29..7c0bb14 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -67,6 +67,14 @@
 	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
 	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
 
+#define IAVF_FDIR_INSET_GTPU (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID)
+
+#define IAVF_FDIR_INSET_GTPU_EH (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -77,6 +85,8 @@
 	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -362,6 +372,8 @@
 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -664,6 +676,57 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec = item->spec;
+			gtp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
+
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+					gtp_mask->msg_type ||
+					gtp_mask->msg_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid GTP mask");
+					return -rte_errno;
+				}
+
+				if (gtp_mask->teid == UINT32_MAX) {
+					input_set |= IAVF_INSET_GTPU_TEID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
+				}
+
+				rte_memcpy(hdr->buffer,
+					gtp_spec, sizeof(*gtp_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+			gtp_psc_spec = item->spec;
+			gtp_psc_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+
+			if (gtp_psc_spec && gtp_psc_mask) {
+				if (gtp_psc_mask->qfi == UINT8_MAX) {
+					input_set |= IAVF_INSET_GTPU_QFI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
+				}
+
+				rte_memcpy(hdr->buffer, gtp_psc_spec,
+					sizeof(*gtp_psc_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v5 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec
  2020-04-21  6:19       ` [dpdk-dev] [PATCH v5 " Simei Su
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 1/5] net/iavf: add support for FDIR basic rule Simei Su
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 2/5] net/iavf: add support for FDIR GTPU Simei Su
@ 2020-04-21  6:19         ` Simei Su
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 4/5] net/iavf: add support for FDIR PFCP Simei Su
                           ` (2 subsequent siblings)
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-21  6:19 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables L2TPv3 with SESSION_ID, ESP/AH with SPI, NAT-T
with SPI and IP src/dst for flow director filter.
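
A hedged sketch for the L2TPv3 case (session id and queue index are
illustrative; the parser accepts only a fully masked session id):

struct rte_flow_item_l2tpv3oip spec = {
	.session_id = rte_cpu_to_be_32(1000),
};
struct rte_flow_item_l2tpv3oip mask = {
	.session_id = RTE_BE32(0xffffffff),
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP, .spec = &spec, .mask = &mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
struct rte_flow_action_queue queue = { .index = 1 };
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};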

Signed-off-by: Simei Su <simei.su@intel.com>
Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 91 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 7c0bb14..1e59c7b 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -75,6 +75,23 @@
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
 
+#define IAVF_FDIR_INSET_L2TPV3OIP (\
+	IAVF_L2TPV3OIP_SESSION_ID)
+
+#define IAVF_FDIR_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -87,6 +104,14 @@
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_GTPU,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_GTPU_EH,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -374,6 +399,9 @@
 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
 	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
 	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
+	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
+	const struct rte_flow_item_esp *esp_spec, *esp_mask;
+	const struct rte_flow_item_ah *ah_spec, *ah_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -727,6 +755,69 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+			l2tpv3oip_spec = item->spec;
+			l2tpv3oip_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
+
+			if (l2tpv3oip_spec && l2tpv3oip_mask) {
+				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
+					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
+				}
+
+				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
+					sizeof(*l2tpv3oip_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_ESP:
+			esp_spec = item->spec;
+			esp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+			if (esp_spec && esp_mask) {
+				if (esp_mask->hdr.spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_ESP_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, &esp_spec->hdr,
+					sizeof(esp_spec->hdr));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_AH:
+			ah_spec = item->spec;
+			ah_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+			if (ah_spec && ah_mask) {
+				if (ah_mask->spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_AH_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, ah_spec,
+					sizeof(*ah_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v5 4/5] net/iavf: add support for FDIR PFCP
  2020-04-21  6:19       ` [dpdk-dev] [PATCH v5 " Simei Su
                           ` (2 preceding siblings ...)
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
@ 2020-04-21  6:19         ` Simei Su
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 5/5] net/iavf: add support for FDIR mark action Simei Su
  2020-04-21  6:40         ` [dpdk-dev] [PATCH v5 0/5] net/iavf: support FDIR capability Ye Xiaolong
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-21  6:19 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables PFCP node and session packets with S_FIELD
for flow director filter.
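
For the node-message side, a hedged sketch (assuming
iavf_pattern_eth_ipv6_pfcp expands to eth / ipv6 / udp / pfcp; values
illustrative) matches packets with the S field clear:

struct rte_flow_item_pfcp spec = { .s_field = 0 };	/* node message */
struct rte_flow_item_pfcp mask = { .s_field = 0xff };
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_PFCP, .spec = &spec, .mask = &mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};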

Signed-off-by: Simei Su <simei.su@intel.com>
Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 1e59c7b..1e50a07 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -92,6 +92,9 @@
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_ESP_SPI)
 
+#define IAVF_FDIR_INSET_PFCP (\
+	IAVF_INSET_PFCP_S_FIELD)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
@@ -112,6 +115,8 @@
 	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -402,6 +407,7 @@
 	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
 	const struct rte_flow_item_esp *esp_spec, *esp_mask;
 	const struct rte_flow_item_ah *ah_spec, *ah_mask;
+	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -818,6 +824,27 @@
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_PFCP:
+			pfcp_spec = item->spec;
+			pfcp_mask = item->mask;
+
+			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
+
+			if (pfcp_spec && pfcp_mask) {
+				if (pfcp_mask->s_field == UINT8_MAX) {
+					input_set |= IAVF_INSET_PFCP_S_FIELD;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
+				}
+
+				rte_memcpy(hdr->buffer, pfcp_spec,
+					sizeof(*pfcp_spec));
+			}
+
+			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* [dpdk-dev] [PATCH v5 5/5] net/iavf: add support for FDIR mark action
  2020-04-21  6:19       ` [dpdk-dev] [PATCH v5 " Simei Su
                           ` (3 preceding siblings ...)
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 4/5] net/iavf: add support for FDIR PFCP Simei Su
@ 2020-04-21  6:19         ` Simei Su
  2020-04-21  6:40         ` [dpdk-dev] [PATCH v5 0/5] net/iavf: support FDIR capabiltiy Ye Xiaolong
  5 siblings, 0 replies; 43+ messages in thread
From: Simei Su @ 2020-04-21  6:19 UTC (permalink / raw)
  To: qi.z.zhang, xiaolong.ye, jingjing.wu; +Cc: dev, yahui.cao, simei.su

This patch enables mark action support and takes mark only case
into consideration.
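
On the receive side, a matched packet carries the mark id in the mbuf.
A minimal sketch of reading it back (the get_flow_mark helper is an
illustrative name; the flag and field are the 20.05-era mbuf API):

#include <rte_mbuf.h>

static inline int
get_flow_mark(const struct rte_mbuf *m, uint32_t *mark_id)
{
	if (m->ol_flags & PKT_RX_FDIR_ID) {
		*mark_id = m->hash.fdir.hi;	/* id set by the MARK action */
		return 1;
	}
	return 0;
}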

Signed-off-by: Simei Su <simei.su@intel.com>
Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/iavf/iavf.h      |  1 +
 drivers/net/iavf/iavf_fdir.c | 45 +++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 30208d4..5fb7881 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -97,6 +97,7 @@ struct iavf_fdir_conf {
 	struct virtchnl_fdir_del del_fltr;
 	uint64_t input_set;
 	uint32_t flow_id;
+	uint32_t mark_flag;
 };
 
 struct iavf_fdir_info {
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 1e50a07..406622c 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -18,6 +18,7 @@
 #include "iavf.h"
 #include "iavf_generic_flow.h"
 #include "virtchnl.h"
+#include "iavf_rxtx.h"
 
 #define IAVF_FDIR_MAX_QREGION_SIZE 128
 
@@ -170,6 +171,9 @@
 		goto free_entry;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 1);
+
 	rte_memcpy(rule, filter, sizeof(*rule));
 	flow->rule = rule;
 
@@ -198,6 +202,9 @@
 		return -rte_errno;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 0);
+
 	flow->rule = NULL;
 	rte_free(filter);
 
@@ -296,7 +303,9 @@
 			struct iavf_fdir_conf *filter)
 {
 	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark_spec = NULL;
 	uint32_t dest_num = 0;
+	uint32_t mark_num = 0;
 	int ret;
 
 	int number = 0;
@@ -362,6 +371,19 @@
 			filter->add_fltr.rule_cfg.action_set.count = ++number;
 			break;
 
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			mark_num++;
+
+			filter->mark_flag = 1;
+			mark_spec = actions->conf;
+			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_ACTION_MARK;
+			filter_action->act_conf.mark_id = mark_spec->id;
+
+			filter->add_fltr.rule_cfg.action_set.count = ++number;
+			break;
+
 		default:
 			rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -377,13 +399,34 @@
 		return -rte_errno;
 	}
 
-	if (dest_num == 0 || dest_num >= 2) {
+	if (dest_num >= 2) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
 			"Unsupported action combination");
 		return -rte_errno;
 	}
 
+	if (mark_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Too many mark actions");
+		return -rte_errno;
+	}
+
+	if (dest_num + mark_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Empty action");
+		return -rte_errno;
+	}
+
+	/* Mark only is equal to mark + passthru. */
+	if (dest_num == 0) {
+		filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+		filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+		filter->add_fltr.rule_cfg.action_set.count = ++number;
+	}
+
 	return 0;
 }
 
-- 
1.8.3.1
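
As a usage note (not part of the series), a minimal Rx-side sketch of
consuming the mark.  Once a marked rule is installed, this patch turns
on flow mark processing through iavf_fdir_rx_proc_enable(), and
matching packets carry PKT_RX_FDIR_ID with the id stored in
mbuf->hash.fdir.hi; the port and queue ids below are arbitrary:

#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

static void
poll_marked(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *bufs[BURST_SIZE];
	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, bufs, BURST_SIZE);
	uint16_t i;

	for (i = 0; i < nb; i++) {
		struct rte_mbuf *m = bufs[i];

		if (m->ol_flags & PKT_RX_FDIR_ID)
			printf("flow mark %u\n", m->hash.fdir.hi);
		rte_pktmbuf_free(m);
	}
}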


^ permalink raw reply related	[flat|nested] 43+ messages in thread

* Re: [dpdk-dev] [PATCH v5 0/5] net/iavf: support FDIR capabiltiy
  2020-04-21  6:19       ` [dpdk-dev] [PATCH v5 " Simei Su
                           ` (4 preceding siblings ...)
  2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 5/5] net/iavf: add support for FDIR mark action Simei Su
@ 2020-04-21  6:40         ` Ye Xiaolong
  5 siblings, 0 replies; 43+ messages in thread
From: Ye Xiaolong @ 2020-04-21  6:40 UTC (permalink / raw)
  To: Simei Su; +Cc: qi.z.zhang, jingjing.wu, dev, yahui.cao

On 04/21, Simei Su wrote:
>[PATCH v5 1/5] support FDIR common patterns and actions.
>[PATCH v5 2/5] support FDIR GTPU pattern.
>[PATCH v5 3/5] support FDIR L2TPv3, ESP, AH and NAT-T pattern.
>[PATCH v5 4/5] support FDIR PFCP node and session pattern.
>[PATCH v5 5/5] support FDIR mark action.
>
>v5:
>* Do rebase on the newest codes.
>
>v4:
>* Add to check vf->vf_res.
>* Simplify some codes.
>* Specify and refine some error logs.
>
>v3:
>* Add release notes.
>* Update action structures based on virtchnl update.
>* Add VIRTCHNL_FDIR_FAILURE_RULE_EXIST condition check and modify
>  the error log.
>
>v2:
>* Update pattern and action structures based on latest virtchnl design.
>* Add upper bound check for pattern layers and action numbers.
>* Increase action number in mark only case.
>* Consider more circumstances about PF error return status.
>
>Simei Su (5):
>  net/iavf: add support for FDIR basic rule
>  net/iavf: add support for FDIR GTPU
>  net/iavf: add support for FDIR L2TPv3 and IPSec
>  net/iavf: add support for FDIR PFCP
>  net/iavf: add support for FDIR mark action
>
> doc/guides/rel_notes/release_20_05.rst |   1 +
> drivers/net/iavf/Makefile              |   1 +
> drivers/net/iavf/iavf.h                |  18 +
> drivers/net/iavf/iavf_fdir.c           | 971 +++++++++++++++++++++++++++++++++
> drivers/net/iavf/iavf_vchnl.c          | 154 +++++-
> drivers/net/iavf/meson.build           |   1 +
> 6 files changed, 1145 insertions(+), 1 deletion(-)
> create mode 100644 drivers/net/iavf/iavf_fdir.c
>
>-- 
>1.8.3.1
>

Applied to dpdk-next-net-intel, Thanks.

^ permalink raw reply	[flat|nested] 43+ messages in thread

end of thread, other threads:[~2020-04-21  6:45 UTC | newest]

Thread overview: 43+ messages
2020-03-18  5:41 [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capabiltiy Simei Su
2020-03-18  5:41 ` [dpdk-dev] [PATCH 1/5] net/iavf: add support for FDIR basic rule Simei Su
2020-03-31  5:20   ` Cao, Yahui
2020-03-31  7:12     ` Su, Simei
2020-03-18  5:41 ` [dpdk-dev] [PATCH 2/5] net/iavf: add support for FDIR GTPU Simei Su
2020-03-19  1:46   ` Zhang, Qi Z
2020-03-18  5:41 ` [dpdk-dev] [PATCH 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
2020-03-18  5:42 ` [dpdk-dev] [PATCH 4/5] net/iavf: add support for FDIR PFCP Simei Su
2020-03-18  5:42 ` [dpdk-dev] [PATCH 5/5] net/iavf: add support for FDIR mark action Simei Su
2020-03-31  5:20   ` Cao, Yahui
2020-03-31  7:05     ` Su, Simei
2020-03-18  5:56 ` [dpdk-dev] [PATCH 0/5] net/iavf: support FDIR capabiltiy Stephen Hemminger
2020-03-19  8:48   ` Su, Simei
2020-04-02 13:32 ` [dpdk-dev] [PATCH v2 " Simei Su
2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 1/5] net/iavf: add support for FDIR basic rule Simei Su
2020-04-10  7:40     ` Cao, Yahui
2020-04-10  8:00       ` Su, Simei
2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 2/5] net/iavf: add support for FDIR GTPU Simei Su
2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 4/5] net/iavf: add support for FDIR PFCP Simei Su
2020-04-02 13:32   ` [dpdk-dev] [PATCH v2 5/5] net/iavf: add support for FDIR mark action Simei Su
2020-04-10 10:18   ` [dpdk-dev] [PATCH v3 0/5] net/iavf: support FDIR capabiltiy Simei Su
2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 1/5] net/iavf: add support for FDIR basic rule Simei Su
2020-04-14  7:37       ` Ye Xiaolong
2020-04-14  8:31         ` Su, Simei
2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 2/5] net/iavf: add support for FDIR GTPU Simei Su
2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 4/5] net/iavf: add support for FDIR PFCP Simei Su
2020-04-10 10:18     ` [dpdk-dev] [PATCH v3 5/5] net/iavf: add support for FDIR mark action Simei Su
2020-04-15  2:55     ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capabiltiy Simei Su
2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 1/5] net/iavf: add support for FDIR basic rule Simei Su
2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 2/5] net/iavf: add support for FDIR GTPU Simei Su
2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 4/5] net/iavf: add support for FDIR PFCP Simei Su
2020-04-15  2:55       ` [dpdk-dev] [PATCH v4 5/5] net/iavf: add support for FDIR mark action Simei Su
2020-04-15  3:17       ` [dpdk-dev] [PATCH v4 0/5] net/iavf: support FDIR capabiltiy Zhang, Qi Z
2020-04-21  6:19       ` [dpdk-dev] [PATCH v5 " Simei Su
2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 1/5] net/iavf: add support for FDIR basic rule Simei Su
2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 2/5] net/iavf: add support for FDIR GTPU Simei Su
2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec Simei Su
2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 4/5] net/iavf: add support for FDIR PFCP Simei Su
2020-04-21  6:19         ` [dpdk-dev] [PATCH v5 5/5] net/iavf: add support for FDIR mark action Simei Su
2020-04-21  6:40         ` [dpdk-dev] [PATCH v5 0/5] net/iavf: support FDIR capabiltiy Ye Xiaolong
