From: Yahui Cao <yahui.cao@intel.com>
To: Qiming Yang <qiming.yang@intel.com>, Wenzhuo Lu <wenzhuo.lu@intel.com>
Cc: dev@dpdk.org, Qi Zhang <qi.z.zhang@intel.com>,
	Xiaolong Ye <xiaolong.ye@intel.com>,
	Beilei Xing <beilei.xing@intel.com>,
	Yahui Cao <yahui.cao@intel.com>
Subject: [dpdk-dev] [PATCH v5 3/9] net/ice: add FDIR create and destroy
Date: Mon, 30 Sep 2019 19:45:41 +0800
Message-ID: <20190930114547.74803-4-yahui.cao@intel.com>
In-Reply-To: <20190930114547.74803-1-yahui.cao@intel.com>

Add ice_fdir_create_filter to create a rule. If a flow is matched by the
flow director filter, the filter rule will be programmed to HW. For now,
common patterns and the queue/passthru/drop/mark actions are supported.
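
As an illustrative sketch only (the port id, addresses, queue index and
mark id below are made up, error handling is trimmed, and the usual
rte_flow/rte_ip headers and a configured ice port are assumed), an
application reaches this engine through the generic rte_flow API roughly
as follows:

    struct rte_flow_attr attr = { .ingress = 1 };
    /* Match a fully-masked IPv4 source address. */
    struct rte_flow_item_ipv4 ipv4_spec = {
            .hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
    };
    struct rte_flow_item_ipv4 ipv4_mask = { .hdr.src_addr = UINT32_MAX };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4,
              .spec = &ipv4_spec, .mask = &ipv4_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    /* Direct matched packets to queue 3 and mark them with 0x1234. */
    struct rte_flow_action_queue queue = { .index = 3 };
    struct rte_flow_action_mark mark = { .id = 0x1234 };
    struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error err;
    struct rte_flow *flow = rte_flow_create(port_id, &attr,
                                            pattern, actions, &err);

A matching rte_flow_destroy(port_id, flow, &err) then exercises the
destroy path added below.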

Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
 doc/guides/rel_notes/release_19_11.rst |   1 +
 drivers/net/ice/ice_ethdev.h           |   6 +
 drivers/net/ice/ice_fdir_filter.c      | 579 ++++++++++++++++++++++++-
 3 files changed, 583 insertions(+), 3 deletions(-)

diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
index bc77daff0..44a183fb1 100644
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -63,6 +63,7 @@ New Features
   * Added support for device-specific DDP package loading.
   * Added support for handling Receive Flex Descriptor.
   * Added support for protocol extraction on per Rx queue.
+  * Added support for Flow Director filters based on the generic filter framework.
   * Generic filter enhancement
     - Supported pipeline mode.
     - Supported new packet type like PPPoE for switch filter.
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 71775899b..deb8ffc06 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -247,6 +247,11 @@ TAILQ_HEAD(ice_flow_list, rte_flow);
 struct ice_flow_parser_node;
 TAILQ_HEAD(ice_parser_list, ice_flow_parser_node);
 
+struct ice_fdir_filter_conf {
+	struct ice_fdir_fltr input;
+	uint64_t input_set;
+};
+
 /**
  *  A structure used to define the fields of FDIR-related info.
  */
@@ -256,6 +261,7 @@ struct ice_fdir_info {
 	struct ice_rx_queue *rxq;
 	void *prg_pkt;                 /* memory for fdir program packet */
 	uint64_t dma_addr;             /* physical address of packet memory */
+	struct ice_fdir_filter_conf conf;
 };
 
 struct ice_pf {
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index d18d03c32..984f54ea2 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -7,6 +7,55 @@
 #include "ice_rxtx.h"
 #include "ice_generic_flow.h"
 
+#define ICE_FDIR_IPV6_TC_OFFSET		20
+#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)
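+/*
+ * The two defines above follow the IPv6 vtc_flow word layout:
+ * version (4 bits) | traffic class (8 bits, bits 20-27) | flow label (20 bits).
+ */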
+
+#define ICE_FDIR_INSET_ETH_IPV4 (\
+	ICE_INSET_DMAC | \
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
+	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
+
+#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
+	ICE_FDIR_INSET_ETH_IPV4 | \
+	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
+	ICE_FDIR_INSET_ETH_IPV4 | \
+	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
+	ICE_FDIR_INSET_ETH_IPV4 | \
+	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV6 (\
+	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
+
+#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
+	ICE_FDIR_INSET_ETH_IPV6 | \
+	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
+	ICE_FDIR_INSET_ETH_IPV6 | \
+	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+
+#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
+	ICE_FDIR_INSET_ETH_IPV6 | \
+	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+static struct ice_pattern_match_item ice_fdir_pattern[] = {
+	{pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
+	{pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
+	{pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
+	{pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
+	{pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
+	{pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
+	{pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
+};
+
+static struct ice_flow_parser ice_fdir_parser;
+
 static const struct rte_memzone *
 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
 {
@@ -363,7 +412,7 @@ ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
 	}
 }
 
-static int __rte_unused
+static int
 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
 			uint64_t input_set, bool is_tunnel)
 {
@@ -456,7 +505,7 @@ ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
 	}
 }
 
-static void __rte_unused
+static void
 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
 		    bool is_tunnel, bool add)
 {
@@ -475,8 +524,13 @@ static int
 ice_fdir_init(struct ice_adapter *ad)
 {
 	struct ice_pf *pf = &ad->pf;
+	int ret;
+
+	ret = ice_fdir_setup(pf);
+	if (ret)
+		return ret;
 
-	return ice_fdir_setup(pf);
+	return ice_register_parser(&ice_fdir_parser, ad);
 }
 
 static void
@@ -484,15 +538,534 @@ ice_fdir_uninit(struct ice_adapter *ad)
 {
 	struct ice_pf *pf = &ad->pf;
 
+	ice_unregister_parser(&ice_fdir_parser, ad);
+
 	ice_fdir_teardown(pf);
 }
 
+static int
+ice_fdir_add_del_filter(struct ice_pf *pf,
+			struct ice_fdir_filter_conf *filter,
+			bool add)
+{
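+	/*
+	 * An FDIR rule is added or removed by building a programming
+	 * descriptor plus a dummy packet carrying the match fields, then
+	 * submitting both on the dedicated FDIR TX queue.
+	 */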
+	struct ice_fltr_desc desc;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	int ret;
+
+	filter->input.dest_vsi = pf->main_vsi->idx;
+
+	memset(&desc, 0, sizeof(desc));
+	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
+
+	memset(pkt, 0, ICE_FDIR_PKT_LEN);
+	ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
+		return -EINVAL;
+	}
+
+	return ice_fdir_programming(pf, &desc);
+}
+
+static int
+ice_fdir_create_filter(struct ice_adapter *ad,
+		       struct rte_flow *flow,
+		       void *meta,
+		       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_fdir_filter_conf *filter = meta;
+	struct ice_fdir_filter_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return -rte_errno;
+	}
+
+	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
+			filter->input_set, false);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Profile configure failed.");
+		goto free_entry;
+	}
+
+	ret = ice_fdir_add_del_filter(pf, filter, true);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Add filter rule failed.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+	ice_fdir_cnt_update(pf, filter->input.flow_type, false, true);
+	return 0;
+
+free_entry:
+	rte_free(rule);
+	return -rte_errno;
+}
+
+static int
+ice_fdir_destroy_filter(struct ice_adapter *ad,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_fdir_filter_conf *filter;
+	int ret;
+
+	filter = (struct ice_fdir_filter_conf *)flow->rule;
+
+	ret = ice_fdir_add_del_filter(pf, filter, false);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Del filter rule failed.");
+		return -rte_errno;
+	}
+
+	ice_fdir_cnt_update(pf, filter->input.flow_type, false, false);
+	flow->rule = NULL;
+
+	rte_free(filter);
+
+	return 0;
+}
+
 static struct ice_flow_engine ice_fdir_engine = {
 	.init = ice_fdir_init,
 	.uninit = ice_fdir_uninit,
+	.create = ice_fdir_create_filter,
+	.destroy = ice_fdir_destroy_filter,
 	.type = ICE_FLOW_ENGINE_FDIR,
 };
 
+static int
+ice_fdir_parse_action(struct ice_adapter *ad,
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error,
+		      struct ice_fdir_filter_conf *filter)
+{
+	struct ice_pf *pf = &ad->pf;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark_spec = NULL;
+	uint32_t dest_num = 0;
+	uint32_t mark_num = 0;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+
+			act_q = actions->conf;
+			filter->input.q_index = act_q->index;
+			if (filter->input.q_index >=
+					pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "Invalid queue for FDIR.");
+				return -rte_errno;
+			}
+			filter->input.dest_ctl =
+				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			dest_num++;
+
+			filter->input.dest_ctl =
+				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+			break;
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+			dest_num++;
+
+			filter->input.dest_ctl =
+				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+			filter->input.q_index = 0;
+			break;
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			mark_num++;
+
+			mark_spec = actions->conf;
+			filter->input.fltr_id = mark_spec->id;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	if (dest_num == 0 || dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	if (mark_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Too many mark actions");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
+		       const struct rte_flow_item pattern[],
+		       struct rte_flow_error *error,
+		       struct ice_fdir_filter_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	uint64_t input_set = ICE_INSET_NONE;
+	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
+	uint8_t ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+	};
+	uint32_t vtc_flow_cpu;
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Range not supported");
+			return -rte_errno;
+		}
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
+				    !rte_is_zero_ether_addr(&eth_mask->src)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Src MAC not supported");
+					return -rte_errno;
+				}
+
+				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid mac addr mask");
+					return -rte_errno;
+				}
+
+				input_set |= ICE_INSET_DMAC;
+				rte_memcpy(&filter->input.ext_data.dst_mac,
+					   &eth_spec->dst,
+					   RTE_ETHER_ADDR_LEN);
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (ipv4_spec && ipv4_mask) {
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+				    ipv4_mask->hdr.total_length ||
+				    ipv4_mask->hdr.packet_id ||
+				    ipv4_mask->hdr.fragment_offset ||
+				    ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+
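+				/*
+				 * The src/dst swap below is deliberate: the
+				 * fields are written into a TX programming
+				 * packet (see ice_fdir_get_prgm_pkt), whose
+				 * direction is the reverse of the RX traffic
+				 * the rule will match.
+				 */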
+				filter->input.ip.v4.dst_ip =
+					ipv4_spec->hdr.src_addr;
+				filter->input.ip.v4.src_ip =
+					ipv4_spec->hdr.dst_addr;
+				filter->input.ip.v4.tos =
+					ipv4_spec->hdr.type_of_service;
+				filter->input.ip.v4.ttl =
+					ipv4_spec->hdr.time_to_live;
+				filter->input.ip.v4.proto =
+					ipv4_spec->hdr.next_proto_id;
+			}
+
+			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (ipv6_spec && ipv6_mask) {
+				/* Check IPv6 mask and update input set */
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+
+				if ((ipv6_mask->hdr.vtc_flow &
+				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+					input_set |= ICE_INSET_IPV6_TC;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_NEXT_HDR;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+
+				rte_memcpy(filter->input.ip.v6.dst_ip,
+					   ipv6_spec->hdr.src_addr, 16);
+				rte_memcpy(filter->input.ip.v6.src_ip,
+					   ipv6_spec->hdr.dst_addr, 16);
+
+				vtc_flow_cpu =
+				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+				filter->input.ip.v6.tc =
+					(uint8_t)(vtc_flow_cpu >>
+						  ICE_FDIR_IPV6_TC_OFFSET);
+				filter->input.ip.v6.proto =
+					ipv6_spec->hdr.proto;
+				filter->input.ip.v6.hlim =
+					ipv6_spec->hdr.hop_limits;
+			}
+
+			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (tcp_spec && tcp_mask) {
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+				    tcp_mask->hdr.recv_ack ||
+				    tcp_mask->hdr.data_off ||
+				    tcp_mask->hdr.tcp_flags ||
+				    tcp_mask->hdr.rx_win ||
+				    tcp_mask->hdr.cksum ||
+				    tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TCP_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TCP_DST_PORT;
+
+				/* Get filter info */
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+					filter->input.ip.v4.dst_port =
+						tcp_spec->hdr.src_port;
+					filter->input.ip.v4.src_port =
+						tcp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+					filter->input.ip.v6.dst_port =
+						tcp_spec->hdr.src_port;
+					filter->input.ip.v6.src_port =
+						tcp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+				}
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (udp_spec && udp_mask) {
+				/* Check UDP mask and update input set */
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_UDP_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_UDP_DST_PORT;
+
+				/* Get filter info */
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+					filter->input.ip.v4.dst_port =
+						udp_spec->hdr.src_port;
+					filter->input.ip.v4.src_port =
+						udp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+					filter->input.ip.v6.dst_port =
+						udp_spec->hdr.src_port;
+					filter->input.ip.v6.src_port =
+						udp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+				}
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (sctp_spec && sctp_mask) {
+				/* Check SCTP mask and update input set */
+				if (sctp_mask->hdr.cksum) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid SCTP mask");
+					return -rte_errno;
+				}
+
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SCTP_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_SCTP_DST_PORT;
+
+				/* Get filter info */
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+					filter->input.ip.v4.dst_port =
+						sctp_spec->hdr.src_port;
+					filter->input.ip.v4.src_port =
+						sctp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+					filter->input.ip.v6.dst_port =
+						sctp_spec->hdr.src_port;
+					filter->input.ip.v6.src_port =
+						sctp_spec->hdr.dst_port;
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+				}
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   item,
+				   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	filter->input.flow_type = flow_type;
+	filter->input_set = input_set;
+
+	return 0;
+}
+
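+/*
+ * Parse both pattern and actions: match the pattern against the support
+ * table, extract the input set and check it is a non-empty subset of the
+ * matched entry's mask, then parse the actions. On success, *meta points
+ * to the parsed filter for the subsequent create.
+ */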
+static int
+ice_fdir_parse(struct ice_adapter *ad,
+	       struct ice_pattern_match_item *array,
+	       uint32_t array_len,
+	       const struct rte_flow_item pattern[],
+	       const struct rte_flow_action actions[],
+	       void **meta,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
+	struct ice_pattern_match_item *item = NULL;
+	uint64_t input_set;
+	int ret;
+
+	memset(filter, 0, sizeof(*filter));
+	item = ice_search_pattern_match_item(pattern, array, array_len, error);
+	if (!item)
+		return -rte_errno;
+
+	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
+	if (ret)
+		return ret;
+	input_set = filter->input_set;
+	if (!input_set || input_set & ~item->input_set_mask) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	ret = ice_fdir_parse_action(ad, actions, error, filter);
+	if (ret)
+		return ret;
+
+	*meta = filter;
+
+	return 0;
+}
+
+static struct ice_flow_parser ice_fdir_parser = {
+	.engine = &ice_fdir_engine,
+	.array = ice_fdir_pattern,
+	.array_len = RTE_DIM(ice_fdir_pattern),
+	.parse_pattern_action = ice_fdir_parse,
+	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
+};
+
 RTE_INIT(ice_fdir_engine_register)
 {
 	ice_register_flow_engine(&ice_fdir_engine);
-- 
2.17.1

