From: Wei Zhao <wei.zhao1@intel.com>
To: dev@dpdk.org
Cc: stable@dpdk.org, qi.z.zhang@intel.com, nannan.lu@intel.com,
	Wei Zhao <wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH v3 4/4] net/ice: add input set byte number check
Date: Sun, 28 Jun 2020 11:21:51 +0800
Message-ID: <20200628032151.71098-5-wei.zhao1@intel.com>
In-Reply-To: <20200628032151.71098-1-wei.zhao1@intel.com>

This patch adds a check on the total number of input set bytes,
as the hardware limits the total input set to 32 bytes.
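
For context, below is a minimal standalone sketch of the accounting
idea (illustration only, not part of the patch): every byte of a
matched field that carries a non-zero mask consumes part of the
32-byte hardware field vector, and the rule is rejected once the
budget is used up. The per-field granularity here is simplified;
the real per-field increments are in the diff below.

	#include <stdint.h>
	#include <stdbool.h>
	#include <stddef.h>

	#define MAX_INPUT_SET_BYTE	32

	/* Count one field-vector byte for every masked byte; the rule
	 * fits only while the count stays below MAX_INPUT_SET_BYTE,
	 * mirroring the ">=" rejection check used in the patch. */
	static bool
	input_set_fits(const uint8_t *mask, size_t len)
	{
		uint16_t field_vec_byte = 0;
		size_t i;

		for (i = 0; i < len; i++)
			if (mask[i])
				field_vec_byte++;

		return field_vec_byte < MAX_INPUT_SET_BYTE;
	}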

Fixes: 47d460d63233 ("net/ice: rework switch filter")
Cc: stable@dpdk.org

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 43 +++++++++++++++++++++++++++--
 1 file changed, 40 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index c1ea74c73..a4d7fcb14 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -25,7 +25,8 @@
 #include "ice_generic_flow.h"
 
 
-#define MAX_QGRP_NUM_TYPE 7
+#define MAX_QGRP_NUM_TYPE	7
+#define MAX_INPUT_SET_BYTE	32
 #define ICE_PPP_IPV4_PROTO	0x0021
 #define ICE_PPP_IPV6_PROTO	0x0057
 #define ICE_IPV4_PROTO_NVGRE	0x002F
@@ -473,6 +474,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
 	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = ICE_INSET_NONE;
+	uint16_t feild_vec_byte = 0;
 	uint16_t tunnel_valid = 0;
 	bool pppoe_elem_valid = 0;
 	bool pppoe_patt_valid = 0;
@@ -540,6 +542,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						m->src_addr[j] =
 						eth_mask->src.addr_bytes[j];
 						i = 1;
+						feild_vec_byte++;
 					}
 					if (eth_mask->dst.addr_bytes[j]) {
 						h->dst_addr[j] =
@@ -547,6 +550,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						m->dst_addr[j] =
 						eth_mask->dst.addr_bytes[j];
 						i = 1;
+						feild_vec_byte++;
 					}
 				}
 				if (i)
@@ -557,6 +561,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						eth_spec->type;
 					list[t].m_u.ethertype.ethtype_id =
 						eth_mask->type;
+					feild_vec_byte += 2;
 					t++;
 				}
 			}
@@ -616,24 +621,28 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						ipv4_spec->hdr.src_addr;
 					list[t].m_u.ipv4_hdr.src_addr =
 						ipv4_mask->hdr.src_addr;
+					feild_vec_byte += 2;
 				}
 				if (ipv4_mask->hdr.dst_addr) {
 					list[t].h_u.ipv4_hdr.dst_addr =
 						ipv4_spec->hdr.dst_addr;
 					list[t].m_u.ipv4_hdr.dst_addr =
 						ipv4_mask->hdr.dst_addr;
+					feild_vec_byte += 2;
 				}
 				if (ipv4_mask->hdr.time_to_live) {
 					list[t].h_u.ipv4_hdr.time_to_live =
 						ipv4_spec->hdr.time_to_live;
 					list[t].m_u.ipv4_hdr.time_to_live =
 						ipv4_mask->hdr.time_to_live;
+					feild_vec_byte++;
 				}
 				if (ipv4_mask->hdr.next_proto_id) {
 					list[t].h_u.ipv4_hdr.protocol =
 						ipv4_spec->hdr.next_proto_id;
 					list[t].m_u.ipv4_hdr.protocol =
 						ipv4_mask->hdr.next_proto_id;
+					feild_vec_byte++;
 				}
 				if ((ipv4_spec->hdr.next_proto_id &
 					ipv4_mask->hdr.next_proto_id) ==
@@ -644,6 +653,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						ipv4_spec->hdr.type_of_service;
 					list[t].m_u.ipv4_hdr.tos =
 						ipv4_mask->hdr.type_of_service;
+					feild_vec_byte++;
 				}
 				t++;
 			}
@@ -721,12 +731,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						ipv6_spec->hdr.src_addr[j];
 						s->src_addr[j] =
 						ipv6_mask->hdr.src_addr[j];
+						feild_vec_byte++;
 					}
 					if (ipv6_mask->hdr.dst_addr[j]) {
 						f->dst_addr[j] =
 						ipv6_spec->hdr.dst_addr[j];
 						s->dst_addr[j] =
 						ipv6_mask->hdr.dst_addr[j];
+						feild_vec_byte++;
 					}
 				}
 				if (ipv6_mask->hdr.proto) {
@@ -734,12 +746,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						ipv6_spec->hdr.proto;
 					s->next_hdr =
 						ipv6_mask->hdr.proto;
+					feild_vec_byte++;
 				}
 				if (ipv6_mask->hdr.hop_limits) {
 					f->hop_limit =
 						ipv6_spec->hdr.hop_limits;
 					s->hop_limit =
 						ipv6_mask->hdr.hop_limits;
+					feild_vec_byte++;
 				}
 				if (ipv6_mask->hdr.vtc_flow &
 						rte_cpu_to_be_32
@@ -757,6 +771,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 							RTE_IPV6_HDR_TC_MASK) >>
 							RTE_IPV6_HDR_TC_SHIFT;
 					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
+					feild_vec_byte += 4;
 				}
 				t++;
 			}
@@ -802,14 +817,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						udp_spec->hdr.src_port;
 					list[t].m_u.l4_hdr.src_port =
 						udp_mask->hdr.src_port;
+					feild_vec_byte += 2;
 				}
 				if (udp_mask->hdr.dst_port) {
 					list[t].h_u.l4_hdr.dst_port =
 						udp_spec->hdr.dst_port;
 					list[t].m_u.l4_hdr.dst_port =
 						udp_mask->hdr.dst_port;
+					feild_vec_byte += 2;
 				}
-						t++;
+				t++;
 			}
 			break;
 
@@ -854,12 +871,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						tcp_spec->hdr.src_port;
 					list[t].m_u.l4_hdr.src_port =
 						tcp_mask->hdr.src_port;
+					feild_vec_byte += 2;
 				}
 				if (tcp_mask->hdr.dst_port) {
 					list[t].h_u.l4_hdr.dst_port =
 						tcp_spec->hdr.dst_port;
 					list[t].m_u.l4_hdr.dst_port =
 						tcp_mask->hdr.dst_port;
+					feild_vec_byte += 2;
 				}
 				t++;
 			}
@@ -899,12 +918,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						sctp_spec->hdr.src_port;
 					list[t].m_u.sctp_hdr.src_port =
 						sctp_mask->hdr.src_port;
+					feild_vec_byte += 2;
 				}
 				if (sctp_mask->hdr.dst_port) {
 					list[t].h_u.sctp_hdr.dst_port =
 						sctp_spec->hdr.dst_port;
 					list[t].m_u.sctp_hdr.dst_port =
 						sctp_mask->hdr.dst_port;
+					feild_vec_byte += 2;
 				}
 				t++;
 			}
@@ -942,6 +963,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						vxlan_mask->vni[0];
 					input_set |=
 						ICE_INSET_TUN_VXLAN_VNI;
+					feild_vec_byte += 2;
 				}
 				t++;
 			}
@@ -978,6 +1000,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						nvgre_mask->tni[0];
 					input_set |=
 						ICE_INSET_TUN_NVGRE_TNI;
+					feild_vec_byte += 2;
 				}
 				t++;
 			}
@@ -1006,6 +1029,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.vlan_hdr.vlan =
 						vlan_mask->tci;
 					input_set |= ICE_INSET_VLAN_OUTER;
+					feild_vec_byte += 2;
 				}
 				if (vlan_mask->inner_type) {
 					list[t].h_u.vlan_hdr.type =
@@ -1013,6 +1037,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.vlan_hdr.type =
 						vlan_mask->inner_type;
 					input_set |= ICE_INSET_ETHERTYPE;
+					feild_vec_byte += 2;
 				}
 				t++;
 			}
@@ -1053,6 +1078,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.pppoe_hdr.session_id =
 						pppoe_mask->session_id;
 					input_set |= ICE_INSET_PPPOE_SESSION;
+					feild_vec_byte += 2;
 				}
 				t++;
 				pppoe_elem_valid = 1;
@@ -1085,7 +1111,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.pppoe_hdr.ppp_prot_id =
 						pppoe_proto_mask->proto_id;
 					input_set |= ICE_INSET_PPPOE_PROTO;
-
+					feild_vec_byte += 2;
 					pppoe_prot_valid = 1;
 				}
 				if ((pppoe_proto_mask->proto_id &
@@ -1142,6 +1168,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.esp_hdr.spi =
 					esp_mask->hdr.spi;
 				input_set |= ICE_INSET_ESP_SPI;
+				feild_vec_byte += 4;
 				t++;
 			}
 
@@ -1198,6 +1225,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.ah_hdr.spi =
 					ah_mask->spi;
 				input_set |= ICE_INSET_AH_SPI;
+				feild_vec_byte += 4;
 				t++;
 			}
 
@@ -1237,6 +1265,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.l2tpv3_sess_hdr.session_id =
 					l2tp_mask->session_id;
 				input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
+				feild_vec_byte += 4;
 				t++;
 			}
 
@@ -1342,6 +1371,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			*tun_type = ICE_SW_IPV6_UDP;
 	}
 
+	if (feild_vec_byte >= MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"too much input set");
+		return -ENOTSUP;
+	}
+
 	*lkups_num = t;
 
 	return input_set;
-- 
2.19.1

