All of lore.kernel.org
 help / color / mirror / Atom feed
From: Yuying Zhang <yuying.zhang@intel.com>
To: dev@dpdk.org, qi.z.zhang@intel.com
Cc: Yuying Zhang <yuying.zhang@intel.com>
Subject: [dpdk-dev] [PATCH RFC 1/2] net/ice/base: support drop any and steer all to queue
Date: Mon, 30 Aug 2021 07:56:46 +0000	[thread overview]
Message-ID: <20210830075647.3011046-1-yuying.zhang@intel.com> (raw)

This patch adds support for the "drop any" and "steer all to queue"
actions in the switch filter.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/ice/base/ice_flex_pipe.c     | 73 +++++++++++++++---------
 drivers/net/ice/base/ice_flex_pipe.h     |  5 +-
 drivers/net/ice/base/ice_flex_type.h     |  1 +
 drivers/net/ice/base/ice_protocol_type.h |  3 +-
 drivers/net/ice/base/ice_switch.c        | 39 +++++++------
 5 files changed, 73 insertions(+), 48 deletions(-)

diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index cf470bc4f0..2ebef279a0 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -1711,23 +1711,30 @@ static enum ice_prof_type
 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
 {
 	u16 i;
+	bool is_any = false;
 
 	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+		if (fv->ew[i].off != ICE_NAN_OFFSET)
+			is_any = true;
+
 		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
 		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
 		    fv->ew[i].off == ICE_VNI_OFFSET)
-			return ICE_PROF_TUN_UDP;
+			return ICE_PROF_TUN_UDP | ICE_PROF_ANY;
 
 		/* GRE tunnel will have GRE protocol */
 		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
-			return ICE_PROF_TUN_GRE;
+			return ICE_PROF_TUN_GRE | ICE_PROF_ANY;
 
 		/* PPPOE tunnel will have PPPOE protocol */
 		if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
-			return ICE_PROF_TUN_PPPOE;
+			return ICE_PROF_TUN_PPPOE | ICE_PROF_ANY;
 	}
 
-	return ICE_PROF_NON_TUN;
+	if (is_any)
+		return ICE_PROF_NON_TUN | ICE_PROF_ANY;
+	else
+		return ICE_PROF_NON_TUN;
 }
 
 /**
@@ -1764,7 +1771,6 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
 		if (fv) {
 			/* Determine field vector type */
 			prof_type = ice_get_sw_prof_type(hw, fv);
-
 			if (req_profs & prof_type)
 				ice_set_bit((u16)offset, bm);
 		}
@@ -1787,8 +1793,9 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
  * allocated for every list entry.
  */
 enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
-		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
+ice_get_sw_fv_list(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+		   u8 *prot_ids, u16 ids_cnt, ice_bitmap_t *bm,
+		   struct LIST_HEAD_TYPE *fv_list)
 {
 	struct ice_sw_fv_list_entry *fvl;
 	struct ice_sw_fv_list_entry *tmp;
@@ -1799,7 +1806,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
 
 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 
-	if (!ids_cnt || !hw->seg)
+	if (tun_type != ICE_ANY && (!ids_cnt || !hw->seg))
 		return ICE_ERR_PARAM;
 
 	ice_seg = hw->seg;
@@ -1819,28 +1826,38 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
 		if (!ice_is_bit_set(bm, (u16)offset))
 			continue;
 
-		for (i = 0; i < ids_cnt; i++) {
-			int j;
+		if (tun_type == ICE_ANY) {
+			fvl = (struct ice_sw_fv_list_entry *)
+			       ice_malloc(hw, sizeof(*fvl));
+			if (!fvl)
+				goto err;
+			fvl->fv_ptr = fv;
+			fvl->profile_id = offset;
+			LIST_ADD(&fvl->list_entry, fv_list);
+		} else {
+			for (i = 0; i < ids_cnt; i++) {
+				int j;
 
-			/* This code assumes that if a switch field vector line
-			 * has a matching protocol, then this line will contain
-			 * the entries necessary to represent every field in
-			 * that protocol header.
-			 */
-			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
-				if (fv->ew[j].prot_id == prot_ids[i])
+				/* This code assumes that if a switch field vector
+				 * line has a matching protocol, then this line
+				 * will contain the entries necessary to represent
+				 * every field in that protocol header.
+				 */
+				for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+					if (fv->ew[j].prot_id == prot_ids[i])
+						break;
+				if (j >= hw->blk[ICE_BLK_SW].es.fvw)
 					break;
-			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
-				break;
-			if (i + 1 == ids_cnt) {
-				fvl = (struct ice_sw_fv_list_entry *)
-					ice_malloc(hw, sizeof(*fvl));
-				if (!fvl)
-					goto err;
-				fvl->fv_ptr = fv;
-				fvl->profile_id = offset;
-				LIST_ADD(&fvl->list_entry, fv_list);
-				break;
+				if (i + 1 == ids_cnt) {
+					fvl = (struct ice_sw_fv_list_entry *)
+						ice_malloc(hw, sizeof(*fvl));
+					if (!fvl)
+						goto err;
+					fvl->fv_ptr = fv;
+					fvl->profile_id = offset;
+					LIST_ADD(&fvl->list_entry, fv_list);
+					break;
+				}
 			}
 		}
 	} while (fv);
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
index 58e3c1d1ec..ca9b216f69 100644
--- a/drivers/net/ice/base/ice_flex_pipe.h
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -36,8 +36,9 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
 void
 ice_init_prof_result_bm(struct ice_hw *hw);
 enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
-		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
+ice_get_sw_fv_list(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+		   u8 *prot_ids, u16 ids_cnt, ice_bitmap_t *bm,
+		   struct LIST_HEAD_TYPE *fv_list);
 enum ice_status
 ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
 u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index c7f92b9150..b63b984688 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -916,6 +916,7 @@ enum ice_prof_type {
 	ICE_PROF_TUN_GRE = 0x4,
 	ICE_PROF_TUN_PPPOE = 0x8,
 	ICE_PROF_TUN_ALL = 0xE,
+	ICE_PROF_ANY = 0x10,
 	ICE_PROF_ALL = 0xFF,
 };
 
diff --git a/drivers/net/ice/base/ice_protocol_type.h b/drivers/net/ice/base/ice_protocol_type.h
index d769ad0580..0a8c39b369 100644
--- a/drivers/net/ice/base/ice_protocol_type.h
+++ b/drivers/net/ice/base/ice_protocol_type.h
@@ -109,6 +109,7 @@ enum ice_sw_tunnel_type {
 	ICE_SW_TUN_PPPOE_PAY_QINQ,
 	ICE_SW_TUN_PPPOE_IPV4_QINQ,
 	ICE_SW_TUN_PPPOE_IPV6_QINQ,
+	ICE_ANY,
 	ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
 };
 
@@ -164,7 +165,7 @@ enum ice_prot_id {
 };
 
 #define ICE_VNI_OFFSET		12 /* offset of VNI from ICE_PROT_UDP_OF */
-
+#define ICE_NAN_OFFSET		511
 #define ICE_MAC_OFOS_HW		1
 #define ICE_MAC_IL_HW		4
 #define ICE_ETYPE_OL_HW		9
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 4568242c10..4bf9761909 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -7235,19 +7235,22 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
  * @fv_list: pointer to a list that holds the returned field vectors
  */
 static enum ice_status
-ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ice_get_fv(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+	   struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 	   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
 {
 	enum ice_status status;
 	u8 *prot_ids;
 	u16 i;
 
-	if (!lkups_cnt)
+	if (!lkups_cnt && tun_type != ICE_ANY)
 		return ICE_SUCCESS;
 
-	prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
-	if (!prot_ids)
-		return ICE_ERR_NO_MEMORY;
+	if (lkups_cnt) {
+		prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
+		if (!prot_ids)
+			return ICE_ERR_NO_MEMORY;
+	}
 
 	for (i = 0; i < lkups_cnt; i++)
 		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
@@ -7256,10 +7259,12 @@ ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 		}
 
 	/* Find field vectors that include all specified protocol types */
-	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
+	status = ice_get_sw_fv_list(hw, tun_type, prot_ids,
+				    lkups_cnt, bm, fv_list);
 
 free_mem:
-	ice_free(hw, prot_ids);
+	if (lkups_cnt)
+		ice_free(hw, prot_ids);
 	return status;
 }
 
@@ -7340,6 +7345,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
 	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
 
 	switch (rinfo->tun_type) {
+	case ICE_ANY:
+		prof_type = ICE_PROF_ANY;
+		break;
 	case ICE_NON_TUN:
 	case ICE_NON_TUN_QINQ:
 		prof_type = ICE_PROF_NON_TUN;
@@ -7495,6 +7503,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 {
 	switch (type) {
+	case ICE_ANY:
 	case ICE_SW_TUN_PROFID_IPV6_ESP:
 	case ICE_SW_TUN_PROFID_IPV6_AH:
 	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
@@ -7579,7 +7588,8 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 	 */
 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
 
-	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+	status = ice_get_fv(hw, rinfo->tun_type, lkups, lkups_cnt,
+			    fv_bitmap, &rm->fv_list);
 	if (status)
 		goto err_unroll;
 
@@ -8314,15 +8324,10 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
 	enum ice_status status;
 	u16 vsi_list_id = 0;
 
-	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
-	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
-	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
-		return ICE_ERR_NOT_IMPL;
-
-	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
-	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
-	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
-	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
+	if ((cur_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI &&
+	     cur_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) ||
+	    (new_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI &&
+	     new_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI_LIST))
 		return ICE_ERR_NOT_IMPL;
 
 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
-- 
2.25.1


             reply	other threads:[~2021-08-30  8:13 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-08-30  7:56 Yuying Zhang [this message]
2021-08-30  7:56 ` [dpdk-dev] [PATCH RFC 2/2] net/ice: support drop any and steer all to queue Yuying Zhang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210830075647.3011046-1-yuying.zhang@intel.com \
    --to=yuying.zhang@intel.com \
    --cc=dev@dpdk.org \
    --cc=qi.z.zhang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.