From: Simei Su <simei.su@intel.com>
To: qi.z.zhang@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, jia.guo@intel.com, Simei Su <simei.su@intel.com>
Subject: [dpdk-dev] [PATCH v3] net/ice: fix GTPU down/uplink and extension conflict
Date: Sun, 26 Jul 2020 11:13:47 +0800
Message-ID: <1595733227-428607-1-git-send-email-simei.su@intel.com>
In-Reply-To: <1595601712-403946-1-git-send-email-simei.su@intel.com>

When adding an RSS rule with GTPU_DWN/UP, the profile is written from
top to bottom due to a firmware limitation. If an RSS rule with
GTPU_EH already exists, GTPU_DWN/UP packets will therefore match the
GTPU_EH profile. This patch solves the issue by remembering the
gtpu_eh RSS configuration and removing it before the corresponding
RSS configuration for the downlink/uplink rule is issued.
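
For example, with a rule sequence like the following (illustrative
testpmd commands; the syntax shown is an assumption, not taken from
the original report), the second rule would previously end up
matching the GTPU_EH profile installed by the first:

    flow create 0 ingress pattern eth / ipv4 / udp / gtpu /
      gtp_psc / ipv4 / end actions rss types ipv4 end queues end / end
    flow create 0 ingress pattern eth / ipv4 / udp / gtpu /
      gtp_psc pdu_t is 0 / ipv4 / end actions rss types ipv4 end
      queues end / end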

Fixes: 2e2810fc1868 ("net/ice: fix GTPU RSS")

Signed-off-by: Simei Su <simei.su@intel.com>
---

v3:
* Rename the global structure.
* Use macros to avoid code duplication.
* Revise incorrect code that used a local variable.

v2:
* Refine the commit log.
* Fix GTPU downlink and uplink rules failing when issued simultaneously.
* Fix GTPU down/uplink ipv4_udp/tcp and ipv6_udp/tcp symmetric hash
  not taking effect.
---
 drivers/net/ice/ice_ethdev.c |  44 +++++++++++
 drivers/net/ice/ice_ethdev.h |  23 ++++++
 drivers/net/ice/ice_hash.c   | 174 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 241 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a4a0390..67f6c65 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2539,6 +2539,12 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
 				    __func__, ret);
 
+		/* Store hash field and header for gtpu_eh ipv4 */
+		pf->gtpu_ctx.ipv4.hash_fld = ICE_FLOW_HASH_IPV4;
+		pf->gtpu_ctx.ipv4.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH |
+					   ICE_FLOW_SEG_HDR_IPV4 |
+					   ICE_FLOW_SEG_HDR_IPV_OTHER;
+
 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
 				ICE_FLOW_SEG_HDR_PPPOE |
 				ICE_FLOW_SEG_HDR_IPV4 |
@@ -2565,6 +2571,12 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
 				    __func__, ret);
 
+		/* Store hash field and header for gtpu_eh ipv6 */
+		pf->gtpu_ctx.ipv6.hash_fld = ICE_FLOW_HASH_IPV6;
+		pf->gtpu_ctx.ipv6.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH |
+					   ICE_FLOW_SEG_HDR_IPV6 |
+					   ICE_FLOW_SEG_HDR_IPV_OTHER;
+
 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
 				ICE_FLOW_SEG_HDR_PPPOE |
 				ICE_FLOW_SEG_HDR_IPV6 |
@@ -2587,6 +2599,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
 				    __func__, ret);
 
+		/* Store hash field and header for gtpu_eh ipv4_udp */
+		pf->gtpu_ctx.ipv4_udp.hash_fld = ICE_HASH_UDP_IPV4;
+		pf->gtpu_ctx.ipv4_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH;
+
 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
 				ICE_FLOW_SEG_HDR_PPPOE, 0);
 		if (ret)
@@ -2607,6 +2623,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
 				    __func__, ret);
 
+		/* Store hash field and header for gtpu_eh ipv6_udp */
+		pf->gtpu_ctx.ipv6_udp.hash_fld = ICE_HASH_UDP_IPV6;
+		pf->gtpu_ctx.ipv6_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH;
+
 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
 				ICE_FLOW_SEG_HDR_PPPOE, 0);
 		if (ret)
@@ -2627,6 +2647,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
 				    __func__, ret);
 
+		/* Store hash field and header for gtpu_eh ipv4_tcp */
+		pf->gtpu_ctx.ipv4_tcp.hash_fld = ICE_HASH_TCP_IPV4;
+		pf->gtpu_ctx.ipv4_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH;
+
 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
 				ICE_FLOW_SEG_HDR_PPPOE, 0);
 		if (ret)
@@ -2647,6 +2671,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
 				    __func__, ret);
 
+		/* Store hash field and header for gtpu_eh ipv6_tcp */
+		pf->gtpu_ctx.ipv6_tcp.hash_fld = ICE_HASH_TCP_IPV6;
+		pf->gtpu_ctx.ipv6_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH;
+
 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
 				ICE_FLOW_SEG_HDR_PPPOE, 0);
 		if (ret)
@@ -2695,6 +2723,19 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 	}
 }
 
+static void
+ice_rss_ctx_init(struct ice_pf *pf)
+{
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4);
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6);
+
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_udp);
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_udp);
+
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_tcp);
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_tcp);
+}
+
 static int ice_init_rss(struct ice_pf *pf)
 {
 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
@@ -2755,6 +2796,9 @@ static int ice_init_rss(struct ice_pf *pf)
 		(1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
 	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
 
+	/* Initialize RSS context for gtpu_eh */
+	ice_rss_ctx_init(pf);
+
 	/* RSS hash configuration */
 	ice_rss_hash_set(pf, rss_conf->rss_hf);
 
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 87984ef..1725702 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -358,6 +358,28 @@ struct ice_fdir_info {
 	struct ice_fdir_counter_pool_container counter;
 };
 
+#define ICE_HASH_CFG_VALID(p)				\
+	((p)->hash_fld != 0 && (p)->pkt_hdr != 0)
+
+#define ICE_HASH_CFG_RESET(p) do {	\
+	(p)->hash_fld = 0;		\
+	(p)->pkt_hdr = 0;		\
+} while (0)
+
+struct ice_hash_cfg {
+	uint32_t pkt_hdr;
+	uint64_t hash_fld;
+};
+
+struct ice_hash_gtpu_ctx {
+	struct ice_hash_cfg ipv4;
+	struct ice_hash_cfg ipv6;
+	struct ice_hash_cfg ipv4_udp;
+	struct ice_hash_cfg ipv6_udp;
+	struct ice_hash_cfg ipv4_tcp;
+	struct ice_hash_cfg ipv6_tcp;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -381,6 +403,7 @@ struct ice_pf {
 	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
 	uint16_t fdir_qp_offset;
 	struct ice_fdir_info fdir; /* flow director info */
+	struct ice_hash_gtpu_ctx gtpu_ctx;
 	uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
 	uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
 	struct ice_hw_port_stats stats_offset;
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index e535e4b..3258c1c 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -1232,6 +1232,161 @@ struct ice_hash_match_type ice_hash_type_list[] = {
 }
 
 static int
+ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr, uint64_t fld)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_vsi *vsi = pf->main_vsi;
+	int ret;
+
+	/**
+	 * If the header contains GTPU_EH, store the gtpu_eh context.
+	 * If it contains GTPU_DWN/UP, remove the existing gtpu_eh context.
+	 */
+	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
+		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+			(hdr & ICE_FLOW_SEG_HDR_UDP)) {
+				pf->gtpu_ctx.ipv4_udp.pkt_hdr = hdr;
+				pf->gtpu_ctx.ipv4_udp.hash_fld = fld;
+		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+			(hdr & ICE_FLOW_SEG_HDR_UDP)) {
+				pf->gtpu_ctx.ipv6_udp.pkt_hdr = hdr;
+				pf->gtpu_ctx.ipv6_udp.hash_fld = fld;
+		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+			(hdr & ICE_FLOW_SEG_HDR_TCP)) {
+				pf->gtpu_ctx.ipv4_tcp.pkt_hdr = hdr;
+				pf->gtpu_ctx.ipv4_tcp.hash_fld = fld;
+		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+			(hdr & ICE_FLOW_SEG_HDR_TCP)) {
+				pf->gtpu_ctx.ipv6_tcp.pkt_hdr = hdr;
+				pf->gtpu_ctx.ipv6_tcp.hash_fld = fld;
+		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+			(hdr & (ICE_FLOW_SEG_HDR_UDP |
+				ICE_FLOW_SEG_HDR_TCP)) == 0) {
+				pf->gtpu_ctx.ipv4.pkt_hdr = hdr;
+				pf->gtpu_ctx.ipv4.hash_fld = fld;
+		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+			(hdr & (ICE_FLOW_SEG_HDR_UDP |
+				ICE_FLOW_SEG_HDR_TCP)) == 0) {
+				pf->gtpu_ctx.ipv6.pkt_hdr = hdr;
+				pf->gtpu_ctx.ipv6.hash_fld = fld;
+		}
+	} else if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
+			ICE_FLOW_SEG_HDR_GTPU_UP)) {
+		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+			(hdr & ICE_FLOW_SEG_HDR_UDP)) {
+			if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv4_udp)) {
+				ret = ice_rem_rss_cfg(hw, vsi->idx,
+					pf->gtpu_ctx.ipv4_udp.hash_fld,
+					pf->gtpu_ctx.ipv4_udp.pkt_hdr);
+				if (ret)
+					return -rte_errno;
+
+				ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_udp);
+			}
+
+			if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv4)) {
+				ret = ice_rem_rss_cfg(hw, vsi->idx,
+					pf->gtpu_ctx.ipv4.hash_fld,
+					pf->gtpu_ctx.ipv4.pkt_hdr);
+				if (ret)
+					return -rte_errno;
+
+				ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4);
+			}
+		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+			(hdr & ICE_FLOW_SEG_HDR_UDP)) {
+			if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv6_udp)) {
+				ret = ice_rem_rss_cfg(hw, vsi->idx,
+					pf->gtpu_ctx.ipv6_udp.hash_fld,
+					pf->gtpu_ctx.ipv6_udp.pkt_hdr);
+				if (ret)
+					return -rte_errno;
+
+				ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_udp);
+			}
+
+			if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv6)) {
+				ret = ice_rem_rss_cfg(hw, vsi->idx,
+					pf->gtpu_ctx.ipv6.hash_fld,
+					pf->gtpu_ctx.ipv6.pkt_hdr);
+				if (ret)
+					return -rte_errno;
+
+				ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6);
+			}
+		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+			(hdr & ICE_FLOW_SEG_HDR_TCP)) {
+			if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv4_tcp)) {
+				ret = ice_rem_rss_cfg(hw, vsi->idx,
+					pf->gtpu_ctx.ipv4_tcp.hash_fld,
+					pf->gtpu_ctx.ipv4_tcp.pkt_hdr);
+				if (ret)
+					return -rte_errno;
+
+				ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_tcp);
+			}
+
+			if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv4)) {
+				ret = ice_rem_rss_cfg(hw, vsi->idx,
+					pf->gtpu_ctx.ipv4.hash_fld,
+					pf->gtpu_ctx.ipv4.pkt_hdr);
+				if (ret)
+					return -rte_errno;
+
+				ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4);
+			}
+		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+			(hdr & ICE_FLOW_SEG_HDR_TCP)) {
+			if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv6_tcp)) {
+				ret = ice_rem_rss_cfg(hw, vsi->idx,
+					pf->gtpu_ctx.ipv6_tcp.hash_fld,
+					pf->gtpu_ctx.ipv6_tcp.pkt_hdr);
+				if (ret)
+					return -rte_errno;
+
+				ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_tcp);
+			}
+
+			if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv6)) {
+				ret = ice_rem_rss_cfg(hw, vsi->idx,
+					pf->gtpu_ctx.ipv6.hash_fld,
+					pf->gtpu_ctx.ipv6.pkt_hdr);
+				if (ret)
+					return -rte_errno;
+
+				ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6);
+			}
+		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+			(hdr & (ICE_FLOW_SEG_HDR_UDP |
+				ICE_FLOW_SEG_HDR_TCP)) == 0) {
+			if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv4)) {
+				ret = ice_rem_rss_cfg(hw, vsi->idx,
+					pf->gtpu_ctx.ipv4.hash_fld,
+					pf->gtpu_ctx.ipv4.pkt_hdr);
+				if (ret)
+					return -rte_errno;
+
+				ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4);
+			}
+		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+			(hdr & (ICE_FLOW_SEG_HDR_UDP |
+				ICE_FLOW_SEG_HDR_TCP)) == 0) {
+			if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv6)) {
+				ret = ice_rem_rss_cfg(hw, vsi->idx,
+					pf->gtpu_ctx.ipv6.hash_fld,
+					pf->gtpu_ctx.ipv6.pkt_hdr);
+				if (ret)
+					return -rte_errno;
+
+				ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int
 ice_hash_create(struct ice_adapter *ad,
 		struct rte_flow *flow,
 		void *meta,
@@ -1248,6 +1403,10 @@ struct ice_hash_match_type ice_hash_type_list[] = {
 	uint64_t hash_field = ((struct rss_meta *)meta)->hash_flds;
 	uint8_t hash_function = ((struct rss_meta *)meta)->hash_function;
 
+	ret = ice_add_rss_cfg_pre(pf, headermask, hash_field);
+	if (ret)
+		return -rte_errno;
+
 	filter_ptr = rte_zmalloc("ice_rss_filter",
 				sizeof(struct ice_hash_flow_cfg), 0);
 	if (!filter_ptr) {
@@ -1297,6 +1456,19 @@ struct ice_hash_match_type ice_hash_type_list[] = {
 	return -rte_errno;
 }
 
+static void
+ice_rem_rss_cfg_post(struct ice_pf *pf)
+{
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4);
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6);
+
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_udp);
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_udp);
+
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_tcp);
+	ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_tcp);
+}
+
 static int
 ice_hash_destroy(struct ice_adapter *ad,
 		struct rte_flow *flow,
@@ -1334,6 +1506,8 @@ struct ice_hash_match_type ice_hash_type_list[] = {
 		}
 	}
 
+	ice_rem_rss_cfg_post(pf);
+
 	rte_free(filter_ptr);
 	return 0;
 
-- 
1.8.3.1


