From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v2 23/37] net/txgbe: add flow API create function
Date: Wed, 11 Nov 2020 14:49:22 +0800	[thread overview]
Message-ID: <20201111064936.768604-24-jiawenwu@trustnetic.com> (raw)
In-Reply-To: <20201111064936.768604-1-jiawenwu@trustnetic.com>

Add support for the create operation of the flow API.
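
A minimal sketch of how an application is expected to exercise this new
.create op through the generic API (assuming a txgbe port bound to port
id 0; the pattern and actions below are only illustrative, and whether a
given rule is accepted depends on the parsers added earlier in this
series):

	#include <stdio.h>
	#include <rte_flow.h>

	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match any IPv4/TCP packet; specs and masks omitted for brevity. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Steer matching packets to Rx queue 1. */
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
						actions, &err);
	if (flow == NULL)
		printf("flow create failed: %s\n",
		       err.message ? err.message : "(no message)");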

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/txgbe/txgbe_ethdev.h |   2 +
 drivers/net/txgbe/txgbe_fdir.c   |  27 ++++
 drivers/net/txgbe/txgbe_flow.c   | 257 +++++++++++++++++++++++++++++++
 3 files changed, 286 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index a0a452c1a..a0a18d254 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -464,6 +464,8 @@ void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
  */
 int txgbe_fdir_configure(struct rte_eth_dev *dev);
 int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+				    uint16_t offset);
 int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
 			      struct txgbe_fdir_rule *rule,
 			      bool del, bool update);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 2faf7fd84..2342cf681 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -270,6 +270,33 @@ txgbe_fdir_store_input_mask(struct rte_eth_dev *dev)
 	return 0;
 }
 
+int
+txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+				uint16_t offset)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	int i;
+
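+	/*
+	 * Each FDIRFLEXCFG register holds the flex-byte configuration for
+	 * four of the 64 flow profiles; the offset field is assumed to be
+	 * in units of two bytes, hence the division by two below.
+	 */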
+	for (i = 0; i < 64; i++) {
+		uint32_t flexreg, flex;
+		flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
+		flex = TXGBE_FDIRFLEXCFG_BASE_MAC;
+		flex |= TXGBE_FDIRFLEXCFG_OFST(offset / 2);
+		flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
+		flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
+		wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
+	}
+
+	txgbe_flush(hw);
+	for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (rd32(hw, TXGBE_FDIRCTL) &
+			TXGBE_FDIRCTL_INITDONE)
+			break;
+		msec_delay(1);
+	}
+	return 0;
+}
+
 /*
  * txgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
  * arguments are valid
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 884a8545f..4141352bf 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2629,6 +2629,262 @@ txgbe_filterlist_flush(void)
 	}
 }
 
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the filter which it hits first.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+txgbe_flow_create(struct rte_eth_dev *dev,
+		  const struct rte_flow_attr *attr,
+		  const struct rte_flow_item pattern[],
+		  const struct rte_flow_action actions[],
+		  struct rte_flow_error *error)
+{
+	int ret;
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct txgbe_fdir_rule fdir_rule;
+	struct txgbe_l2_tunnel_conf l2_tn_filter;
+	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+	struct txgbe_rte_flow_rss_conf rss_conf;
+	struct rte_flow *flow = NULL;
+	struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+	struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+	struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+	struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+	struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+	struct txgbe_rss_conf_ele *rss_filter_ptr;
+	struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+	uint8_t first_mask = FALSE;
+
+	flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+		return (struct rte_flow *)flow;
+	}
+	txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
+			sizeof(struct txgbe_flow_mem), 0);
+	if (!txgbe_flow_mem_ptr) {
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+		rte_free(flow);
+		return NULL;
+	}
+	txgbe_flow_mem_ptr->flow = flow;
+	TAILQ_INSERT_TAIL(&txgbe_flow_list,
+				txgbe_flow_mem_ptr, entries);
+
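+	/*
+	 * Try each parser in turn (ntuple, ethertype, SYN, flow director,
+	 * L2 tunnel, RSS); the first parser that accepts the rule decides
+	 * which hardware filter backs this flow.
+	 */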
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
+			actions, &ntuple_filter, error);
+
+	if (!ret) {
+		ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+		if (!ret) {
+			ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
+				sizeof(struct txgbe_ntuple_filter_ele), 0);
+			if (!ntuple_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&ntuple_filter_ptr->filter_info,
+				&ntuple_filter,
+				sizeof(struct rte_eth_ntuple_filter));
+			TAILQ_INSERT_TAIL(&filter_ntuple_list,
+				ntuple_filter_ptr, entries);
+			flow->rule = ntuple_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
+				actions, &ethertype_filter, error);
+	if (!ret) {
+		ret = txgbe_add_del_ethertype_filter(dev,
+				&ethertype_filter, TRUE);
+		if (!ret) {
+			ethertype_filter_ptr =
+				rte_zmalloc("txgbe_ethertype_filter",
+				sizeof(struct txgbe_ethertype_filter_ele), 0);
+			if (!ethertype_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&ethertype_filter_ptr->filter_info,
+				&ethertype_filter,
+				sizeof(struct rte_eth_ethertype_filter));
+			TAILQ_INSERT_TAIL(&filter_ethertype_list,
+				ethertype_filter_ptr, entries);
+			flow->rule = ethertype_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = txgbe_parse_syn_filter(dev, attr, pattern,
+				actions, &syn_filter, error);
+	if (!ret) {
+		ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
+		if (!ret) {
+			syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
+				sizeof(struct txgbe_eth_syn_filter_ele), 0);
+			if (!syn_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&syn_filter_ptr->filter_info,
+				&syn_filter,
+				sizeof(struct rte_eth_syn_filter));
+			TAILQ_INSERT_TAIL(&filter_syn_list,
+				syn_filter_ptr,
+				entries);
+			flow->rule = syn_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_SYN;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
+	ret = txgbe_parse_fdir_filter(dev, attr, pattern,
+				actions, &fdir_rule, error);
+	if (!ret) {
+		/* A mask cannot be deleted. */
+		if (fdir_rule.b_mask) {
+			if (!fdir_info->mask_added) {
+				/* It's the first time the mask is set. */
+				rte_memcpy(&fdir_info->mask,
+					&fdir_rule.mask,
+					sizeof(struct txgbe_hw_fdir_mask));
+				fdir_info->flex_bytes_offset =
+					fdir_rule.flex_bytes_offset;
+
+				if (fdir_rule.mask.flex_bytes_mask)
+					txgbe_fdir_set_flexbytes_offset(dev,
+						fdir_rule.flex_bytes_offset);
+
+				ret = txgbe_fdir_set_input_mask(dev);
+				if (ret)
+					goto out;
+
+				fdir_info->mask_added = TRUE;
+				first_mask = TRUE;
+			} else {
+				/**
+				 * Only one global mask is supported;
+				 * all the masks should be the same.
+				 */
+				ret = memcmp(&fdir_info->mask,
+					&fdir_rule.mask,
+					sizeof(struct txgbe_hw_fdir_mask));
+				if (ret)
+					goto out;
+
+				if (fdir_info->flex_bytes_offset !=
+						fdir_rule.flex_bytes_offset)
+					goto out;
+			}
+		}
+
+		if (fdir_rule.b_spec) {
+			ret = txgbe_fdir_filter_program(dev, &fdir_rule,
+					FALSE, FALSE);
+			if (!ret) {
+				fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
+					sizeof(struct txgbe_fdir_rule_ele), 0);
+				if (!fdir_rule_ptr) {
+					PMD_DRV_LOG(ERR,
+						"failed to allocate memory");
+					goto out;
+				}
+				rte_memcpy(&fdir_rule_ptr->filter_info,
+					&fdir_rule,
+					sizeof(struct txgbe_fdir_rule));
+				TAILQ_INSERT_TAIL(&filter_fdir_list,
+					fdir_rule_ptr, entries);
+				flow->rule = fdir_rule_ptr;
+				flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+				return flow;
+			}
+
+			if (ret) {
+				/**
+				 * Clear the mask_added flag if programming
+				 * the filter fails.
+				 */
+				if (first_mask)
+					fdir_info->mask_added = FALSE;
+				goto out;
+			}
+		}
+
+		goto out;
+	}
+
+	memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+	ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
+					actions, &l2_tn_filter, error);
+	if (!ret) {
+		ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
+		if (!ret) {
+			l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
+				sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
+			if (!l2_tn_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&l2_tn_filter_ptr->filter_info,
+				&l2_tn_filter,
+				sizeof(struct txgbe_l2_tunnel_conf));
+			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
+				l2_tn_filter_ptr, entries);
+			flow->rule = l2_tn_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+			return flow;
+		}
+	}
+
+	memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+	ret = txgbe_parse_rss_filter(dev, attr,
+					actions, &rss_conf, error);
+	if (!ret) {
+		ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
+		if (!ret) {
+			rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
+				sizeof(struct txgbe_rss_conf_ele), 0);
+			if (!rss_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+					    &rss_conf.conf);
+			TAILQ_INSERT_TAIL(&filter_rss_list,
+				rss_filter_ptr, entries);
+			flow->rule = rss_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_HASH;
+			return flow;
+		}
+	}
+
+out:
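+	/* No parser accepted the rule or programming failed: roll back. */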
+	TAILQ_REMOVE(&txgbe_flow_list,
+		txgbe_flow_mem_ptr, entries);
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(txgbe_flow_mem_ptr);
+	rte_free(flow);
+	return NULL;
+}
+
 /**
  * Check if the flow rule is supported by txgbe.
  * It only checks the format. It doesn't guarantee the rule can be programmed into
@@ -2688,5 +2944,6 @@ txgbe_flow_validate(struct rte_eth_dev *dev,
 
 const struct rte_flow_ops txgbe_flow_ops = {
 	.validate = txgbe_flow_validate,
+	.create = txgbe_flow_create,
 };
 
-- 
2.18.4



