From: Qi Zhang <qi.z.zhang@intel.com>
To: wenzhuo.lu@intel.com, helin.zhang@intel.com
Cc: dev@dpdk.org, Qi Zhang <qi.z.zhang@intel.com>
Subject: [PATCH v2 3/3] net/ixgbe: enable flex bytes for generic flow API
Date: Wed, 31 May 2017 15:45:40 -0400
Message-ID: <1496259940-15547-4-git-send-email-qi.z.zhang@intel.com>
In-Reply-To: <1496259940-15547-1-git-send-email-qi.z.zhang@intel.com>

Add flow director (fdir) flex byte support to the rte_flow API: a RAW
pattern item can now match two flex bytes at a fixed, even frame offset
of at most 62.
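
For illustration, a minimal sketch (not part of this patch; the helper
name is made up and error handling is omitted) of how an application
could fill the RAW spec/mask pair accepted by the new parser code,
assuming the rte_flow_item_raw layout of this release where pattern[]
is a flexible array member:

  #include <stdlib.h>
  #include <rte_flow.h>

  /* Match two flex bytes at an even frame offset no larger than 62;
   * here the EtherType field at byte offset 12, value 0x86DD (IPv6).
   */
  static void
  fill_flex_raw_item(struct rte_flow_item *item)
  {
          struct rte_flow_item_raw *spec = calloc(1, sizeof(*spec) + 2);
          struct rte_flow_item_raw *mask = calloc(1, sizeof(*mask) + 2);

          spec->relative = 0;        /* absolute offset from frame start */
          spec->search = 0;
          spec->offset = 12;         /* must be even and <= 62 */
          spec->limit = 0;
          spec->length = 2;          /* exactly two flex bytes */
          spec->pattern[0] = 0x86;
          spec->pattern[1] = 0xDD;

          mask->relative = 1;        /* all mask fields must be fully set */
          mask->search = 1;
          mask->offset = (int32_t)0xffffffff;
          mask->limit = 0xffff;
          mask->length = 0xffff;
          mask->pattern[0] = 0xff;
          mask->pattern[1] = 0xff;

          item->type = RTE_FLOW_ITEM_TYPE_RAW;
          item->spec = spec;
          item->mask = mask;
          item->last = NULL;         /* ranges are rejected by the parser */
  }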

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---

v2:
- fix a couple of checkpatch errors.

 drivers/net/ixgbe/ixgbe_ethdev.h |   3 +
 drivers/net/ixgbe/ixgbe_fdir.c   |  31 ++++++++-
 drivers/net/ixgbe/ixgbe_flow.c   | 137 ++++++++++++++++++++++++++++++++++++++-
 3 files changed, 167 insertions(+), 4 deletions(-)
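
Also for reference, a condensed sketch (again not part of the patch; the
function name is made up) of the register programming performed by the new
ixgbe_fdir_set_flexbytes_offset() helper in ixgbe_fdir.c below, assuming
the existing ixgbe register access macros: the RAW item's byte offset is
written to FDIRCTRL as a 16-bit word offset, so byte offset 12 becomes
word offset 6.

  /* Mirrors the FDIRCTRL update done by the helper, with a fixed
   * byte offset of 12 (-> flex word offset 6).
   */
  static void
  flex_offset_sketch(struct ixgbe_hw *hw)
  {
          uint32_t fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);

          fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
          fdirctrl |= (12 >> 1) << IXGBE_FDIRCTRL_FLEX_SHIFT;
          IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
          IXGBE_WRITE_FLUSH(hw);
  }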

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index b576a6f..c6449b5 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -189,6 +189,7 @@ struct ixgbe_fdir_rule {
 	uint32_t fdirflags; /* drop or forward */
 	uint32_t soft_id; /* an unique value for this rule */
 	uint8_t queue; /* assigned rx queue */
+	uint8_t flex_bytes_offset;
 };
 
 struct ixgbe_hw_fdir_info {
@@ -624,6 +625,8 @@ void ixgbe_filterlist_flush(void);
  */
 int ixgbe_fdir_configure(struct rte_eth_dev *dev);
 int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+				    uint16_t offset);
 int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
 			      struct ixgbe_fdir_rule *rule,
 			      bool del, bool update);
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 7f6c7b5..950f5ba 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -302,7 +302,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 	 * mask VM pool and DIPv6 since there are currently not supported
 	 * mask FLEX byte, it will be set in flex_conf
 	 */
-	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
+	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
 	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
 	volatile uint32_t *reg;
@@ -333,6 +333,10 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
+	/* flex byte mask */
+	if (info->mask.flex_bytes_mask == 0)
+		fdirm |= IXGBE_FDIRM_FLEX;
+
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
 	/* store the TCP/UDP port masks, bit reversed from port layout */
@@ -533,6 +537,31 @@ ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
 	return -ENOTSUP;
 }
 
+int
+ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+				uint16_t offset)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t fdirctrl;
+	int i;
+
+	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
+	fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
+	fdirctrl |= ((offset >> 1) /* convert to word offset */
+		<< IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+	IXGBE_WRITE_FLUSH(hw);
+	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+			IXGBE_FDIRCTRL_INIT_DONE)
+			break;
+		msec_delay(1);
+	}
+	return 0;
+}
+
 static int
 fdir_set_input_mask(struct rte_eth_dev *dev,
 		    const struct rte_eth_fdir_masks *input_mask)
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 886180e..cd599b3 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -78,6 +78,7 @@
 
 #define IXGBE_MIN_N_TUPLE_PRIO 1
 #define IXGBE_MAX_N_TUPLE_PRIO 7
+#define IXGBE_MAX_FLX_SOURCE_OFF 62
 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
 	do {		\
 		item = pattern + index;\
@@ -1316,7 +1317,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
  * UDP/TCP/SCTP PATTERN:
  * The first not void item can be ETH or IPV4.
  * The second not void item must be IPV4 if the first one is ETH.
- * The third not void item must be UDP or TCP or SCTP.
+ * The next not void item could be UDP or TCP or SCTP (optional)
+ * The next not void item could be RAW (for flexbyte, optional)
  * The next not void item must be END.
  * MAC VLAN PATTERN:
  * The first not void item must be ETH.
@@ -1334,6 +1336,14 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
  *		dst_addr 192.167.3.50	0xFFFFFFFF
  * UDP/TCP/SCTP	src_port	80	0xFFFF
  *		dst_port	80	0xFFFF
+ * FLEX	relative	0	0x1
+ *		search		0	0x1
+ *		reserved	0	0
+ *		offset		12	0xFFFFFFFF
+ *		limit		0	0xFFFF
+ *		length		2	0xFFFF
+ *		pattern[0]	0x86	0xFF
+ *		pattern[1]	0xDD	0xFF
  * END
  * MAC VLAN pattern example:
  * ITEM		Spec			Mask
@@ -1365,6 +1375,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_sctp *sctp_mask;
 	const struct rte_flow_item_vlan *vlan_spec;
 	const struct rte_flow_item_vlan *vlan_mask;
+	const struct rte_flow_item_raw *raw_mask;
+	const struct rte_flow_item_raw *raw_spec;
 
 	uint32_t index, j;
 
@@ -1396,6 +1408,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
 	rule->mask.vlan_tci_mask = 0;
+	rule->mask.flex_bytes_mask = 0;
 
 	/* parse pattern */
 	index = 0;
@@ -1623,7 +1636,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
 		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
-		    item->type != RTE_FLOW_ITEM_TYPE_END) {
+		    item->type != RTE_FLOW_ITEM_TYPE_END &&
+		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1684,6 +1698,18 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 			rule->ixgbe_fdir.formatted.dst_port =
 				tcp_spec->hdr.dst_port;
 		}
+
+		index++;
+		NEXT_ITEM_OF_PATTERN(item, pattern, index);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+		    item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
 	}
 
 	/* Get the UDP info */
@@ -1733,6 +1759,18 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 			rule->ixgbe_fdir.formatted.dst_port =
 				udp_spec->hdr.dst_port;
 		}
+
+		index++;
+		NEXT_ITEM_OF_PATTERN(item, pattern, index);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+		    item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
 	}
 
 	/* Get the SCTP info */
@@ -1784,6 +1822,88 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 			rule->ixgbe_fdir.formatted.dst_port =
 				sctp_spec->hdr.dst_port;
 		}
+
+		index++;
+		NEXT_ITEM_OF_PATTERN(item, pattern, index);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+		    item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+	}
+
+	/* Get the flex byte info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+		/* Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* mask should not be null */
+		if (!item->mask || !item->spec) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+		/* check mask */
+		if (raw_mask->relative != 0x1 ||
+		    raw_mask->search != 0x1 ||
+		    raw_mask->reserved != 0x0 ||
+		    (uint32_t)raw_mask->offset != 0xffffffff ||
+		    raw_mask->limit != 0xffff ||
+		    raw_mask->length != 0xffff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		raw_spec = (const struct rte_flow_item_raw *)item->spec;
+
+		/* check spec */
+		if (raw_spec->relative != 0 ||
+		    raw_spec->search != 0 ||
+		    raw_spec->reserved != 0 ||
+		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+		    raw_spec->offset % 2 ||
+		    raw_spec->limit != 0 ||
+		    raw_spec->length != 2 ||
+		    /* pattern can't be 0xffff */
+		    (raw_spec->pattern[0] == 0xff &&
+		     raw_spec->pattern[1] == 0xff)) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		/* check pattern mask */
+		if (raw_mask->pattern[0] != 0xff ||
+		    raw_mask->pattern[1] != 0xff) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		rule->mask.flex_bytes_mask = 0xffff;
+		rule->ixgbe_fdir.formatted.flex_bytes =
+			(((uint16_t)raw_spec->pattern[1]) << 8) |
+			raw_spec->pattern[0];
+		rule->flex_bytes_offset = raw_spec->offset;
 	}
 
 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
@@ -1921,7 +2041,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-		/*Not supported last point for range*/
+		/* Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -2499,6 +2619,13 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 				rte_memcpy(&fdir_info->mask,
 					&fdir_rule.mask,
 					sizeof(struct ixgbe_hw_fdir_mask));
+				fdir_info->flex_bytes_offset =
+					fdir_rule.flex_bytes_offset;
+
+				if (fdir_rule.mask.flex_bytes_mask)
+					ixgbe_fdir_set_flexbytes_offset(dev,
+						fdir_rule.flex_bytes_offset);
+
 				ret = ixgbe_fdir_set_input_mask(dev);
 				if (ret)
 					goto out;
@@ -2514,6 +2641,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 					sizeof(struct ixgbe_hw_fdir_mask));
 				if (ret)
 					goto out;
+
+				if (fdir_info->flex_bytes_offset !=
+						fdir_rule.flex_bytes_offset)
+					goto out;
 			}
 		}
 
-- 
2.7.4
