* [PATCH 0/3] net/mlx5: support rte_flow
From: Nelio Laranjeiro @ 2016-11-25 18:14 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

This series requires rte_flow [1].

It brings rte_flow support to the same level as flow director (FDIR) in mlx5.

 [1] http://dpdk.org/ml/archives/dev/2016-November/050262.html
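
For reference, a minimal sketch of the application-facing API this
series implements underneath (the port and queue numbers are
illustrative; prototypes follow the rte_flow proposal linked above):

#include <rte_flow.h>

/* Redirect ingress IPv4/TCP traffic of port 0 to Rx queue 1. */
static struct rte_flow *
tcp_to_queue(void)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	if (rte_flow_validate(0, &attr, pattern, actions, &error))
		return NULL;
	return rte_flow_create(0, &attr, pattern, actions, &error);
}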

Nelio Laranjeiro (3):
  net/mlx5: add preliminary support for rte_flow
  net/mlx5: add software support for rte_flow
  net/mlx5: add rte_flow rule creation

 drivers/net/mlx5/Makefile       |   1 +
 drivers/net/mlx5/mlx5.h         |  17 +
 drivers/net/mlx5/mlx5_fdir.c    |  15 +
 drivers/net/mlx5/mlx5_flow.c    | 877 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_trigger.c |   1 +
 5 files changed, 911 insertions(+)
 create mode 100644 drivers/net/mlx5/mlx5_flow.c

-- 
2.1.4

* [PATCH 1/3] net/mlx5: add preliminary support for rte_flow
From: Nelio Laranjeiro @ 2016-11-25 18:14 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Expose the rte_flow API through stub callbacks plugged into the generic
filter control operation (RTE_ETH_FILTER_GENERIC); actual functionality
is added by the following commits.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
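Note: for readers unfamiliar with the plumbing, here is a sketch of how
the generic rte_flow layer is expected to reach the ops registered
below; this dispatch helper is an assumption based on rte_flow_driver.h
and is not part of this patch:

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_flow_driver.h>

static const struct rte_flow_ops *
flow_ops_get(uint8_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = NULL;

	/* RTE_ETH_FILTER_GENERIC + RTE_ETH_FILTER_GET hands back the
	 * PMD-specific rte_flow_ops through the arg pointer. */
	if (!dev->dev_ops->filter_ctrl ||
	    dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
				      RTE_ETH_FILTER_GET, &ops)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "no flow ops on this port");
		return NULL;
	}
	return ops;
}
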
 drivers/net/mlx5/Makefile    |   1 +
 drivers/net/mlx5/mlx5.h      |  16 ++++++
 drivers/net/mlx5/mlx5_fdir.c |  15 ++++++
 drivers/net/mlx5/mlx5_flow.c | 122 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 154 insertions(+)
 create mode 100644 drivers/net/mlx5/mlx5_flow.c

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index cf87f0b..6d1338a 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -48,6 +48,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
 
 # Dependencies.
 DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 79b7a60..04f4eaa 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -59,6 +59,7 @@
 #include <rte_spinlock.h>
 #include <rte_interrupts.h>
 #include <rte_errno.h>
+#include <rte_flow.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -268,4 +269,19 @@ void priv_fdir_enable(struct priv *);
 int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
 			 enum rte_filter_op, void *);
 
+/* mlx5_flow.c */
+
+int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
+		       const struct rte_flow_item [],
+		       const struct rte_flow_action [],
+		       struct rte_flow_error *);
+struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
+				  const struct rte_flow_attr *,
+				  const struct rte_flow_item [],
+				  const struct rte_flow_action [],
+				  struct rte_flow_error *);
+int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
+		      struct rte_flow_error *);
+int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index 1acf682..f80c58b 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -55,6 +55,8 @@
 #include <rte_malloc.h>
 #include <rte_ethdev.h>
 #include <rte_common.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -1042,6 +1044,14 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
 	return ret;
 }
 
+static const struct rte_flow_ops mlx5_flow_ops = {
+	.validate = mlx5_flow_validate,
+	.create = mlx5_flow_create,
+	.destroy = mlx5_flow_destroy,
+	.flush = mlx5_flow_flush,
+	.query = NULL,
+};
+
 /**
  * Manage filter operations.
  *
@@ -1067,6 +1077,11 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 	struct priv *priv = dev->data->dev_private;
 
 	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &mlx5_flow_ops;
+		return 0;
 	case RTE_ETH_FILTER_FDIR:
 		priv_lock(priv);
 		ret = priv_fdir_ctrl_func(priv, filter_op, arg);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
new file mode 100644
index 0000000..a514dff
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -0,0 +1,122 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 6WIND S.A.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of 6WIND S.A. nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include "mlx5.h"
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)attr;
+	(void)items;
+	(void)actions;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx5_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)attr;
+	(void)items;
+	(void)actions;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return NULL;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)flow;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
-- 
2.1.4

* [PATCH 2/3] net/mlx5: add software support for rte_flow
From: Nelio Laranjeiro @ 2016-11-25 18:14 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Introduce initial software validation for rte_flow rules.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
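Note: the validation enforces a fixed ETH -> IPv4/IPv6 -> UDP/TCP
layering. Two illustrative item lists (not part of the patch):

/* Accepted: layers are properly ordered, VOID items are skipped. */
static const struct rte_flow_item ok_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VOID },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_TCP },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

/* Rejected with "item not supported": L4 cannot follow L2 directly. */
static const struct rte_flow_item bad_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_TCP },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
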
 drivers/net/mlx5/mlx5.h         |   1 +
 drivers/net/mlx5/mlx5_flow.c    | 196 ++++++++++++++++++++++++++++++++++------
 drivers/net/mlx5/mlx5_trigger.c |   1 +
 3 files changed, 169 insertions(+), 29 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 04f4eaa..df0e77c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -136,6 +136,7 @@ struct priv {
 	unsigned int reta_idx_n; /* RETA index size. */
 	struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
 	struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
+	LIST_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	rte_spinlock_t lock; /* Lock for control functions. */
 };
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a514dff..54807ad 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -30,11 +30,125 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <sys/queue.h>
+
 #include <rte_ethdev.h>
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
+#include <rte_malloc.h>
+
 #include "mlx5.h"
 
+struct rte_flow {
+	LIST_ENTRY(rte_flow) next;
+};
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_validate(struct priv *priv,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	(void)priv;
+	const struct rte_flow_item *ilast = NULL;
+	const struct rte_flow_action *alast = NULL;
+
+	if (attr->group) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   NULL,
+				   "groups are not supported");
+		return -rte_errno;
+	}
+	if (attr->priority) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   NULL,
+				   "priorities are not supported");
+		return -rte_errno;
+	}
+	if (attr->egress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   NULL,
+				   "egress is not supported");
+		return -rte_errno;
+	}
+	if (!attr->ingress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   NULL,
+				   "only ingress is supported");
+		return -rte_errno;
+	}
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
+			continue;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
+			if (ilast)
+				goto exit_item_not_supported;
+			ilast = items;
+		} else if ((items->type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+			   (items->type == RTE_FLOW_ITEM_TYPE_IPV6)) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
+				goto exit_item_not_supported;
+			ilast = items;
+		} else if ((items->type == RTE_FLOW_ITEM_TYPE_UDP) ||
+			   (items->type == RTE_FLOW_ITEM_TYPE_TCP)) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
+				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
+				goto exit_item_not_supported;
+			ilast = items;
+		} else {
+			goto exit_item_not_supported;
+		}
+	}
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+			continue;
+		} else if ((actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) ||
+			   (actions->type == RTE_FLOW_ACTION_TYPE_DROP)) {
+			if (alast &&
+			    alast->type != actions->type)
+				goto exit_action_not_supported;
+			alast = actions;
+		} else {
+			goto exit_action_not_supported;
+		}
+	}
+	return 0;
+exit_item_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+			   items, "item not supported");
+	return -rte_errno;
+exit_action_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+			   actions, "action not supported");
+	return -rte_errno;
+}
+
 /**
  * Validate a flow supported by the NIC.
  *
@@ -48,15 +162,13 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attr;
-	(void)items;
-	(void)actions;
-	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	struct priv *priv = dev->data->dev_private;
+	int ret;
+
+	priv_lock(priv);
+	ret = priv_flow_validate(priv, attr, items, actions, error);
+	priv_unlock(priv);
+	return ret;
 }
 
 /**
@@ -72,15 +184,35 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attr;
-	(void)items;
-	(void)actions;
-	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return NULL;
+	struct priv *priv = dev->data->dev_private;
+	struct rte_flow *flow;
+
+	priv_lock(priv);
+	if (priv_flow_validate(priv, attr, items, actions, error)) {
+		priv_unlock(priv);
+		return NULL;
+	}
+	flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+	LIST_INSERT_HEAD(&priv->flows, flow, next);
+	priv_unlock(priv);
+	return flow;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @param  priv
+ *   Pointer to private structure.
+ * @param[in] flow
+ *   Pointer to the flow to destroy.
+ */
+static void
+priv_flow_destroy(struct priv *priv,
+		  struct rte_flow *flow)
+{
+	(void)priv;
+	LIST_REMOVE(flow, next);
+	rte_free(flow);
 }
 
 /**
@@ -94,13 +226,13 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow *flow,
 		  struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)flow;
+	struct priv *priv = dev->data->dev_private;
+
 	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	priv_lock(priv);
+	priv_flow_destroy(priv, flow);
+	priv_unlock(priv);
+	return 0;
 }
 
 /**
@@ -113,10 +245,16 @@ int
 mlx5_flow_flush(struct rte_eth_dev *dev,
 		struct rte_flow_error *error)
 {
-	(void)dev;
+	struct priv *priv = dev->data->dev_private;
+
 	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	priv_lock(priv);
+	while (!LIST_EMPTY(&priv->flows)) {
+		struct rte_flow *flow;
+
+		flow = LIST_FIRST(&priv->flows);
+		priv_flow_destroy(priv, flow);
+	}
+	priv_unlock(priv);
+	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index d4dccd8..98a2803 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -90,6 +90,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
 		priv_fdir_enable(priv);
 	priv_dev_interrupt_handler_install(priv, dev);
+	LIST_INIT(&priv->flows);
 	priv_unlock(priv);
 	return -err;
 }
-- 
2.1.4

* [PATCH 3/3] net/mlx5: add rte_flow rule creation
From: Nelio Laranjeiro @ 2016-11-25 18:14 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Convert the Ethernet, IPv4, IPv6, TCP and UDP layers into ibv_flow
specifications and create those rules after validation, i.e. once the
NIC has been verified to support the rule.

VLAN is still not supported in this commit.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
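Note: each translated layer is appended to a single buffer behind the
ibv_exp_flow_attr header, which Verbs consumes as one contiguous rule.
Resulting layout for an ETH -> IPv4 -> TCP pattern (sketch):

/*
 *   +----------------------------+ offset 0
 *   | struct ibv_exp_flow_attr   |  num_of_specs = 3
 *   +----------------------------+ sizeof(attr)
 *   | ibv_exp_flow_spec_eth      |  priority 2 while last layer is L2
 *   +----------------------------+
 *   | ibv_exp_flow_spec_ipv4     |  priority 1 while last layer is L3
 *   +----------------------------+
 *   | ibv_exp_flow_spec_tcp_udp  |  priority 0 once an L4 layer is seen
 *   +----------------------------+ flow_size
 */
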
 drivers/net/mlx5/mlx5_flow.c | 645 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 631 insertions(+), 14 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 54807ad..e948000 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -31,6 +31,17 @@
  */
 
 #include <sys/queue.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
 
 #include <rte_ethdev.h>
 #include <rte_flow.h>
@@ -39,11 +50,82 @@
 
 #include "mlx5.h"
 
+/** Define a value to use as index for the drop queue. */
+#define MLX5_FLOW_DROP_QUEUE ((uint32_t)-1)
+
 struct rte_flow {
 	LIST_ENTRY(rte_flow) next;
+	struct ibv_exp_flow_attr *ibv_attr;
+	struct ibv_exp_rwq_ind_table *ind_table;
+	struct ibv_qp *qp;
+	struct ibv_exp_flow *ibv_flow;
+	struct ibv_exp_wq *wq;
+	struct ibv_cq *cq;
+	uint8_t drop;
 };
 
 /**
+ * Check support for a given item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] mask
+ *   Bit-mask covering supported fields to compare with spec, last and
+ *   mask in item.
+ * @param size
+ *   Bit-mask size in bytes.
+ *
+ * @return
+ *   0 on success.
+ */
+static int
+mlx5_flow_item_validate(const struct rte_flow_item *item,
+			const uint8_t *mask, unsigned int size)
+{
+	int ret = 0;
+
+	if (item->spec && !item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->spec;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->last && !item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->last;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->mask;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->spec && item->last) {
+		uint8_t spec[size];
+		uint8_t last[size];
+		const uint8_t *apply = mask;
+		unsigned int i;
+
+		if (item->mask)
+			apply = item->mask;
+		for (i = 0; i < size; ++i) {
+			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+		}
+		ret = memcmp(spec, last, size);
+	}
+	return ret;
+}
+
+/**
  * Validate a flow supported by the NIC.
  *
  * @param priv
@@ -67,9 +149,43 @@ priv_flow_validate(struct priv *priv,
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	(void)priv;
 	const struct rte_flow_item *ilast = NULL;
 	const struct rte_flow_action *alast = NULL;
+	/* Supported mask. */
+	const struct rte_flow_item_eth eth_mask = {
+		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+	};
+	const struct rte_flow_item_ipv4 ipv4_mask = {
+		.hdr = {
+			.src_addr = -1,
+			.dst_addr = -1,
+		},
+	};
+	const struct rte_flow_item_ipv6 ipv6_mask = {
+		.hdr = {
+			.src_addr = {
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			},
+			.dst_addr = {
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			},
+		},
+	};
+	const struct rte_flow_item_udp udp_mask = {
+		.hdr = {
+			.src_port = -1,
+			.dst_port = -1,
+		},
+	};
+	const struct rte_flow_item_tcp tcp_mask = {
+		.hdr = {
+			.src_port = -1,
+			.dst_port = -1,
+		},
+	};
 
 	if (attr->group) {
 		rte_flow_error_set(error, ENOTSUP,
@@ -100,27 +216,70 @@ priv_flow_validate(struct priv *priv,
 		return -rte_errno;
 	}
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		int err = 0;
+
 		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
 			continue;
 		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
 			if (ilast)
 				goto exit_item_not_supported;
 			ilast = items;
-		} else if ((items->type == RTE_FLOW_ITEM_TYPE_IPV4) ||
-			   (items->type == RTE_FLOW_ITEM_TYPE_IPV6)) {
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&eth_mask,
+					sizeof(eth_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
 			if (!ilast)
 				goto exit_item_not_supported;
 			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
 				goto exit_item_not_supported;
 			ilast = items;
-		} else if ((items->type == RTE_FLOW_ITEM_TYPE_UDP) ||
-			   (items->type == RTE_FLOW_ITEM_TYPE_TCP)) {
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&ipv4_mask,
+					sizeof(ipv4_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
+				goto exit_item_not_supported;
+			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&ipv6_mask,
+					sizeof(ipv6_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_UDP) {
 			if (!ilast)
 				goto exit_item_not_supported;
 			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
 				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
 				goto exit_item_not_supported;
 			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&udp_mask,
+					sizeof(udp_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_TCP) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
+				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
+				goto exit_item_not_supported;
+			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&tcp_mask,
+					sizeof(tcp_mask));
+			if (err)
+				goto exit_item_not_supported;
 		} else {
 			goto exit_item_not_supported;
 		}
@@ -128,8 +287,23 @@ priv_flow_validate(struct priv *priv,
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
 		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
 			continue;
-		} else if ((actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) ||
-			   (actions->type == RTE_FLOW_ACTION_TYPE_DROP)) {
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			const struct rte_flow_action_queue *queue =
+				(const struct rte_flow_action_queue *)
+				actions->conf;
+
+			if (alast &&
+			    alast->type != actions->type)
+				goto exit_action_not_supported;
+			if (queue->index > (priv->rxqs_n - 1)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "queue index error");
+				goto exit;
+			}
+			alast = actions;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
 			if (alast &&
 			    alast->type != actions->type)
 				goto exit_action_not_supported;
@@ -146,6 +320,7 @@ priv_flow_validate(struct priv *priv,
 exit_action_not_supported:
 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
 			   actions, "action not supported");
+exit:
 	return -rte_errno;
 }
 
@@ -172,6 +347,310 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 }
 
 /**
+ * Convert Ethernet item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] eth
+ *   Verbs Ethernet specification structure.
+ */
+static void
+mlx5_flow_create_eth(const struct rte_flow_item *item,
+		     struct ibv_exp_flow_spec_eth *eth)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	unsigned int i;
+
+	memset(eth, 0, sizeof(struct ibv_exp_flow_spec_eth));
+	*eth = (struct ibv_exp_flow_spec_eth) {
+		.type = IBV_EXP_FLOW_SPEC_ETH,
+		.size = sizeof(struct ibv_exp_flow_spec_eth),
+	};
+	if (spec) {
+		memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+	}
+	if (mask) {
+		memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+	}
+	/* Remove unwanted bits from values. */
+	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
+		eth->val.src_mac[i] &= eth->mask.src_mac[i];
+	}
+	eth->val.ether_type &= eth->mask.ether_type;
+	eth->val.vlan_tag &= eth->mask.vlan_tag;
+}
+
+/**
+ * Convert IPv4 item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] ipv4
+ *   Verbs IPv4 specification structure.
+ */
+static void
+mlx5_flow_create_ipv4(const struct rte_flow_item *item,
+		      struct ibv_exp_flow_spec_ipv4 *ipv4)
+{
+	const struct rte_flow_item_ipv4 *spec = item->spec;
+	const struct rte_flow_item_ipv4 *mask = item->mask;
+
+	memset(ipv4, 0, sizeof(struct ibv_exp_flow_spec_ipv4));
+	*ipv4 = (struct ibv_exp_flow_spec_ipv4) {
+		.type = IBV_EXP_FLOW_SPEC_IPV4,
+		.size = sizeof(struct ibv_exp_flow_spec_ipv4),
+	};
+	if (spec) {
+		ipv4->val = (struct ibv_exp_flow_ipv4_filter){
+			.src_ip = spec->hdr.src_addr,
+			.dst_ip = spec->hdr.dst_addr,
+		};
+	}
+	if (mask) {
+		ipv4->mask = (struct ibv_exp_flow_ipv4_filter){
+			.src_ip = mask->hdr.src_addr,
+			.dst_ip = mask->hdr.dst_addr,
+		};
+	}
+	/* Remove unwanted bits from values. */
+	ipv4->val.src_ip &= ipv4->mask.src_ip;
+	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+}
+
+/**
+ * Convert IPv6 item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] ipv6
+ *   Verbs IPv6 specification structure.
+ */
+static void
+mlx5_flow_create_ipv6(const struct rte_flow_item *item,
+		      struct ibv_exp_flow_spec_ipv6 *ipv6)
+{
+	const struct rte_flow_item_ipv6 *spec = item->spec;
+	const struct rte_flow_item_ipv6 *mask = item->mask;
+	unsigned int i;
+
+	memset(ipv6, 0, sizeof(struct ibv_exp_flow_spec_ipv6));
+	ipv6->type = IBV_EXP_FLOW_SPEC_IPV6;
+	ipv6->size = sizeof(struct ibv_exp_flow_spec_ipv6);
+	if (spec) {
+		memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
+		       RTE_DIM(ipv6->val.src_ip));
+		memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
+		       RTE_DIM(ipv6->val.dst_ip));
+	}
+	if (mask) {
+		memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
+		       RTE_DIM(ipv6->mask.src_ip));
+		memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
+		       RTE_DIM(ipv6->mask.dst_ip));
+	}
+	/* Remove unwanted bits from values. */
+	for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
+		ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
+		ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
+	}
+}
+
+/**
+ * Convert UDP item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] udp
+ *   Verbs UDP specification structure.
+ */
+static void
+mlx5_flow_create_udp(const struct rte_flow_item *item,
+		     struct ibv_exp_flow_spec_tcp_udp *udp)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+
+	memset(udp, 0, sizeof(struct ibv_exp_flow_spec_tcp_udp));
+	*udp = (struct ibv_exp_flow_spec_tcp_udp) {
+		.type = IBV_EXP_FLOW_SPEC_UDP,
+		.size = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+	};
+	udp->type = IBV_EXP_FLOW_SPEC_UDP;
+	if (spec) {
+		udp->val.dst_port = spec->hdr.dst_port;
+		udp->val.src_port = spec->hdr.src_port;
+	}
+	if (mask) {
+		udp->mask.dst_port = mask->hdr.dst_port;
+		udp->mask.src_port = mask->hdr.src_port;
+	}
+	/* Remove unwanted bits from values. */
+	udp->val.src_port &= udp->mask.src_port;
+	udp->val.dst_port &= udp->mask.dst_port;
+}
+
+/**
+ * Convert TCP item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] tcp
+ *   Verbs TCP specification structure.
+ */
+static void
+mlx5_flow_create_tcp(const struct rte_flow_item *item,
+		     struct ibv_exp_flow_spec_tcp_udp *tcp)
+{
+	const struct rte_flow_item_tcp *spec = item->spec;
+	const struct rte_flow_item_tcp *mask = item->mask;
+
+	memset(tcp, 0, sizeof(struct ibv_exp_flow_spec_tcp_udp));
+	*tcp = (struct ibv_exp_flow_spec_tcp_udp) {
+		.type = IBV_EXP_FLOW_SPEC_TCP,
+		.size = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+	};
+	tcp->type = IBV_EXP_FLOW_SPEC_TCP;
+	if (spec) {
+		tcp->val.dst_port = spec->hdr.dst_port;
+		tcp->val.src_port = spec->hdr.src_port;
+	}
+	if (mask) {
+		tcp->mask.dst_port = mask->hdr.dst_port;
+		tcp->mask.src_port = mask->hdr.src_port;
+	}
+	/* Remove unwanted bits from values. */
+	tcp->val.src_port &= tcp->mask.src_port;
+	tcp->val.dst_port &= tcp->mask.dst_port;
+}
+
+/**
+ * Complete flow rule creation.
+ *
+ * @param  priv
+ *   Pointer to private structure.
+ * @param  ibv_attr
+ *   Verbs flow attributes.
+ * @param  queue
+ *   Destination queue.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow if the rule could be created.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue(struct priv *priv,
+			      struct ibv_exp_flow_attr *ibv_attr,
+			      uint32_t queue,
+			      struct rte_flow_error *error)
+{
+	struct rxq_ctrl *rxq;
+	struct rte_flow *rte_flow;
+
+	assert(priv->pd);
+	assert(priv->ctx);
+	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+	if (!rte_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "cannot allocate flow memory");
+		return NULL;
+	}
+	if (queue == MLX5_FLOW_DROP_QUEUE) {
+		rte_flow->drop = 1;
+		rte_flow->cq =
+			ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+					  &(struct ibv_exp_cq_init_attr){
+						  .comp_mask = 0,
+					  });
+		if (!rte_flow->cq) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   NULL, "cannot allocate CQ");
+			goto error;
+		}
+		rte_flow->wq = ibv_exp_create_wq(
+			priv->ctx,
+			&(struct ibv_exp_wq_init_attr){
+				.wq_type = IBV_EXP_WQT_RQ,
+				.max_recv_wr = 1,
+				.max_recv_sge = 1,
+				.pd = priv->pd,
+				.cq = rte_flow->cq,
+			});
+	} else {
+		rxq = container_of((*priv->rxqs)[queue], struct rxq_ctrl, rxq);
+		rte_flow->drop = 0;
+		rte_flow->wq = rxq->wq;
+	}
+	rte_flow->ibv_attr = ibv_attr;
+	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
+		priv->ctx,
+		&(struct ibv_exp_rwq_ind_table_init_attr){
+			.pd = priv->pd,
+			.log_ind_tbl_size = 0,
+			.ind_tbl = &rte_flow->wq,
+			.comp_mask = 0,
+		});
+	if (!rte_flow->ind_table) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "cannot allocate indirection table");
+		goto error;
+	}
+	rte_flow->qp = ibv_exp_create_qp(
+		priv->ctx,
+		&(struct ibv_exp_qp_init_attr){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_EXP_QP_INIT_ATTR_PD |
+				IBV_EXP_QP_INIT_ATTR_PORT |
+				IBV_EXP_QP_INIT_ATTR_RX_HASH,
+			.pd = priv->pd,
+			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+				.rx_hash_function =
+					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_hash_default_key_len,
+				.rx_hash_key = rss_hash_default_key,
+				.rx_hash_fields_mask = 0,
+				.rwq_ind_tbl = rte_flow->ind_table,
+			},
+			.port_num = priv->port,
+		});
+	if (!rte_flow->qp) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "cannot allocate QP");
+		goto error;
+	}
+	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+						 rte_flow->ibv_attr);
+	if (!rte_flow->ibv_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "flow rule creation failure");
+		goto error;
+	}
+	if (LIST_EMPTY(&priv->flows))
+		LIST_INIT(&priv->flows);
+	LIST_INSERT_HEAD(&priv->flows, rte_flow, next);
+	return rte_flow;
+error:
+	assert(rte_flow);
+	if (rte_flow->qp)
+		ibv_destroy_qp(rte_flow->qp);
+	if (rte_flow->ind_table)
+		ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
+	if (rte_flow->drop && rte_flow->wq)
+		ibv_exp_destroy_wq(rte_flow->wq);
+	if (rte_flow->drop && rte_flow->cq)
+		ibv_destroy_cq(rte_flow->cq);
+	rte_free(rte_flow->ibv_attr);
+	rte_free(rte_flow);
+	return NULL;
+}
+
+/**
  * Create a flow.
  *
  * @see rte_flow_create()
@@ -185,17 +664,143 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		 struct rte_flow_error *error)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct rte_flow *flow;
+	struct rte_flow *rte_flow = NULL;
+	struct ibv_exp_flow_attr *ibv_attr;
+	unsigned int flow_size = sizeof(struct ibv_exp_flow_attr);
 
 	priv_lock(priv);
-	if (priv_flow_validate(priv, attr, items, actions, error)) {
-		priv_unlock(priv);
-		return NULL;
+	if (priv_flow_validate(priv, attr, items, actions, error))
+		goto exit;
+	ibv_attr = rte_malloc(__func__, flow_size, 0);
+	if (!ibv_attr) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate ibv_attr memory");
+		goto exit;
+	}
+	*ibv_attr = (struct ibv_exp_flow_attr){
+		.type = IBV_EXP_FLOW_ATTR_NORMAL,
+		.size = sizeof(struct ibv_exp_flow_attr),
+		.priority = attr->priority,
+		.num_of_specs = 0,
+		.port = 0,
+		.flags = 0,
+		.reserved = 0,
+	};
+	/* Update ibv_flow_spec. */
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
+			continue;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
+			struct ibv_exp_flow_spec_eth *eth;
+			unsigned int eth_size =
+				sizeof(struct ibv_exp_flow_spec_eth);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + eth_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			eth = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_eth(items, eth);
+			flow_size += eth_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 2;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			struct ibv_exp_flow_spec_ipv4 *ipv4;
+			unsigned int ipv4_size =
+				sizeof(struct ibv_exp_flow_spec_ipv4);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + ipv4_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			ipv4 = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_ipv4(items, ipv4);
+			flow_size += ipv4_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 1;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+			struct ibv_exp_flow_spec_ipv6 *ipv6;
+			unsigned int ipv6_size =
+				sizeof(struct ibv_exp_flow_spec_ipv6);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + ipv6_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			ipv6 = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_ipv6(items, ipv6);
+			flow_size += ipv6_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 1;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_UDP) {
+			struct ibv_exp_flow_spec_tcp_udp *udp;
+			unsigned int udp_size =
+				sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + udp_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			udp = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_udp(items, udp);
+			flow_size += udp_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 0;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_TCP) {
+			struct ibv_exp_flow_spec_tcp_udp *tcp;
+			unsigned int tcp_size =
+				sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + tcp_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			tcp = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_tcp(items, tcp);
+			flow_size += tcp_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 0;
+		} else {
+			/* This default case should not be reached. */
+			rte_free(ibv_attr);
+			rte_flow_error_set(
+				error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+				items, "unsupported item");
+			goto exit;
+		}
 	}
-	flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
-	LIST_INSERT_HEAD(&priv->flows, flow, next);
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+			continue;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			const struct rte_flow_action_queue *queue =
+				(const struct rte_flow_action_queue *)
+				actions->conf;
+
+			rte_flow = priv_flow_create_action_queue(
+					priv, ibv_attr,
+					queue->index, error);
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+			rte_flow = priv_flow_create_action_queue(
+					priv, ibv_attr,
+					MLX5_FLOW_DROP_QUEUE, error);
+		} else {
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "unsupported action");
+			goto exit;
+		}
+	}
+	priv_unlock(priv);
+	return rte_flow;
+error_no_memory:
+	rte_flow_error_set(error, ENOMEM,
+			   RTE_FLOW_ERROR_TYPE_ITEM,
+			   items,
+			   "cannot allocate memory");
+exit:
 	priv_unlock(priv);
-	return flow;
+	return NULL;
 }
 
 /**
@@ -212,6 +817,18 @@ priv_flow_destroy(struct priv *priv,
 {
 	(void)priv;
 	LIST_REMOVE(flow, next);
+	claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+	if (flow->qp)
+		claim_zero(ibv_destroy_qp(flow->qp));
+	if (flow->ind_table)
+		claim_zero(
+			ibv_exp_destroy_rwq_ind_table(
+				flow->ind_table));
+	if (flow->drop && flow->wq)
+		claim_zero(ibv_exp_destroy_wq(flow->wq));
+	if (flow->drop && flow->cq)
+		claim_zero(ibv_destroy_cq(flow->cq));
+	rte_free(flow->ibv_attr);
 	rte_free(flow);
 }
 
-- 
2.1.4

* [PATCH v2 0/4] net/mlx5: support rte_flow
From: Nelio Laranjeiro @ 2016-12-21 10:01 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

This series requires rte_flow [1].

It brings rte_flow support to the same level as flow director (FDIR) in mlx5.

 [1] http://dpdk.org/ml/archives/dev/2016-December/052802.html

Changes in v2:

 - Fix several issues.
 - Support VLAN filtering.
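
A sketch of the kind of pattern the new VLAN filtering targets (the
VLAN ID is illustrative; set vlan_spec.tci = rte_cpu_to_be_16(42) at
initialization time):

static struct rte_flow_item_vlan vlan_spec;

static const struct rte_flow_item vlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &vlan_spec },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};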

Nelio Laranjeiro (4):
  net/mlx5: add preliminary support for rte_flow
  net/mlx5: add software support for rte_flow
  net/mlx5: add rte_flow rule creation
  net/mlx5: add VLAN filter support in rte_flow

 drivers/net/mlx5/Makefile       |    1 +
 drivers/net/mlx5/mlx5.h         |   19 +
 drivers/net/mlx5/mlx5_fdir.c    |   15 +
 drivers/net/mlx5/mlx5_flow.c    | 1030 +++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_trigger.c |    4 +
 5 files changed, 1069 insertions(+)
 create mode 100644 drivers/net/mlx5/mlx5_flow.c

-- 
2.1.4

* [PATCH v2 1/4] net/mlx5: add preliminary support for rte_flow
From: Nelio Laranjeiro @ 2016-12-21 10:01 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Expose the rte_flow API through stub callbacks plugged into the generic
filter control operation (RTE_ETH_FILTER_GENERIC); actual functionality
is added by the following commits.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/Makefile    |   1 +
 drivers/net/mlx5/mlx5.h      |  16 ++++++
 drivers/net/mlx5/mlx5_fdir.c |  15 ++++++
 drivers/net/mlx5/mlx5_flow.c | 122 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 154 insertions(+)
 create mode 100644 drivers/net/mlx5/mlx5_flow.c

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index cf87f0b..6d1338a 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -48,6 +48,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
 
 # Dependencies.
 DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 79b7a60..04f4eaa 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -59,6 +59,7 @@
 #include <rte_spinlock.h>
 #include <rte_interrupts.h>
 #include <rte_errno.h>
+#include <rte_flow.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -268,4 +269,19 @@ void priv_fdir_enable(struct priv *);
 int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
 			 enum rte_filter_op, void *);
 
+/* mlx5_flow.c */
+
+int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
+		       const struct rte_flow_item [],
+		       const struct rte_flow_action [],
+		       struct rte_flow_error *);
+struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
+				  const struct rte_flow_attr *,
+				  const struct rte_flow_item [],
+				  const struct rte_flow_action [],
+				  struct rte_flow_error *);
+int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
+		      struct rte_flow_error *);
+int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index 1acf682..f80c58b 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -55,6 +55,8 @@
 #include <rte_malloc.h>
 #include <rte_ethdev.h>
 #include <rte_common.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -1042,6 +1044,14 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
 	return ret;
 }
 
+static const struct rte_flow_ops mlx5_flow_ops = {
+	.validate = mlx5_flow_validate,
+	.create = mlx5_flow_create,
+	.destroy = mlx5_flow_destroy,
+	.flush = mlx5_flow_flush,
+	.query = NULL,
+};
+
 /**
  * Manage filter operations.
  *
@@ -1067,6 +1077,11 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 	struct priv *priv = dev->data->dev_private;
 
 	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &mlx5_flow_ops;
+		return 0;
 	case RTE_ETH_FILTER_FDIR:
 		priv_lock(priv);
 		ret = priv_fdir_ctrl_func(priv, filter_op, arg);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
new file mode 100644
index 0000000..a514dff
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -0,0 +1,122 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 6WIND S.A.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of 6WIND S.A. nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include "mlx5.h"
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)attr;
+	(void)items;
+	(void)actions;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx5_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)attr;
+	(void)items;
+	(void)actions;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return NULL;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)flow;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
-- 
2.1.4

* [PATCH v2 2/4] net/mlx5: add software support for rte_flow
From: Nelio Laranjeiro @ 2016-12-21 10:01 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Introduce initial software validation for rte_flow rules.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
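Note: from the application side, the two cleanup paths wired up here
look as follows (sketch; the port_id and diagnostics are illustrative):

#include <stdio.h>
#include <rte_flow.h>

static void
flow_cleanup(uint8_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error error = { 0 };

	/* Destroy a single rule... */
	if (rte_flow_destroy(port_id, flow, &error))
		printf("destroy: %s\n",
		       error.message ? error.message : "(no message)");
	/* ...or remove every remaining rule on the port at once. */
	if (rte_flow_flush(port_id, &error))
		printf("flush: %s\n",
		       error.message ? error.message : "(no message)");
}
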
 drivers/net/mlx5/mlx5.h         |   2 +
 drivers/net/mlx5/mlx5_flow.c    | 202 ++++++++++++++++++++++++++++++++++------
 drivers/net/mlx5/mlx5_trigger.c |   2 +
 3 files changed, 177 insertions(+), 29 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 04f4eaa..ac995a0 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -136,6 +136,7 @@ struct priv {
 	unsigned int reta_idx_n; /* RETA index size. */
 	struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
 	struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
+	LIST_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	rte_spinlock_t lock; /* Lock for control functions. */
 };
@@ -283,5 +284,6 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
 int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
 		      struct rte_flow_error *);
 int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+void priv_flow_flush(struct priv *);
 
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a514dff..3e5098a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -30,11 +30,119 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <sys/queue.h>
+
 #include <rte_ethdev.h>
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
+#include <rte_malloc.h>
+
 #include "mlx5.h"
 
+struct rte_flow {
+	LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure. */
+};
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_validate(struct priv *priv,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	(void)priv;
+	const struct rte_flow_item *ilast = NULL;
+
+	if (attr->group) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   NULL,
+				   "groups are not supported");
+		return -rte_errno;
+	}
+	if (attr->priority) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   NULL,
+				   "priorities are not supported");
+		return -rte_errno;
+	}
+	if (attr->egress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   NULL,
+				   "egress is not supported");
+		return -rte_errno;
+	}
+	if (!attr->ingress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   NULL,
+				   "only ingress is supported");
+		return -rte_errno;
+	}
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
+			continue;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
+			if (ilast)
+				goto exit_item_not_supported;
+			ilast = items;
+		} else if ((items->type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+			   (items->type == RTE_FLOW_ITEM_TYPE_IPV6)) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
+				goto exit_item_not_supported;
+			ilast = items;
+		} else if ((items->type == RTE_FLOW_ITEM_TYPE_UDP) ||
+			   (items->type == RTE_FLOW_ITEM_TYPE_TCP)) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
+				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
+				goto exit_item_not_supported;
+			ilast = items;
+		} else {
+			goto exit_item_not_supported;
+		}
+	}
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID ||
+		    actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
+		    actions->type == RTE_FLOW_ACTION_TYPE_DROP)
+			continue;
+		else
+			goto exit_action_not_supported;
+	}
+	return 0;
+exit_item_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+			   items, "item not supported");
+	return -rte_errno;
+exit_action_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+			   actions, "action not supported");
+	return -rte_errno;
+}
+
 /**
  * Validate a flow supported by the NIC.
  *
@@ -48,15 +156,13 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attr;
-	(void)items;
-	(void)actions;
-	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	struct priv *priv = dev->data->dev_private;
+	int ret;
+
+	priv_lock(priv);
+	ret = priv_flow_validate(priv, attr, items, actions, error);
+	priv_unlock(priv);
+	return ret;
 }
 
 /**
@@ -72,15 +178,35 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attr;
-	(void)items;
-	(void)actions;
-	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return NULL;
+	struct priv *priv = dev->data->dev_private;
+	struct rte_flow *flow;
+
+	priv_lock(priv);
+	if (priv_flow_validate(priv, attr, items, actions, error)) {
+		priv_unlock(priv);
+		return NULL;
+	}
+	flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+	LIST_INSERT_HEAD(&priv->flows, flow, next);
+	priv_unlock(priv);
+	return flow;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @param  priv
+ *   Pointer to private structure.
+ * @param[in] flow
+ *   Pointer to the flow to destroy.
+ */
+static void
+priv_flow_destroy(struct priv *priv,
+		  struct rte_flow *flow)
+{
+	(void)priv;
+	LIST_REMOVE(flow, next);
+	rte_free(flow);
 }
 
 /**
@@ -94,13 +220,30 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow *flow,
 		  struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)flow;
+	struct priv *priv = dev->data->dev_private;
+
 	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	priv_lock(priv);
+	priv_flow_destroy(priv, flow);
+	priv_unlock(priv);
+	return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @param  priv
+ *   Pointer to private structure.
+ */
+void
+priv_flow_flush(struct priv *priv)
+{
+	while (!LIST_EMPTY(&priv->flows)) {
+		struct rte_flow *flow;
+
+		flow = LIST_FIRST(&priv->flows);
+		priv_flow_destroy(priv, flow);
+	}
 }
 
 /**
@@ -113,10 +256,11 @@ int
 mlx5_flow_flush(struct rte_eth_dev *dev,
 		struct rte_flow_error *error)
 {
-	(void)dev;
+	struct priv *priv = dev->data->dev_private;
+
 	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	priv_lock(priv);
+	priv_flow_flush(priv);
+	priv_unlock(priv);
+	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index d4dccd8..4a359d7 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -90,6 +90,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
 		priv_fdir_enable(priv);
 	priv_dev_interrupt_handler_install(priv, dev);
+	LIST_INIT(&priv->flows);
 	priv_unlock(priv);
 	return -err;
 }
@@ -120,6 +121,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
 	priv_fdir_disable(priv);
+	priv_flow_flush(priv);
 	priv_dev_interrupt_handler_uninstall(priv, dev);
 	priv->started = 0;
 	priv_unlock(priv);
-- 
2.1.4

* [PATCH v2 3/4] net/mlx5: add rte_flow rule creation
From: Nelio Laranjeiro @ 2016-12-21 10:01 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Convert Ethernet, IPv4, IPv6, TCP, UDP layers into ibv_flow and create
those rules after validation (i.e. once the NIC supports the rule).

VLAN is still not supported in this commit.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
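For reference, priv_flow_create() in this patch builds the whole rule in
one contiguous buffer: an ibv_exp_flow_attr header followed by the packed
specifications, each converted item growing the buffer and bumping
num_of_specs. A rough sketch of the technique (error handling omitted;
"item" stands for the current pattern item and is not a real variable
from this patch):

 struct ibv_exp_flow_attr *attr;
 struct ibv_exp_flow_spec_eth *eth;
 unsigned int size = sizeof(*attr);

 attr = rte_malloc(__func__, size, 0);
 *attr = (struct ibv_exp_flow_attr){
 	.type = IBV_EXP_FLOW_ATTR_NORMAL,
 	.size = size,
 };
 /* Grow the buffer and append the Ethernet spec after the attribute. */
 attr = rte_realloc(attr, size + sizeof(*eth), 0);
 eth = (void *)((uintptr_t)attr + size);
 mlx5_flow_create_eth(item, eth);
 size += sizeof(*eth);
 ++attr->num_of_specs;
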
 drivers/net/mlx5/mlx5.h         |   3 +-
 drivers/net/mlx5/mlx5_flow.c    | 737 +++++++++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_trigger.c |   6 +-
 3 files changed, 729 insertions(+), 17 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index ac995a0..ca7e84c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -284,6 +284,7 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
 int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
 		      struct rte_flow_error *);
 int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
-void priv_flow_flush(struct priv *);
+int priv_flow_apply(struct priv *);
+void priv_flow_remove(struct priv *);
 
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 3e5098a..44e2fb8 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -31,6 +31,17 @@
  */
 
 #include <sys/queue.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
 
 #include <rte_ethdev.h>
 #include <rte_flow.h>
@@ -39,11 +50,82 @@
 
 #include "mlx5.h"
 
+/** Define a value to use as index for the drop queue. */
+#define MLX5_FLOW_DROP_QUEUE ((uint32_t)-1)
+
 struct rte_flow {
 	LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure. */
+	struct ibv_exp_flow_attr *ibv_attr; /* Pointer to Verbs attributes. */
+	struct ibv_exp_rwq_ind_table *ind_table; /* Indirection table. */
+	struct ibv_qp *qp; /* Verbs Queue pair. */
+	struct ibv_exp_flow *ibv_flow; /* Verbs flow. */
+	struct ibv_exp_wq *wq; /* Verbs work queue. */
+	struct ibv_cq *cq; /* Verbs completion queue. */
+	uint8_t drop; /* 1 if this flow is associated to a drop queue. */
 };
 
 /**
+ * Check support for a given item.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param mask[in]
+ *   Bit-mask covering supported fields to compare with the spec, last and
+ *   mask members of the item.
+ * @param size
+ *   Bit-mask size in bytes.
+ *
+ * @return
+ *   0 on success, a nonzero value otherwise.
+ */
+static int
+mlx5_flow_item_validate(const struct rte_flow_item *item,
+			const uint8_t *mask, unsigned int size)
+{
+	int ret = 0;
+
+	if (item->spec && !item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->spec;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->last && !item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->last;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->mask;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->spec && item->last) {
+		uint8_t spec[size];
+		uint8_t last[size];
+		const uint8_t *apply = mask;
+		unsigned int i;
+
+		if (item->mask)
+			apply = item->mask;
+		for (i = 0; i < size; ++i) {
+			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+		}
+		ret = memcmp(spec, last, size);
+	}
+	return ret;
+}
+
+/**
  * Validate a flow supported by the NIC.
  *
  * @param priv
@@ -67,8 +149,42 @@ priv_flow_validate(struct priv *priv,
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	(void)priv;
 	const struct rte_flow_item *ilast = NULL;
+	const struct rte_flow_item_eth eth_mask = {
+		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.type = -1,
+	};
+	const struct rte_flow_item_ipv4 ipv4_mask = {
+		.hdr = {
+			.src_addr = -1,
+			.dst_addr = -1,
+		},
+	};
+	const struct rte_flow_item_ipv6 ipv6_mask = {
+		.hdr = {
+			.src_addr = {
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			},
+			.dst_addr = {
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			},
+		},
+	};
+	const struct rte_flow_item_udp udp_mask = {
+		.hdr = {
+			.src_port = -1,
+			.dst_port = -1,
+		},
+	};
+	const struct rte_flow_item_tcp tcp_mask = {
+		.hdr = {
+			.src_port = -1,
+			.dst_port = -1,
+		},
+	};
 
 	if (attr->group) {
 		rte_flow_error_set(error, ENOTSUP,
@@ -99,38 +215,93 @@ priv_flow_validate(struct priv *priv,
 		return -rte_errno;
 	}
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		int err = 0;
+
 		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
 			continue;
 		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
 			if (ilast)
 				goto exit_item_not_supported;
 			ilast = items;
-		} else if ((items->type == RTE_FLOW_ITEM_TYPE_IPV4) ||
-			   (items->type == RTE_FLOW_ITEM_TYPE_IPV6)) {
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&eth_mask,
+					sizeof(eth_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
+				goto exit_item_not_supported;
+			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&ipv4_mask,
+					sizeof(ipv4_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {
 			if (!ilast)
 				goto exit_item_not_supported;
 			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
 				goto exit_item_not_supported;
 			ilast = items;
-		} else if ((items->type == RTE_FLOW_ITEM_TYPE_UDP) ||
-			   (items->type == RTE_FLOW_ITEM_TYPE_TCP)) {
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&ipv6_mask,
+					sizeof(ipv6_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_UDP) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
+				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
+				goto exit_item_not_supported;
+			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&udp_mask,
+					sizeof(udp_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_TCP) {
 			if (!ilast)
 				goto exit_item_not_supported;
 			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
 				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
 				goto exit_item_not_supported;
 			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&tcp_mask,
+					sizeof(tcp_mask));
+			if (err)
+				goto exit_item_not_supported;
 		} else {
 			goto exit_item_not_supported;
 		}
 	}
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
 		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID ||
-		    actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
-		    actions->type == RTE_FLOW_ACTION_TYPE_DROP)
+		    actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
 			continue;
-		else
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			const struct rte_flow_action_queue *queue =
+				(const struct rte_flow_action_queue *)
+				actions->conf;
+
+			if (!queue || (queue->index > (priv->rxqs_n - 1))) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "queue index error");
+				goto exit;
+			}
+		} else {
 			goto exit_action_not_supported;
+		}
 	}
 	return 0;
 exit_item_not_supported:
@@ -140,6 +311,7 @@ priv_flow_validate(struct priv *priv,
 exit_action_not_supported:
 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
 			   actions, "action not supported");
+exit:
 	return -rte_errno;
 }
 
@@ -166,6 +338,479 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 }
 
 /**
+ * Convert Ethernet item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param eth[in, out]
+ *   Verbs Ethernet specification structure.
+ */
+static void
+mlx5_flow_create_eth(const struct rte_flow_item *item,
+		     struct ibv_exp_flow_spec_eth *eth)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	unsigned int i;
+
+	*eth = (struct ibv_exp_flow_spec_eth) {
+		.type = IBV_EXP_FLOW_SPEC_ETH,
+		.size = sizeof(struct ibv_exp_flow_spec_eth),
+	};
+	if (spec) {
+		memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+	}
+	if (mask) {
+		memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+	}
+	eth->val.ether_type = spec->type;
+	eth->mask.ether_type = mask->type;
+	/* Remove unwanted bits from values. */
+	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
+		eth->val.src_mac[i] &= eth->mask.src_mac[i];
+	}
+	eth->val.ether_type &= eth->mask.ether_type;
+}
+
+/**
+ * Convert IPv4 item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param ipv4[in, out]
+ *   Verbs IPv4 specification structure.
+ */
+static void
+mlx5_flow_create_ipv4(const struct rte_flow_item *item,
+		      struct ibv_exp_flow_spec_ipv4 *ipv4)
+{
+	const struct rte_flow_item_ipv4 *spec = item->spec;
+	const struct rte_flow_item_ipv4 *mask = item->mask;
+
+	*ipv4 = (struct ibv_exp_flow_spec_ipv4) {
+		.type = IBV_EXP_FLOW_SPEC_IPV4,
+		.size = sizeof(struct ibv_exp_flow_spec_ipv4),
+	};
+	if (spec) {
+		ipv4->val = (struct ibv_exp_flow_ipv4_filter){
+			.src_ip = spec->hdr.src_addr,
+			.dst_ip = spec->hdr.dst_addr,
+		};
+	}
+	if (mask) {
+		ipv4->mask = (struct ibv_exp_flow_ipv4_filter){
+			.src_ip = mask->hdr.src_addr,
+			.dst_ip = mask->hdr.dst_addr,
+		};
+	}
+	/* Remove unwanted bits from values. */
+	ipv4->val.src_ip &= ipv4->mask.src_ip;
+	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+}
+
+/**
+ * Convert IPv6 item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param ipv6[in, out]
+ *   Verbs IPv6 specification structure.
+ */
+static void
+mlx5_flow_create_ipv6(const struct rte_flow_item *item,
+		      struct ibv_exp_flow_spec_ipv6 *ipv6)
+{
+	const struct rte_flow_item_ipv6 *spec = item->spec;
+	const struct rte_flow_item_ipv6 *mask = item->mask;
+	unsigned int i;
+
+	*ipv6 = (struct ibv_exp_flow_spec_ipv6) {
+		.type = IBV_EXP_FLOW_SPEC_IPV6,
+		.size = sizeof(struct ibv_exp_flow_spec_ipv6),
+	};
+	if (spec) {
+		memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
+		       RTE_DIM(ipv6->val.src_ip));
+		memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
+		       RTE_DIM(ipv6->val.dst_ip));
+	}
+	if (mask) {
+		memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
+		       RTE_DIM(ipv6->mask.src_ip));
+		memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
+		       RTE_DIM(ipv6->mask.dst_ip));
+	}
+	/* Remove unwanted bits from values. */
+	for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
+		ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
+		ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
+	}
+}
+
+/**
+ * Convert UDP item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param udp[in, out]
+ *   Verbs UDP specification structure.
+ */
+static void
+mlx5_flow_create_udp(const struct rte_flow_item *item,
+		     struct ibv_exp_flow_spec_tcp_udp *udp)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+
+	*udp = (struct ibv_exp_flow_spec_tcp_udp) {
+		.type = IBV_EXP_FLOW_SPEC_UDP,
+		.size = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+	};
+	udp->type = IBV_EXP_FLOW_SPEC_UDP;
+	if (spec) {
+		udp->val.dst_port = spec->hdr.dst_port;
+		udp->val.src_port = spec->hdr.src_port;
+	}
+	if (mask) {
+		udp->mask.dst_port = mask->hdr.dst_port;
+		udp->mask.src_port = mask->hdr.src_port;
+	}
+	/* Remove unwanted bits from values. */
+	udp->val.src_port &= udp->mask.src_port;
+	udp->val.dst_port &= udp->mask.dst_port;
+}
+
+/**
+ * Convert TCP item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param tcp[in, out]
+ *   Verbs TCP specification structure.
+ */
+static void
+mlx5_flow_create_tcp(const struct rte_flow_item *item,
+		     struct ibv_exp_flow_spec_tcp_udp *tcp)
+{
+	const struct rte_flow_item_tcp *spec = item->spec;
+	const struct rte_flow_item_tcp *mask = item->mask;
+
+	*tcp = (struct ibv_exp_flow_spec_tcp_udp) {
+		.type = IBV_EXP_FLOW_SPEC_TCP,
+		.size = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+	};
+	tcp->type = IBV_EXP_FLOW_SPEC_TCP;
+	if (spec) {
+		tcp->val.dst_port = spec->hdr.dst_port;
+		tcp->val.src_port = spec->hdr.src_port;
+	}
+	if (mask) {
+		tcp->mask.dst_port = mask->hdr.dst_port;
+		tcp->mask.src_port = mask->hdr.src_port;
+	}
+	/* Remove unwanted bits from values. */
+	tcp->val.src_port &= tcp->mask.src_port;
+	tcp->val.dst_port &= tcp->mask.dst_port;
+}
+
+/**
+ * Complete flow rule creation.
+ *
+ * @param  priv
+ *   Pointer to private structure.
+ * @param  ibv_attr
+ *   Verbs flow attributes.
+ * @param  queue
+ *   Destination queue.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow if the rule could be created, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue(struct priv *priv,
+			      struct ibv_exp_flow_attr *ibv_attr,
+			      uint32_t queue,
+			      struct rte_flow_error *error)
+{
+	struct rxq_ctrl *rxq;
+	struct rte_flow *rte_flow;
+
+	assert(priv->pd);
+	assert(priv->ctx);
+	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+	if (!rte_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "cannot allocate flow memory");
+		return NULL;
+	}
+	if (queue == MLX5_FLOW_DROP_QUEUE) {
+		rte_flow->drop = 1;
+		rte_flow->cq =
+			ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+					  &(struct ibv_exp_cq_init_attr){
+						  .comp_mask = 0,
+					  });
+		if (!rte_flow->cq) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   NULL, "cannot allocate CQ");
+			goto error;
+		}
+		rte_flow->wq = ibv_exp_create_wq(
+			priv->ctx,
+			&(struct ibv_exp_wq_init_attr){
+				.wq_type = IBV_EXP_WQT_RQ,
+				.max_recv_wr = 1,
+				.max_recv_sge = 1,
+				.pd = priv->pd,
+				.cq = rte_flow->cq,
+			});
+	} else {
+		rxq = container_of((*priv->rxqs)[queue], struct rxq_ctrl, rxq);
+		rte_flow->drop = 0;
+		rte_flow->wq = rxq->wq;
+	}
+	rte_flow->ibv_attr = ibv_attr;
+	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
+		priv->ctx,
+		&(struct ibv_exp_rwq_ind_table_init_attr){
+			.pd = priv->pd,
+			.log_ind_tbl_size = 0,
+			.ind_tbl = &rte_flow->wq,
+			.comp_mask = 0,
+		});
+	if (!rte_flow->ind_table) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "cannot allocate indirection table");
+		goto error;
+	}
+	rte_flow->qp = ibv_exp_create_qp(
+		priv->ctx,
+		&(struct ibv_exp_qp_init_attr){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_EXP_QP_INIT_ATTR_PD |
+				IBV_EXP_QP_INIT_ATTR_PORT |
+				IBV_EXP_QP_INIT_ATTR_RX_HASH,
+			.pd = priv->pd,
+			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+				.rx_hash_function =
+					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_hash_default_key_len,
+				.rx_hash_key = rss_hash_default_key,
+				.rx_hash_fields_mask = 0,
+				.rwq_ind_tbl = rte_flow->ind_table,
+			},
+			.port_num = priv->port,
+		});
+	if (!rte_flow->qp) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "cannot allocate QP");
+		goto error;
+	}
+	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+						 rte_flow->ibv_attr);
+	if (!rte_flow->ibv_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "flow rule creation failure");
+		goto error;
+	}
+	return rte_flow;
+error:
+	assert(rte_flow);
+	if (rte_flow->qp)
+		ibv_destroy_qp(rte_flow->qp);
+	if (rte_flow->ind_table)
+		ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
+	if (rte_flow->drop && rte_flow->wq)
+		ibv_exp_destroy_wq(rte_flow->wq);
+	if (rte_flow->drop && rte_flow->cq)
+		ibv_destroy_cq(rte_flow->cq);
+	rte_free(rte_flow->ibv_attr);
+	rte_free(rte_flow);
+	return NULL;
+}
+
+/**
+ * Convert a flow.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow on success, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create(struct priv *priv,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct rte_flow *rte_flow = NULL;
+	struct ibv_exp_flow_attr *ibv_attr;
+	unsigned int flow_size = sizeof(struct ibv_exp_flow_attr);
+	struct action {
+		int queue;
+		int drop;
+	} action;
+	unsigned int queue = MLX5_FLOW_DROP_QUEUE;
+
+	if (priv_flow_validate(priv, attr, items, actions, error))
+		goto exit;
+	ibv_attr = rte_malloc(__func__, flow_size, 0);
+	if (!ibv_attr) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate ibv_attr memory");
+		goto exit;
+	}
+	*ibv_attr = (struct ibv_exp_flow_attr){
+		.type = IBV_EXP_FLOW_ATTR_NORMAL,
+		.size = sizeof(struct ibv_exp_flow_attr),
+		.priority = attr->priority,
+		.num_of_specs = 0,
+		.port = 0,
+		.flags = 0,
+		.reserved = 0,
+	};
+	/* Update ibv_flow_spec. */
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
+			continue;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
+			struct ibv_exp_flow_spec_eth *eth;
+			unsigned int eth_size =
+				sizeof(struct ibv_exp_flow_spec_eth);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + eth_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			eth = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_eth(items, eth);
+			flow_size += eth_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 2;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			struct ibv_exp_flow_spec_ipv4 *ipv4;
+			unsigned int ipv4_size =
+				sizeof(struct ibv_exp_flow_spec_ipv4);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + ipv4_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			ipv4 = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_ipv4(items, ipv4);
+			flow_size += ipv4_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 1;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+			struct ibv_exp_flow_spec_ipv6 *ipv6;
+			unsigned int ipv6_size =
+				sizeof(struct ibv_exp_flow_spec_ipv6);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + ipv6_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			ipv6 = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_ipv6(items, ipv6);
+			flow_size += ipv6_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 1;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_UDP) {
+			struct ibv_exp_flow_spec_tcp_udp *udp;
+			unsigned int udp_size =
+				sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + udp_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			udp = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_udp(items, udp);
+			flow_size += udp_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 0;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_TCP) {
+			struct ibv_exp_flow_spec_tcp_udp *tcp;
+			unsigned int tcp_size =
+				sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + tcp_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			tcp = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_tcp(items, tcp);
+			flow_size += tcp_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 0;
+		} else {
+			/* This default case should never be reached. */
+			rte_free(ibv_attr);
+			rte_flow_error_set(
+				error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+				items, "unsupported item");
+			goto exit;
+		}
+	}
+	action = (struct action) {
+		.queue = -1,
+		.drop = 0,
+	};
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+			continue;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			action.queue = ((const struct rte_flow_action_queue *)
+					actions->conf)->index;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+			action.drop = 1;
+		} else {
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "unsupported action");
+			goto exit;
+		}
+	}
+	if (action.queue >= 0) {
+		queue = action.queue;
+	} else if (action.drop) {
+		queue = MLX5_FLOW_DROP_QUEUE;
+	} else {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions,
+				   "no possible action found");
+		goto exit;
+	}
+	rte_flow = priv_flow_create_action_queue(priv, ibv_attr, queue, error);
+	return rte_flow;
+error_no_memory:
+	rte_flow_error_set(error, ENOMEM,
+			   RTE_FLOW_ERROR_TYPE_ITEM,
+			   items,
+			   "cannot allocate memory");
+exit:
+	return NULL;
+}
+
+/**
  * Create a flow.
  *
  * @see rte_flow_create()
@@ -182,12 +827,11 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 	struct rte_flow *flow;
 
 	priv_lock(priv);
-	if (priv_flow_validate(priv, attr, items, actions, error)) {
-		priv_unlock(priv);
-		return NULL;
+	flow = priv_flow_create(priv, attr, items, actions, error);
+	if (flow) {
+		LIST_INSERT_HEAD(&priv->flows, flow, next);
+		DEBUG("Flow created %p", (void *)flow);
 	}
-	flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
-	LIST_INSERT_HEAD(&priv->flows, flow, next);
 	priv_unlock(priv);
 	return flow;
 }
@@ -206,6 +850,20 @@ priv_flow_destroy(struct priv *priv,
 {
 	(void)priv;
 	LIST_REMOVE(flow, next);
+	if (flow->ibv_flow)
+		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+	if (flow->qp)
+		claim_zero(ibv_destroy_qp(flow->qp));
+	if (flow->ind_table)
+		claim_zero(
+			ibv_exp_destroy_rwq_ind_table(
+				flow->ind_table));
+	if (flow->drop && flow->wq)
+		claim_zero(ibv_exp_destroy_wq(flow->wq));
+	if (flow->drop && flow->cq)
+		claim_zero(ibv_destroy_cq(flow->cq));
+	rte_free(flow->ibv_attr);
+	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
 }
 
@@ -235,7 +893,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
  * @param  priv
  *   Pointer to private structure.
  */
-void
+static void
 priv_flow_flush(struct priv *priv)
 {
 	while (!LIST_EMPTY(&priv->flows)) {
@@ -264,3 +922,54 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
 	priv_unlock(priv);
 	return 0;
 }
+
+/**
+ * Remove all flows.
+ *
+ * Called by dev_stop() to remove all flows.
+ *
+ * @param  priv
+ *   Pointer to private structure.
+ */
+void
+priv_flow_remove(struct priv *priv)
+{
+	struct rte_flow *flow;
+
+	for (flow = LIST_FIRST(&priv->flows);
+	     flow;
+	     flow = LIST_NEXT(flow, next)) {
+		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+		flow->ibv_flow = NULL;
+		DEBUG("Flow %p removed", (void *)flow);
+	}
+}
+
+/**
+ * Add all flows.
+ *
+ * @param  priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   0 on success, an errno value otherwise and rte_errno is set.
+ */
+int
+priv_flow_apply(struct priv *priv)
+{
+	struct rte_flow *flow;
+
+	for (flow = LIST_FIRST(&priv->flows);
+	     flow;
+	     flow = LIST_NEXT(flow, next)) {
+		flow->ibv_flow = ibv_exp_create_flow(flow->qp,
+						     flow->ibv_attr);
+		if (!flow->ibv_flow) {
+			DEBUG("Flow %p cannot be applied", (void *)flow);
+			rte_errno = EINVAL;
+			return rte_errno;
+		}
+		DEBUG("Flow %p applied", (void *)flow);
+	}
+	return 0;
+}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 4a359d7..e17960e 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -90,7 +90,9 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
 		priv_fdir_enable(priv);
 	priv_dev_interrupt_handler_install(priv, dev);
-	LIST_INIT(&priv->flows);
+	if (LIST_EMPTY(&priv->flows))
+		LIST_INIT(&priv->flows);
+	err = priv_flow_apply(priv);
 	priv_unlock(priv);
 	return -err;
 }
@@ -121,7 +123,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
 	priv_fdir_disable(priv);
-	priv_flow_flush(priv);
+	priv_flow_remove(priv);
 	priv_dev_interrupt_handler_uninstall(priv, dev);
 	priv->started = 0;
 	priv_unlock(priv);
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread

* [PATCH v2 4/4] net/mlx5: add VLAN filter support in rte_flow
  2016-11-25 18:14 [PATCH 0/3] net/mlx5: support flow_rte Nelio Laranjeiro
                   ` (6 preceding siblings ...)
  2016-12-21 10:01 ` [PATCH v2 3/4] net/mlx5: add rte_flow rule creation Nelio Laranjeiro
@ 2016-12-21 10:01 ` Nelio Laranjeiro
  7 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-21 10:01 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
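With this patch a VLAN match is an extra item between ETH and
IPv4/IPv6, merged into the Ethernet specification. A minimal sketch of
such a pattern (TCI value picked as an example):

 struct rte_flow_item_vlan vlan_spec = {
 	.tci = 42, /* example value, byte order per the rte_flow item definition */
 };
 struct rte_flow_item items[] = {
 	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 	{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &vlan_spec },
 	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 	{ .type = RTE_FLOW_ITEM_TYPE_END },
 };
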
 drivers/net/mlx5/mlx5_flow.c | 59 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 57 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 44e2fb8..fec1950 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -185,6 +185,9 @@ priv_flow_validate(struct priv *priv,
 			.dst_port = -1,
 		},
 	};
+	const struct rte_flow_item_vlan vlan_mask = {
+		.tci = -1,
+	};
 
 	if (attr->group) {
 		rte_flow_error_set(error, ENOTSUP,
@@ -229,11 +232,32 @@ priv_flow_validate(struct priv *priv,
 					sizeof(eth_mask));
 			if (err)
 				goto exit_item_not_supported;
-		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
 			if (!ilast)
 				goto exit_item_not_supported;
 			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
 				goto exit_item_not_supported;
+			if (((const struct rte_flow_item_vlan *)items)->tci >
+			    ETHER_MAX_VLAN_ID) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   items,
+						   "wrong VLAN tci value");
+				goto exit;
+			}
+			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&vlan_mask,
+					sizeof(vlan_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH &&
+				 ilast->type != RTE_FLOW_ITEM_TYPE_VLAN)
+				goto exit_item_not_supported;
 			ilast = items;
 			err = mlx5_flow_item_validate(
 					items,
@@ -244,7 +268,8 @@ priv_flow_validate(struct priv *priv,
 		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {
 			if (!ilast)
 				goto exit_item_not_supported;
-			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
+			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH &&
+				 ilast->type != RTE_FLOW_ITEM_TYPE_VLAN)
 				goto exit_item_not_supported;
 			ilast = items;
 			err = mlx5_flow_item_validate(
@@ -376,6 +401,28 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
 }
 
 /**
+ * Convert VLAN item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param eth[in, out]
+ *   Verbs Ethernet specification structure.
+ */
+static void
+mlx5_flow_create_vlan(const struct rte_flow_item *item,
+		      struct ibv_exp_flow_spec_eth *eth)
+{
+	const struct rte_flow_item_vlan *spec = item->spec;
+	const struct rte_flow_item_vlan *mask = item->mask;
+
+	if (spec)
+		eth->val.vlan_tag = spec->tci;
+	if (mask)
+		eth->mask.vlan_tag = mask->tci;
+	eth->val.vlan_tag &= eth->mask.vlan_tag;
+}
+
+/**
  * Convert IPv4 item to Verbs specification.
  *
  * @param item[in]
@@ -704,6 +751,14 @@ priv_flow_create(struct priv *priv,
 			flow_size += eth_size;
 			++ibv_attr->num_of_specs;
 			ibv_attr->priority = 2;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+			struct ibv_exp_flow_spec_eth *eth;
+			unsigned int eth_size =
+				sizeof(struct ibv_exp_flow_spec_eth);
+
+			eth = (void *)((uintptr_t)ibv_attr + flow_size -
+				       eth_size);
+			mlx5_flow_create_vlan(items, eth);
 		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
 			struct ibv_exp_flow_spec_ipv4 *ipv4;
 			unsigned int ipv4_size =
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread

* [PATCH v3 0/4] net/mlx5: support flow_rte
  2016-12-21 10:01 ` [PATCH v2 0/4] net/mlx5: support flow_rte Nelio Laranjeiro
@ 2016-12-21 15:19   ` Nelio Laranjeiro
  2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
                       ` (6 more replies)
  2016-12-21 15:19   ` [PATCH v3 1/4] net/mlx5: add preliminary support for rte_flow Nelio Laranjeiro
                     ` (3 subsequent siblings)
  4 siblings, 7 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-21 15:19 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

This series requires rte_flow [1].

It brings rte_flow support to the same level as flow director (FDIR) in mlx5.

 [1] http://dpdk.org/ml/archives/dev/2016-December/052950.html
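
As an illustration, a rule that FDIR would express as a perfect filter
can be written with testpmd's flow command once this series is applied
(an example relying on the testpmd support from the rte_flow series [1]):

 testpmd> flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.1 / udp dst is 4242 / end actions queue index 3 / end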


Changes in v3:

 - Fix Ethernet ether type issue.

Changes in v2:

 - Fix several issues.
 - Support VLAN filtering.

Nelio Laranjeiro (4):
  net/mlx5: add preliminary support for rte_flow
  net/mlx5: add software support for rte_flow
  net/mlx5: add rte_flow rule creation
  net/mlx5: add VLAN filter support in rte_flow

 drivers/net/mlx5/Makefile       |    1 +
 drivers/net/mlx5/mlx5.h         |   19 +
 drivers/net/mlx5/mlx5_fdir.c    |   15 +
 drivers/net/mlx5/mlx5_flow.c    | 1026 +++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_trigger.c |    4 +
 5 files changed, 1065 insertions(+)
 create mode 100644 drivers/net/mlx5/mlx5_flow.c

-- 
2.1.4

^ permalink raw reply	[flat|nested] 38+ messages in thread

* [PATCH v3 1/4] net/mlx5: add preliminary support for rte_flow
  2016-12-21 10:01 ` [PATCH v2 0/4] net/mlx5: support flow_rte Nelio Laranjeiro
  2016-12-21 15:19   ` [PATCH v3 " Nelio Laranjeiro
@ 2016-12-21 15:19   ` Nelio Laranjeiro
  2016-12-21 15:19   ` [PATCH v3 2/4] net/mlx5: add software " Nelio Laranjeiro
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-21 15:19 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
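At this stage the four callbacks are reachable through
RTE_ETH_FILTER_GENERIC but fail on purpose; e.g. a validation attempt
ends up as follows (a minimal sketch, port_id and the rule arrays
assumed declared):

 struct rte_flow_error err;
 int ret = rte_flow_validate(port_id, &attr, items, actions, &err);
 /* ret == -ENOTSUP, err.message == "not implemented yet" */
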
 drivers/net/mlx5/Makefile    |   1 +
 drivers/net/mlx5/mlx5.h      |  16 ++++++
 drivers/net/mlx5/mlx5_fdir.c |  15 ++++++
 drivers/net/mlx5/mlx5_flow.c | 122 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 154 insertions(+)
 create mode 100644 drivers/net/mlx5/mlx5_flow.c

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index cf87f0b..6d1338a 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -48,6 +48,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
 
 # Dependencies.
 DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 79b7a60..04f4eaa 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -59,6 +59,7 @@
 #include <rte_spinlock.h>
 #include <rte_interrupts.h>
 #include <rte_errno.h>
+#include <rte_flow.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -268,4 +269,19 @@ void priv_fdir_enable(struct priv *);
 int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
 			 enum rte_filter_op, void *);
 
+/* mlx5_flow.c */
+
+int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
+		       const struct rte_flow_item [],
+		       const struct rte_flow_action [],
+		       struct rte_flow_error *);
+struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
+				  const struct rte_flow_attr *,
+				  const struct rte_flow_item [],
+				  const struct rte_flow_action [],
+				  struct rte_flow_error *);
+int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
+		      struct rte_flow_error *);
+int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index 1acf682..f80c58b 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -55,6 +55,8 @@
 #include <rte_malloc.h>
 #include <rte_ethdev.h>
 #include <rte_common.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -1042,6 +1044,14 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
 	return ret;
 }
 
+static const struct rte_flow_ops mlx5_flow_ops = {
+	.validate = mlx5_flow_validate,
+	.create = mlx5_flow_create,
+	.destroy = mlx5_flow_destroy,
+	.flush = mlx5_flow_flush,
+	.query = NULL,
+};
+
 /**
  * Manage filter operations.
  *
@@ -1067,6 +1077,11 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 	struct priv *priv = dev->data->dev_private;
 
 	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &mlx5_flow_ops;
+		return 0;
 	case RTE_ETH_FILTER_FDIR:
 		priv_lock(priv);
 		ret = priv_fdir_ctrl_func(priv, filter_op, arg);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
new file mode 100644
index 0000000..a514dff
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -0,0 +1,122 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 6WIND S.A.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of 6WIND S.A. nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include "mlx5.h"
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)attr;
+	(void)items;
+	(void)actions;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx5_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)attr;
+	(void)items;
+	(void)actions;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return NULL;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)flow;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_NONE,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread

* [PATCH v3 2/4] net/mlx5: add software support for rte_flow
  2016-12-21 10:01 ` [PATCH v2 0/4] net/mlx5: support flow_rte Nelio Laranjeiro
  2016-12-21 15:19   ` [PATCH v3 " Nelio Laranjeiro
  2016-12-21 15:19   ` [PATCH v3 1/4] net/mlx5: add preliminary support for rte_flow Nelio Laranjeiro
@ 2016-12-21 15:19   ` Nelio Laranjeiro
  2016-12-23 12:19     ` Ferruh Yigit
  2016-12-21 15:19   ` [PATCH v3 3/4] net/mlx5: add rte_flow rule creation Nelio Laranjeiro
  2016-12-21 15:19   ` [PATCH v3 4/4] net/mlx5: add VLAN filter support in rte_flow Nelio Laranjeiro
  4 siblings, 1 reply; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-21 15:19 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Introduce initial software validation for rte_flow rules.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
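The validator only accepts patterns ordered ETH -> IPv4/IPv6 -> UDP/TCP
(VOID items may appear anywhere) with the VOID, QUEUE and DROP actions.
A pattern it accepts, as a minimal sketch (queue index chosen as an
example):

 struct rte_flow_attr attr = { .ingress = 1 };
 struct rte_flow_action_queue queue = { .index = 0 };
 struct rte_flow_item items[] = {
 	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 	{ .type = RTE_FLOW_ITEM_TYPE_END },
 };
 struct rte_flow_action actions[] = {
 	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 	{ .type = RTE_FLOW_ACTION_TYPE_END },
 };
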
 drivers/net/mlx5/mlx5.h         |   2 +
 drivers/net/mlx5/mlx5_flow.c    | 202 ++++++++++++++++++++++++++++++++++------
 drivers/net/mlx5/mlx5_trigger.c |   2 +
 3 files changed, 177 insertions(+), 29 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 04f4eaa..ac995a0 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -136,6 +136,7 @@ struct priv {
 	unsigned int reta_idx_n; /* RETA index size. */
 	struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
 	struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
+	LIST_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	rte_spinlock_t lock; /* Lock for control functions. */
 };
@@ -283,5 +284,6 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
 int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
 		      struct rte_flow_error *);
 int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+void priv_flow_flush(struct priv *);
 
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a514dff..3e5098a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -30,11 +30,119 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <sys/queue.h>
+
 #include <rte_ethdev.h>
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
+#include <rte_malloc.h>
+
 #include "mlx5.h"
 
+struct rte_flow {
+	LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure. */
+};
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_validate(struct priv *priv,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	(void)priv;
+	const struct rte_flow_item *ilast = NULL;
+
+	if (attr->group) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   NULL,
+				   "groups are not supported");
+		return -rte_errno;
+	}
+	if (attr->priority) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   NULL,
+				   "priorities are not supported");
+		return -rte_errno;
+	}
+	if (attr->egress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   NULL,
+				   "egress is not supported");
+		return -rte_errno;
+	}
+	if (!attr->ingress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   NULL,
+				   "only ingress is supported");
+		return -rte_errno;
+	}
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
+			continue;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
+			if (ilast)
+				goto exit_item_not_supported;
+			ilast = items;
+		} else if ((items->type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+			   (items->type == RTE_FLOW_ITEM_TYPE_IPV6)) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
+				goto exit_item_not_supported;
+			ilast = items;
+		} else if ((items->type == RTE_FLOW_ITEM_TYPE_UDP) ||
+			   (items->type == RTE_FLOW_ITEM_TYPE_TCP)) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
+				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
+				goto exit_item_not_supported;
+			ilast = items;
+		} else {
+			goto exit_item_not_supported;
+		}
+	}
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID ||
+		    actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
+		    actions->type == RTE_FLOW_ACTION_TYPE_DROP)
+			continue;
+		else
+			goto exit_action_not_supported;
+	}
+	return 0;
+exit_item_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+			   items, "item not supported");
+	return -rte_errno;
+exit_action_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+			   actions, "action not supported");
+	return -rte_errno;
+}
+
 /**
  * Validate a flow supported by the NIC.
  *
@@ -48,15 +156,13 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attr;
-	(void)items;
-	(void)actions;
-	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	struct priv *priv = dev->data->dev_private;
+	int ret;
+
+	priv_lock(priv);
+	ret = priv_flow_validate(priv, attr, items, actions, error);
+	priv_unlock(priv);
+	return ret;
 }
 
 /**
@@ -72,15 +178,35 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attr;
-	(void)items;
-	(void)actions;
-	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return NULL;
+	struct priv *priv = dev->data->dev_private;
+	struct rte_flow *flow;
+
+	priv_lock(priv);
+	if (priv_flow_validate(priv, attr, items, actions, error)) {
+		priv_unlock(priv);
+		return NULL;
+	}
+	flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+	LIST_INSERT_HEAD(&priv->flows, flow, next);
+	priv_unlock(priv);
+	return flow;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @param  priv
+ *   Pointer to private structure.
+ * @param[in] flow
+ *   Pointer to the flow to destroy.
+ */
+static void
+priv_flow_destroy(struct priv *priv,
+		  struct rte_flow *flow)
+{
+	(void)priv;
+	LIST_REMOVE(flow, next);
+	rte_free(flow);
 }
 
 /**
@@ -94,13 +220,30 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow *flow,
 		  struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)flow;
+	struct priv *priv = dev->data->dev_private;
+
 	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	priv_lock(priv);
+	priv_flow_destroy(priv, flow);
+	priv_unlock(priv);
+	return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @param  priv
+ *   Pointer to private structure.
+ */
+void
+priv_flow_flush(struct priv *priv)
+{
+	while (!LIST_EMPTY(&priv->flows)) {
+		struct rte_flow *flow;
+
+		flow = LIST_FIRST(&priv->flows);
+		priv_flow_destroy(priv, flow);
+	}
 }
 
 /**
@@ -113,10 +256,11 @@ int
 mlx5_flow_flush(struct rte_eth_dev *dev,
 		struct rte_flow_error *error)
 {
-	(void)dev;
+	struct priv *priv = dev->data->dev_private;
+
 	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_NONE,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	priv_lock(priv);
+	priv_flow_flush(priv);
+	priv_unlock(priv);
+	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index d4dccd8..4a359d7 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -90,6 +90,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
 		priv_fdir_enable(priv);
 	priv_dev_interrupt_handler_install(priv, dev);
+	LIST_INIT(&priv->flows);
 	priv_unlock(priv);
 	return -err;
 }
@@ -120,6 +121,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
 	priv_fdir_disable(priv);
+	priv_flow_flush(priv);
 	priv_dev_interrupt_handler_uninstall(priv, dev);
 	priv->started = 0;
 	priv_unlock(priv);
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread

* [PATCH v3 3/4] net/mlx5: add rte_flow rule creation
  2016-12-21 10:01 ` [PATCH v2 0/4] net/mlx5: support flow_rte Nelio Laranjeiro
                     ` (2 preceding siblings ...)
  2016-12-21 15:19   ` [PATCH v3 2/4] net/mlx5: add software " Nelio Laranjeiro
@ 2016-12-21 15:19   ` Nelio Laranjeiro
  2016-12-23 12:21     ` Ferruh Yigit
  2016-12-21 15:19   ` [PATCH v3 4/4] net/mlx5: add VLAN filter support in rte_flow Nelio Laranjeiro
  4 siblings, 1 reply; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-21 15:19 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Convert Ethernet, IPv4, IPv6, TCP, UDP layers into ibv_flow and create
those rules after validation (i.e. once the NIC supports the rule).

VLAN is still not supported in this commit.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
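Besides QUEUE, the DROP action is handled by attaching the rule to a
dummy Verbs work queue backed by a one-entry CQ (see
priv_flow_create_action_queue()). On the application side dropping
matched packets is then simply (a minimal sketch):

 struct rte_flow_action actions[] = {
 	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 	{ .type = RTE_FLOW_ACTION_TYPE_END },
 };
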
 drivers/net/mlx5/mlx5.h         |   3 +-
 drivers/net/mlx5/mlx5_flow.c    | 733 +++++++++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_trigger.c |   6 +-
 3 files changed, 725 insertions(+), 17 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index ac995a0..ca7e84c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -284,6 +284,7 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
 int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
 		      struct rte_flow_error *);
 int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
-void priv_flow_flush(struct priv *);
+int priv_flow_apply(struct priv *);
+void priv_flow_remove(struct priv *);
 
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 3e5098a..a33c568 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -31,6 +31,17 @@
  */
 
 #include <sys/queue.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
 
 #include <rte_ethdev.h>
 #include <rte_flow.h>
@@ -39,11 +50,82 @@
 
 #include "mlx5.h"
 
+/** Define a value to use as index for the drop queue. */
+#define MLX5_FLOW_DROP_QUEUE ((uint32_t)-1)
+
 struct rte_flow {
 	LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure. */
+	struct ibv_exp_flow_attr *ibv_attr; /* Pointer to Verbs attributes. */
+	struct ibv_exp_rwq_ind_table *ind_table; /* Indirection table. */
+	struct ibv_qp *qp; /* Verbs Queue pair. */
+	struct ibv_exp_flow *ibv_flow; /* Verbs flow. */
+	struct ibv_exp_wq *wq; /* Verbs work queue. */
+	struct ibv_cq *cq; /* Verbs completion queue. */
+	uint8_t drop; /* 1 if this flow is associated to a drop queue. */
 };
 
 /**
+ * Check support for a given item.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param mask[in]
+ *   Bit-mask covering supported fields to compare with the spec, last and
+ *   mask members of the item.
+ * @param size
+ *   Bit-mask size in bytes.
+ *
+ * @return
+ *   0 on success, a nonzero value otherwise.
+ */
+static int
+mlx5_flow_item_validate(const struct rte_flow_item *item,
+			const uint8_t *mask, unsigned int size)
+{
+	int ret = 0;
+
+	if (item->spec && !item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->spec;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->last && !item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->last;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->mask;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->spec && item->last) {
+		uint8_t spec[size];
+		uint8_t last[size];
+		const uint8_t *apply = mask;
+		unsigned int i;
+
+		if (item->mask)
+			apply = item->mask;
+		for (i = 0; i < size; ++i) {
+			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+		}
+		ret = memcmp(spec, last, size);
+	}
+	return ret;
+}
+
+/**
  * Validate a flow supported by the NIC.
  *
  * @param priv
@@ -67,8 +149,41 @@ priv_flow_validate(struct priv *priv,
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	(void)priv;
 	const struct rte_flow_item *ilast = NULL;
+	const struct rte_flow_item_eth eth_mask = {
+		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+	};
+	const struct rte_flow_item_ipv4 ipv4_mask = {
+		.hdr = {
+			.src_addr = -1,
+			.dst_addr = -1,
+		},
+	};
+	const struct rte_flow_item_ipv6 ipv6_mask = {
+		.hdr = {
+			.src_addr = {
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			},
+			.dst_addr = {
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			},
+		},
+	};
+	const struct rte_flow_item_udp udp_mask = {
+		.hdr = {
+			.src_port = -1,
+			.dst_port = -1,
+		},
+	};
+	const struct rte_flow_item_tcp tcp_mask = {
+		.hdr = {
+			.src_port = -1,
+			.dst_port = -1,
+		},
+	};
 
 	if (attr->group) {
 		rte_flow_error_set(error, ENOTSUP,
@@ -99,38 +214,93 @@ priv_flow_validate(struct priv *priv,
 		return -rte_errno;
 	}
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		int err = 0;
+
 		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
 			continue;
 		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
 			if (ilast)
 				goto exit_item_not_supported;
 			ilast = items;
-		} else if ((items->type == RTE_FLOW_ITEM_TYPE_IPV4) ||
-			   (items->type == RTE_FLOW_ITEM_TYPE_IPV6)) {
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&eth_mask,
+					sizeof(eth_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
+				goto exit_item_not_supported;
+			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&ipv4_mask,
+					sizeof(ipv4_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {
 			if (!ilast)
 				goto exit_item_not_supported;
 			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
 				goto exit_item_not_supported;
 			ilast = items;
-		} else if ((items->type == RTE_FLOW_ITEM_TYPE_UDP) ||
-			   (items->type == RTE_FLOW_ITEM_TYPE_TCP)) {
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&ipv6_mask,
+					sizeof(ipv6_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_UDP) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
+				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
+				goto exit_item_not_supported;
+			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&udp_mask,
+					sizeof(udp_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_TCP) {
 			if (!ilast)
 				goto exit_item_not_supported;
 			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
 				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
 				goto exit_item_not_supported;
 			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&tcp_mask,
+					sizeof(tcp_mask));
+			if (err)
+				goto exit_item_not_supported;
 		} else {
 			goto exit_item_not_supported;
 		}
 	}
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
 		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID ||
-		    actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
-		    actions->type == RTE_FLOW_ACTION_TYPE_DROP)
+		    actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
 			continue;
-		else
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			const struct rte_flow_action_queue *queue =
+				(const struct rte_flow_action_queue *)
+				actions->conf;
+
+			if (!queue || (queue->index > (priv->rxqs_n - 1))) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "queue index error");
+				goto exit;
+			}
+		} else {
 			goto exit_action_not_supported;
+		}
 	}
 	return 0;
 exit_item_not_supported:
@@ -140,6 +310,7 @@ priv_flow_validate(struct priv *priv,
 exit_action_not_supported:
 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
 			   actions, "action not supported");
+exit:
 	return -rte_errno;
 }
 
@@ -166,6 +337,476 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 }
 
 /**
+ * Convert Ethernet item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param eth[in, out]
+ *   Verbs Ethernet specification structure.
+ */
+static void
+mlx5_flow_create_eth(const struct rte_flow_item *item,
+		     struct ibv_exp_flow_spec_eth *eth)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	unsigned int i;
+
+	*eth = (struct ibv_exp_flow_spec_eth) {
+		.type = IBV_EXP_FLOW_SPEC_ETH,
+		.size = sizeof(struct ibv_exp_flow_spec_eth),
+	};
+	if (spec) {
+		memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+	}
+	if (mask) {
+		memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+	}
+	/* Remove unwanted bits from values. */
+	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
+		eth->val.src_mac[i] &= eth->mask.src_mac[i];
+	}
+}
+
+/**
+ * Convert IPv4 item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param ipv4[in, out]
+ *   Verbs IPv4 specification structure.
+ */
+static void
+mlx5_flow_create_ipv4(const struct rte_flow_item *item,
+		      struct ibv_exp_flow_spec_ipv4 *ipv4)
+{
+	const struct rte_flow_item_ipv4 *spec = item->spec;
+	const struct rte_flow_item_ipv4 *mask = item->mask;
+
+	*ipv4 = (struct ibv_exp_flow_spec_ipv4) {
+		.type = IBV_EXP_FLOW_SPEC_IPV4,
+		.size = sizeof(struct ibv_exp_flow_spec_ipv4),
+	};
+	if (spec) {
+		ipv4->val = (struct ibv_exp_flow_ipv4_filter){
+			.src_ip = spec->hdr.src_addr,
+			.dst_ip = spec->hdr.dst_addr,
+		};
+	}
+	if (mask) {
+		ipv4->mask = (struct ibv_exp_flow_ipv4_filter){
+			.src_ip = mask->hdr.src_addr,
+			.dst_ip = mask->hdr.dst_addr,
+		};
+	}
+	/* Remove unwanted bits from values. */
+	ipv4->val.src_ip &= ipv4->mask.src_ip;
+	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+}
+
+/**
+ * Convert IPv6 item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param ipv6[in, out]
+ *   Verbs IPv6 specification structure.
+ */
+static void
+mlx5_flow_create_ipv6(const struct rte_flow_item *item,
+		      struct ibv_exp_flow_spec_ipv6 *ipv6)
+{
+	const struct rte_flow_item_ipv6 *spec = item->spec;
+	const struct rte_flow_item_ipv6 *mask = item->mask;
+	unsigned int i;
+
+	*ipv6 = (struct ibv_exp_flow_spec_ipv6) {
+		.type = IBV_EXP_FLOW_SPEC_IPV6,
+		.size = sizeof(struct ibv_exp_flow_spec_ipv6),
+	};
+	if (spec) {
+		memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
+		       RTE_DIM(ipv6->val.src_ip));
+		memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
+		       RTE_DIM(ipv6->val.dst_ip));
+	}
+	if (mask) {
+		memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
+		       RTE_DIM(ipv6->mask.src_ip));
+		memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
+		       RTE_DIM(ipv6->mask.dst_ip));
+	}
+	/* Remove unwanted bits from values. */
+	for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
+		ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
+		ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
+	}
+}
+
+/**
+ * Convert UDP item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param udp[in, out]
+ *   Verbs UDP specification structure.
+ */
+static void
+mlx5_flow_create_udp(const struct rte_flow_item *item,
+		     struct ibv_exp_flow_spec_tcp_udp *udp)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+
+	*udp = (struct ibv_exp_flow_spec_tcp_udp) {
+		.type = IBV_EXP_FLOW_SPEC_UDP,
+		.size = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+	};
+	udp->type = IBV_EXP_FLOW_SPEC_UDP;
+	if (spec) {
+		udp->val.dst_port = spec->hdr.dst_port;
+		udp->val.src_port = spec->hdr.src_port;
+	}
+	if (mask) {
+		udp->mask.dst_port = mask->hdr.dst_port;
+		udp->mask.src_port = mask->hdr.src_port;
+	}
+	/* Remove unwanted bits from values. */
+	udp->val.src_port &= udp->mask.src_port;
+	udp->val.dst_port &= udp->mask.dst_port;
+}
+
+/**
+ * Convert TCP item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param tcp[in, out]
+ *   Verbs TCP specification structure.
+ */
+static void
+mlx5_flow_create_tcp(const struct rte_flow_item *item,
+		     struct ibv_exp_flow_spec_tcp_udp *tcp)
+{
+	const struct rte_flow_item_tcp *spec = item->spec;
+	const struct rte_flow_item_tcp *mask = item->mask;
+
+	*tcp = (struct ibv_exp_flow_spec_tcp_udp) {
+		.type = IBV_EXP_FLOW_SPEC_TCP,
+		.size = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+	};
+	tcp->type = IBV_EXP_FLOW_SPEC_TCP;
+	if (spec) {
+		tcp->val.dst_port = spec->hdr.dst_port;
+		tcp->val.src_port = spec->hdr.src_port;
+	}
+	if (mask) {
+		tcp->mask.dst_port = mask->hdr.dst_port;
+		tcp->mask.src_port = mask->hdr.src_port;
+	}
+	/* Remove unwanted bits from values. */
+	tcp->val.src_port &= tcp->mask.src_port;
+	tcp->val.dst_port &= tcp->mask.dst_port;
+}
+
+/**
+ * Complete flow rule creation.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param ibv_attr
+ *   Verbs flow attributes.
+ * @param queue
+ *   Destination queue.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow if the rule could be created, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue(struct priv *priv,
+			      struct ibv_exp_flow_attr *ibv_attr,
+			      uint32_t queue,
+			      struct rte_flow_error *error)
+{
+	struct rxq_ctrl *rxq;
+	struct rte_flow *rte_flow;
+
+	assert(priv->pd);
+	assert(priv->ctx);
+	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+	if (!rte_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "cannot allocate flow memory");
+		return NULL;
+	}
+	if (queue == MLX5_FLOW_DROP_QUEUE) {
+		rte_flow->drop = 1;
+		rte_flow->cq =
+			ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+					  &(struct ibv_exp_cq_init_attr){
+						  .comp_mask = 0,
+					  });
+		if (!rte_flow->cq) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   NULL, "cannot allocate CQ");
+			goto error;
+		}
+		rte_flow->wq = ibv_exp_create_wq(
+			priv->ctx,
+			&(struct ibv_exp_wq_init_attr){
+				.wq_type = IBV_EXP_WQT_RQ,
+				.max_recv_wr = 1,
+				.max_recv_sge = 1,
+				.pd = priv->pd,
+				.cq = rte_flow->cq,
+			});
+	} else {
+		rxq = container_of((*priv->rxqs)[queue], struct rxq_ctrl, rxq);
+		rte_flow->drop = 0;
+		rte_flow->wq = rxq->wq;
+	}
+	rte_flow->ibv_attr = ibv_attr;
+	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
+		priv->ctx,
+		&(struct ibv_exp_rwq_ind_table_init_attr){
+			.pd = priv->pd,
+			.log_ind_tbl_size = 0,
+			.ind_tbl = &rte_flow->wq,
+			.comp_mask = 0,
+		});
+	if (!rte_flow->ind_table) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "cannot allocate indirection table");
+		goto error;
+	}
+	rte_flow->qp = ibv_exp_create_qp(
+		priv->ctx,
+		&(struct ibv_exp_qp_init_attr){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_EXP_QP_INIT_ATTR_PD |
+				IBV_EXP_QP_INIT_ATTR_PORT |
+				IBV_EXP_QP_INIT_ATTR_RX_HASH,
+			.pd = priv->pd,
+			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+				.rx_hash_function =
+					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_hash_default_key_len,
+				.rx_hash_key = rss_hash_default_key,
+				.rx_hash_fields_mask = 0,
+				.rwq_ind_tbl = rte_flow->ind_table,
+			},
+			.port_num = priv->port,
+		});
+	if (!rte_flow->qp) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "cannot allocate QP");
+		goto error;
+	}
+	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+						 rte_flow->ibv_attr);
+	if (!rte_flow->ibv_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, "flow rule creation failure");
+		goto error;
+	}
+	return rte_flow;
+error:
+	assert(rte_flow);
+	if (rte_flow->qp)
+		ibv_destroy_qp(rte_flow->qp);
+	if (rte_flow->ind_table)
+		ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
+	if (rte_flow->drop && rte_flow->wq)
+		ibv_exp_destroy_wq(rte_flow->wq);
+	if (rte_flow->drop && rte_flow->cq)
+		ibv_destroy_cq(rte_flow->cq);
+	rte_free(rte_flow->ibv_attr);
+	rte_free(rte_flow);
+	return NULL;
+}
+
+/**
+ * Convert a flow.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow on success, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create(struct priv *priv,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct rte_flow *rte_flow = NULL;
+	struct ibv_exp_flow_attr *ibv_attr;
+	unsigned int flow_size = sizeof(struct ibv_exp_flow_attr);
+	struct action {
+		int queue;
+		int drop;
+	} action;
+	unsigned int queue = MLX5_FLOW_DROP_QUEUE;
+
+	if (priv_flow_validate(priv, attr, items, actions, error))
+		goto exit;
+	ibv_attr = rte_malloc(__func__, flow_size, 0);
+	if (!ibv_attr) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate ibv_attr memory");
+		goto exit;
+	}
+	*ibv_attr = (struct ibv_exp_flow_attr){
+		.type = IBV_EXP_FLOW_ATTR_NORMAL,
+		.size = sizeof(struct ibv_exp_flow_attr),
+		.priority = attr->priority,
+		.num_of_specs = 0,
+		.port = 0,
+		.flags = 0,
+		.reserved = 0,
+	};
+	/* Update ibv_flow_spec. */
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
+			continue;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
+			struct ibv_exp_flow_spec_eth *eth;
+			unsigned int eth_size =
+				sizeof(struct ibv_exp_flow_spec_eth);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + eth_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			eth = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_eth(items, eth);
+			flow_size += eth_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 2;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			struct ibv_exp_flow_spec_ipv4 *ipv4;
+			unsigned int ipv4_size =
+				sizeof(struct ibv_exp_flow_spec_ipv4);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + ipv4_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			ipv4 = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_ipv4(items, ipv4);
+			flow_size += ipv4_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 1;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+			struct ibv_exp_flow_spec_ipv6 *ipv6;
+			unsigned int ipv6_size =
+				sizeof(struct ibv_exp_flow_spec_ipv6);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + ipv6_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			ipv6 = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_ipv6(items, ipv6);
+			flow_size += ipv6_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 1;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_UDP) {
+			struct ibv_exp_flow_spec_tcp_udp *udp;
+			unsigned int udp_size =
+				sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + udp_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			udp = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_udp(items, udp);
+			flow_size += udp_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 0;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_TCP) {
+			struct ibv_exp_flow_spec_tcp_udp *tcp;
+			unsigned int tcp_size =
+				sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+			ibv_attr = rte_realloc(ibv_attr,
+					       flow_size + tcp_size, 0);
+			if (!ibv_attr)
+				goto error_no_memory;
+			tcp = (void *)((uintptr_t)ibv_attr + flow_size);
+			mlx5_flow_create_tcp(items, tcp);
+			flow_size += tcp_size;
+			++ibv_attr->num_of_specs;
+			ibv_attr->priority = 0;
+		} else {
+			/* This default case cannot happen after validation. */
+			rte_free(ibv_attr);
+			rte_flow_error_set(
+				error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+				items, "unsupported item");
+			goto exit;
+		}
+	}
+	action = (struct action) {
+		.queue = -1,
+		.drop = 0,
+	};
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+			continue;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			action.queue = ((const struct rte_flow_action_queue *)
+					actions->conf)->index;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+			action.drop = 1;
+		} else {
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "unsupported action");
+			goto exit;
+		}
+	}
+	if (action.queue >= 0) {
+		queue = action.queue;
+	} else if (action.drop) {
+		queue = MLX5_FLOW_DROP_QUEUE;
+	} else {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions,
+				   "no possible action found");
+		goto exit;
+	}
+	rte_flow = priv_flow_create_action_queue(priv, ibv_attr, queue, error);
+	return rte_flow;
+error_no_memory:
+	rte_flow_error_set(error, ENOMEM,
+			   RTE_FLOW_ERROR_TYPE_ITEM,
+			   items,
+			   "cannot allocate memory");
+exit:
+	return NULL;
+}
+
+/**
  * Create a flow.
  *
  * @see rte_flow_create()
@@ -182,12 +823,11 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 	struct rte_flow *flow;
 
 	priv_lock(priv);
-	if (priv_flow_validate(priv, attr, items, actions, error)) {
-		priv_unlock(priv);
-		return NULL;
+	flow = priv_flow_create(priv, attr, items, actions, error);
+	if (flow) {
+		LIST_INSERT_HEAD(&priv->flows, flow, next);
+		DEBUG("Flow created %p", (void *)flow);
 	}
-	flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
-	LIST_INSERT_HEAD(&priv->flows, flow, next);
 	priv_unlock(priv);
 	return flow;
 }
@@ -206,6 +846,20 @@ priv_flow_destroy(struct priv *priv,
 {
 	(void)priv;
 	LIST_REMOVE(flow, next);
+	if (flow->ibv_flow)
+		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+	if (flow->qp)
+		claim_zero(ibv_destroy_qp(flow->qp));
+	if (flow->ind_table)
+		claim_zero(
+			ibv_exp_destroy_rwq_ind_table(
+				flow->ind_table));
+	if (flow->drop && flow->wq)
+		claim_zero(ibv_exp_destroy_wq(flow->wq));
+	if (flow->drop && flow->cq)
+		claim_zero(ibv_destroy_cq(flow->cq));
+	rte_free(flow->ibv_attr);
+	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
 }
 
@@ -235,7 +889,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
  * @param  priv
  *   Pointer to private structure.
  */
-void
+static void
 priv_flow_flush(struct priv *priv)
 {
 	while (!LIST_EMPTY(&priv->flows)) {
@@ -264,3 +918,54 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
 	priv_unlock(priv);
 	return 0;
 }
+
+/**
+ * Remove all flows.
+ *
+ * Called by dev_stop() to remove all flows.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+void
+priv_flow_remove(struct priv *priv)
+{
+	struct rte_flow *flow;
+
+	for (flow = LIST_FIRST(&priv->flows);
+	     flow;
+	     flow = LIST_NEXT(flow, next)) {
+		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+		flow->ibv_flow = NULL;
+		DEBUG("Flow %p removed", (void *)flow);
+	}
+}
+
+/**
+ * Add all flows.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   0 on success, an errno value otherwise and rte_errno is set.
+ */
+int
+priv_flow_apply(struct priv *priv)
+{
+	struct rte_flow *flow;
+
+	for (flow = LIST_FIRST(&priv->flows);
+	     flow;
+	     flow = LIST_NEXT(flow, next)) {
+		flow->ibv_flow = ibv_exp_create_flow(flow->qp,
+						     flow->ibv_attr);
+		if (!flow->ibv_flow) {
+			DEBUG("Flow %p cannot be applied", (void *)flow);
+			rte_errno = EINVAL;
+			return rte_errno;
+		}
+		DEBUG("Flow %p applied", (void *)flow);
+	}
+	return 0;
+}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 4a359d7..e17960e 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -90,7 +90,9 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
 		priv_fdir_enable(priv);
 	priv_dev_interrupt_handler_install(priv, dev);
-	LIST_INIT(&priv->flows);
+	if (LIST_EMPTY(&priv->flows))
+		LIST_INIT(&priv->flows);
+	err = priv_flow_apply(priv);
 	priv_unlock(priv);
 	return -err;
 }
@@ -121,7 +123,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
 	priv_fdir_disable(priv);
-	priv_flow_flush(priv);
+	priv_flow_remove(priv);
 	priv_dev_interrupt_handler_uninstall(priv, dev);
 	priv->started = 0;
 	priv_unlock(priv);
-- 
2.1.4
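
To make the pattern in priv_flow_create() above concrete: the loop keeps a
byte offset (flow_size) into a single growing buffer and appends one
variable-size Verbs spec per matched item, recomputing pointers after each
reallocation. A minimal standalone sketch of that technique, using plain
malloc()/realloc() in place of rte_malloc()/rte_realloc() and illustrative
struct names (not the Verbs ones):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct attr_hdr {		/* stands in for ibv_exp_flow_attr */
	unsigned int num_of_specs;
	unsigned int priority;
};

struct spec_eth {		/* stands in for ibv_exp_flow_spec_eth */
	int type;
	unsigned int size;
};

/* Append a spec of 'sz' bytes at '*offset', growing the buffer. */
static void *
append_spec(void **buf, unsigned int *offset, unsigned int sz)
{
	void *nbuf = realloc(*buf, *offset + sz);

	if (!nbuf)
		return NULL;	/* caller still owns and frees *buf */
	*buf = nbuf;
	*offset += sz;
	return (uint8_t *)nbuf + *offset - sz;
}

int
main(void)
{
	unsigned int offset = sizeof(struct attr_hdr);
	void *buf = calloc(1, offset);
	struct spec_eth *eth;
	struct attr_hdr *hdr;

	if (!buf)
		return 1;
	eth = append_spec(&buf, &offset, sizeof(*eth));
	if (!eth) {
		free(buf);
		return 1;
	}
	eth->type = 1;
	eth->size = sizeof(*eth);
	/* Recompute header pointer: realloc() may have moved the buffer. */
	hdr = buf;
	hdr->num_of_specs = 1;
	printf("%u spec(s), %u bytes total\n", hdr->num_of_specs, offset);
	free(buf);
	return 0;
}

Keeping offsets rather than raw spec pointers across reallocations is what
lets the finished buffer be handed to ibv_exp_create_flow() in one piece.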


* [PATCH v3 4/4] net/mlx5: add VLAN filter support in rte_flow
  2016-12-21 10:01 ` [PATCH v2 0/4] net/mlx5: support flow_rte Nelio Laranjeiro
                     ` (3 preceding siblings ...)
  2016-12-21 15:19   ` [PATCH v3 3/4] net/mlx5: add rte_flow rule creation Nelio Laranjeiro
@ 2016-12-21 15:19   ` Nelio Laranjeiro
  4 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-21 15:19 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 59 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 57 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a33c568..2478fb6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -184,6 +184,9 @@ priv_flow_validate(struct priv *priv,
 			.dst_port = -1,
 		},
 	};
+	const struct rte_flow_item_vlan vlan_mask = {
+		.tci = -1,
+	};
 
 	if (attr->group) {
 		rte_flow_error_set(error, ENOTSUP,
@@ -228,11 +231,32 @@ priv_flow_validate(struct priv *priv,
 					sizeof(eth_mask));
 			if (err)
 				goto exit_item_not_supported;
-		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
 			if (!ilast)
 				goto exit_item_not_supported;
 			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
 				goto exit_item_not_supported;
+			if (items->spec && ((const struct rte_flow_item_vlan *)
+			    items->spec)->tci > ETHER_MAX_VLAN_ID) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   items,
+						   "wrong VLAN tci value");
+				goto exit;
+			}
+			ilast = items;
+			err = mlx5_flow_item_validate(
+					items,
+					(const uint8_t *)&vlan_mask,
+					sizeof(vlan_mask));
+			if (err)
+				goto exit_item_not_supported;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			if (!ilast)
+				goto exit_item_not_supported;
+			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH &&
+				 ilast->type != RTE_FLOW_ITEM_TYPE_VLAN)
+				goto exit_item_not_supported;
 			ilast = items;
 			err = mlx5_flow_item_validate(
 					items,
@@ -243,7 +267,8 @@ priv_flow_validate(struct priv *priv,
 		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {
 			if (!ilast)
 				goto exit_item_not_supported;
-			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
+			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH &&
+				 ilast->type != RTE_FLOW_ITEM_TYPE_VLAN)
 				goto exit_item_not_supported;
 			ilast = items;
 			err = mlx5_flow_item_validate(
@@ -372,6 +397,28 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
 }
 
 /**
+ * Convert VLAN item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param eth[in, out]
+ *   Verbs Ethernet specification structure.
+ */
+static void
+mlx5_flow_create_vlan(const struct rte_flow_item *item,
+		      struct ibv_exp_flow_spec_eth *eth)
+{
+	const struct rte_flow_item_vlan *spec = item->spec;
+	const struct rte_flow_item_vlan *mask = item->mask;
+
+	if (spec)
+		eth->val.vlan_tag = spec->tci;
+	if (mask)
+		eth->mask.vlan_tag = mask->tci;
+	eth->val.vlan_tag &= eth->mask.vlan_tag;
+}
+
+/**
  * Convert IPv4 item to Verbs specification.
  *
  * @param item[in]
@@ -700,6 +747,14 @@ priv_flow_create(struct priv *priv,
 			flow_size += eth_size;
 			++ibv_attr->num_of_specs;
 			ibv_attr->priority = 2;
+		} else if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+			struct ibv_exp_flow_spec_eth *eth;
+			unsigned int eth_size =
+				sizeof(struct ibv_exp_flow_spec_eth);
+
+			eth = (void *)((uintptr_t)ibv_attr + flow_size -
+				       eth_size);
+			mlx5_flow_create_vlan(items, eth);
 		} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {
 			struct ibv_exp_flow_spec_ipv4 *ipv4;
 			unsigned int ipv4_size =
-- 
2.1.4
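
The VLAN conversion above is worth a second look: unlike the other items it
appends nothing to the attribute buffer. Instead it rewinds by one
Ethernet-spec size so the TCI lands in the L2 spec already emitted for the
preceding ETH item. A standalone sketch of that pointer arithmetic (struct
and field names are illustrative, not the Verbs ones):

#include <stdint.h>
#include <stdio.h>

struct spec_eth {		/* stands in for ibv_exp_flow_spec_eth */
	uint16_t vlan_tag_val;
	uint16_t vlan_tag_mask;
};

static void
merge_vlan(uint8_t *attr, unsigned int flow_size,
	   uint16_t tci_spec, uint16_t tci_mask)
{
	/* Rewind to the Ethernet spec emitted just before this item. */
	struct spec_eth *eth =
		(struct spec_eth *)(attr + flow_size - sizeof(*eth));

	eth->vlan_tag_val = tci_spec & tci_mask;
	eth->vlan_tag_mask = tci_mask;
}

int
main(void)
{
	struct spec_eth storage[1] = { { 0, 0 } };
	unsigned int flow_size = sizeof(storage[0]); /* after the ETH item */

	merge_vlan((uint8_t *)storage, flow_size, 100, 0x0fff);
	printf("tag %u mask 0x%04x\n",
	       (unsigned int)storage[0].vlan_tag_val,
	       (unsigned int)storage[0].vlan_tag_mask);
	return 0;
}

This is also why validation only accepts VLAN directly after ETH: the
rewind assumes the previous spec is an Ethernet one.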


* Re: [PATCH v3 2/4] net/mlx5: add software support for rte_flow
  2016-12-21 15:19   ` [PATCH v3 2/4] net/mlx5: add software " Nelio Laranjeiro
@ 2016-12-23 12:19     ` Ferruh Yigit
  2016-12-23 13:24       ` Adrien Mazarguil
  0 siblings, 1 reply; 38+ messages in thread
From: Ferruh Yigit @ 2016-12-23 12:19 UTC (permalink / raw)
  To: Nelio Laranjeiro, dev; +Cc: Adrien Mazarguil

On 12/21/2016 3:19 PM, Nelio Laranjeiro wrote:
> Introduce initial software validation for rte_flow rules.
> 
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> ---
>  drivers/net/mlx5/mlx5.h         |   2 +
>  drivers/net/mlx5/mlx5_flow.c    | 202 ++++++++++++++++++++++++++++++++++------
>  drivers/net/mlx5/mlx5_trigger.c |   2 +
>  3 files changed, 177 insertions(+), 29 deletions(-)

<...>

> +	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
> +		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
> +			continue;
> +		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
> +			if (ilast)
> +				goto exit_item_not_supported;
> +			ilast = items;
> +		} else if ((items->type == RTE_FLOW_ITEM_TYPE_IPV4) ||
> +			   (items->type == RTE_FLOW_ITEM_TYPE_IPV6)) {
> +			if (!ilast)
> +				goto exit_item_not_supported;
> +			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
> +				goto exit_item_not_supported;
> +			ilast = items;
> +		} else if ((items->type == RTE_FLOW_ITEM_TYPE_UDP) ||
> +			   (items->type == RTE_FLOW_ITEM_TYPE_TCP)) {
> +			if (!ilast)
> +				goto exit_item_not_supported;
> +			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
> +				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
> +				goto exit_item_not_supported;
> +			ilast = items;
> +		} else {
> +			goto exit_item_not_supported;
> +		}
> +	}

I was thinking rte_flow_validate() validated the rule against the hardware /
PMD, but here API input validation is also done.
In patch 3/4, API input validation continues by validating each item
one by one.

Shouldn't each PMD need to do this kind of input validation?
Why not move generic input validation to the rte_flow API?
And if it is valid, call the PMD-specific one.


* Re: [PATCH v3 3/4] net/mlx5: add rte_flow rule creation
  2016-12-21 15:19   ` [PATCH v3 3/4] net/mlx5: add rte_flow rule creation Nelio Laranjeiro
@ 2016-12-23 12:21     ` Ferruh Yigit
  2016-12-26 12:20       ` Nélio Laranjeiro
  0 siblings, 1 reply; 38+ messages in thread
From: Ferruh Yigit @ 2016-12-23 12:21 UTC (permalink / raw)
  To: Nelio Laranjeiro, dev; +Cc: Adrien Mazarguil

On 12/21/2016 3:19 PM, Nelio Laranjeiro wrote:
> Convert Ethernet, IPv4, IPv6, TCP, UDP layers into ibv_flow and create
> those rules after validation (i.e. the NIC supports the rule).
> 
> VLAN is still not supported in this commit.
> 
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>

<...>

> +static struct rte_flow *
> +priv_flow_create(struct priv *priv,
> +		 const struct rte_flow_attr *attr,
> +		 const struct rte_flow_item items[],
> +		 const struct rte_flow_action actions[],
> +		 struct rte_flow_error *error)
> +{
> +	struct rte_flow *rte_flow = NULL;

Unnecessary assignment.

<...>

> +	action = (struct action) {
> +		.queue = -1,
> +		.drop = 0,
> +	};
> +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
> +		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
> +			continue;
> +		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
> +			action.queue = ((const struct rte_flow_action_queue *)
> +					actions->conf)->index;
> +		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
> +			action.drop = 1;
> +		} else {
> +			rte_flow_error_set(error, ENOTSUP,
> +					   RTE_FLOW_ERROR_TYPE_ACTION,
> +					   actions, "unsupported action");
> +			goto exit;
> +		}
> +	}
> +	if (action.queue >= 0) {
> +		queue = action.queue;
> +	} else if (action.drop) {
> +		queue = MLX5_FLOW_DROP_QUEUE;
> +	} else {

Not really so important, but as a note, ACTION_TYPE_VOID hits here. It
passes validation, but gives an error in creation.

> +		rte_flow_error_set(error, ENOTSUP,
> +				   RTE_FLOW_ERROR_TYPE_ACTION,
> +				   actions,
> +				   "no possible action found");
> +		goto exit;
> +	}

<...>


* Re: [PATCH v3 2/4] net/mlx5: add software support for rte_flow
  2016-12-23 12:19     ` Ferruh Yigit
@ 2016-12-23 13:24       ` Adrien Mazarguil
  0 siblings, 0 replies; 38+ messages in thread
From: Adrien Mazarguil @ 2016-12-23 13:24 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: Nelio Laranjeiro, dev

On Fri, Dec 23, 2016 at 12:19:30PM +0000, Ferruh Yigit wrote:
> On 12/21/2016 3:19 PM, Nelio Laranjeiro wrote:
> > Introduce initial software validation for rte_flow rules.
> > 
> > Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> > ---
> >  drivers/net/mlx5/mlx5.h         |   2 +
> >  drivers/net/mlx5/mlx5_flow.c    | 202 ++++++++++++++++++++++++++++++++++------
> >  drivers/net/mlx5/mlx5_trigger.c |   2 +
> >  3 files changed, 177 insertions(+), 29 deletions(-)
> 
> <...>
> 
> > +	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
> > +		if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
> > +			continue;
> > +		} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {
> > +			if (ilast)
> > +				goto exit_item_not_supported;
> > +			ilast = items;
> > +		} else if ((items->type == RTE_FLOW_ITEM_TYPE_IPV4) ||
> > +			   (items->type == RTE_FLOW_ITEM_TYPE_IPV6)) {
> > +			if (!ilast)
> > +				goto exit_item_not_supported;
> > +			else if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)
> > +				goto exit_item_not_supported;
> > +			ilast = items;
> > +		} else if ((items->type == RTE_FLOW_ITEM_TYPE_UDP) ||
> > +			   (items->type == RTE_FLOW_ITEM_TYPE_TCP)) {
> > +			if (!ilast)
> > +				goto exit_item_not_supported;
> > +			else if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&
> > +				 (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))
> > +				goto exit_item_not_supported;
> > +			ilast = items;
> > +		} else {
> > +			goto exit_item_not_supported;
> > +		}
> > +	}
> 
> I was thinking rte_flow_validate() validated the rule against the hardware /
> PMD, but here API input validation is also done.
> In patch 3/4, API input validation continues by validating each item
> one by one.
> 
> Shouldn't each PMD need to do this kind of input validation?
> Why not move generic input validation to the rte_flow API?
> And if it is valid, call the PMD-specific one.

I think we'll add one eventually, but such a generic function would be
called by PMDs, not by applications. PMDs must have the ability to optimize
validate() and create() however they want.

In the meantime, in my opinion, it's better to let PMDs implement their own
to determine what can be shared later without cluttering rte_flow from the
start.

-- 
Adrien Mazarguil
6WIND
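
For reference, the per-item check this thread discusses boils down to
bit-mask containment: every bit set in the application's spec, last and
mask must also be set in the bit-mask of fields the PMD can match on,
i.e. (bytes[i] | supported[i]) == supported[i] for every byte. A sketch of
that core test (not an existing rte_flow API, just the shared idea):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int
bits_within(const uint8_t *bytes, const uint8_t *supported, size_t size)
{
	size_t i;

	for (i = 0; i < size; ++i)
		if ((bytes[i] | supported[i]) != supported[i])
			return -1; /* requests a field the PMD cannot match */
	return 0;
}

int
main(void)
{
	/* Pretend the PMD supports matching on the first two bytes only. */
	const uint8_t supported[4] = { 0xff, 0xff, 0x00, 0x00 };
	const uint8_t ok[4] = { 0x12, 0x34, 0x00, 0x00 };
	const uint8_t bad[4] = { 0x12, 0x34, 0x56, 0x00 };

	printf("ok:  %d\n", bits_within(ok, supported, sizeof(ok)));   /* 0 */
	printf("bad: %d\n", bits_within(bad, supported, sizeof(bad))); /* -1 */
	return 0;
}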


* Re: [PATCH v3 3/4] net/mlx5: add rte_flow rule creation
  2016-12-23 12:21     ` Ferruh Yigit
@ 2016-12-26 12:20       ` Nélio Laranjeiro
  0 siblings, 0 replies; 38+ messages in thread
From: Nélio Laranjeiro @ 2016-12-26 12:20 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: dev, Adrien Mazarguil

On Fri, Dec 23, 2016 at 12:21:10PM +0000, Ferruh Yigit wrote:
> On 12/21/2016 3:19 PM, Nelio Laranjeiro wrote:
> > Convert Ethernet, IPv4, IPv6, TCP, UDP layers into ibv_flow and create
> > those rules after validation (i.e. the NIC supports the rule).
> > 
> > VLAN is still not supported in this commit.
> > 
> > Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> 
> <...>
> 
> > +static struct rte_flow *
> > +priv_flow_create(struct priv *priv,
> > +		 const struct rte_flow_attr *attr,
> > +		 const struct rte_flow_item items[],
> > +		 const struct rte_flow_action actions[],
> > +		 struct rte_flow_error *error)
> > +{
> > +	struct rte_flow *rte_flow = NULL;
> 
> Unnecessary assignment.
> 
> <...>
> 
> > +	action = (struct action) {
> > +		.queue = -1,
> > +		.drop = 0,
> > +	};
> > +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
> > +		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
> > +			continue;
> > +		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
> > +			action.queue = ((const struct rte_flow_action_queue *)
> > +					actions->conf)->index;
> > +		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
> > +			action.drop = 1;
> > +		} else {
> > +			rte_flow_error_set(error, ENOTSUP,
> > +					   RTE_FLOW_ERROR_TYPE_ACTION,
> > +					   actions, "unsupported action");
> > +			goto exit;
> > +		}
> > +	}
> > +	if (action.queue >= 0) {
> > +		queue = action.queue;
> > +	} else if (action.drop) {
> > +		queue = MLX5_FLOW_DROP_QUEUE;
> > +	} else {
> 
> Not really so important, but as a note, ACTION_TYPE_VOID hits here. It
> passes validation, but gives an error in creation.
> 
> > +		rte_flow_error_set(error, ENOTSUP,
> > +				   RTE_FLOW_ERROR_TYPE_ACTION,
> > +				   actions,
> > +				   "no possible action found");
> > +		goto exit;
> > +	}
> 
> <...>

Hi Ferruh,

I will send (very soon) a v4 to handle this situation.

Regards,

-- 
Nélio Laranjeiro
6WIND


* [PATCH v4 0/6] net/mlx5: support flow API
  2016-12-21 15:19   ` [PATCH v3 " Nelio Laranjeiro
@ 2016-12-28 10:37     ` Nelio Laranjeiro
  2016-12-29 15:15       ` [PATCH v5 " Nelio Laranjeiro
                         ` (6 more replies)
  2016-12-28 10:37     ` [PATCH v4 1/6] net/mlx5: add preliminary flow API support Nelio Laranjeiro
                       ` (5 subsequent siblings)
  6 siblings, 7 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-28 10:37 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Changes in v4:

 - Simplify flow parsing by using a graph.
 - Add VXLAN flow item.
 - Add mark flow action.
 - Extend IPv4 filter item (Type of service, Next Protocol ID).

Changes in v3:

 - Fix Ethernet ether type issue.

Changes in v2:

 - Fix several issues.
 - Support VLAN filtering.


Nelio Laranjeiro (6):
  net/mlx5: add preliminary flow API support
  net/mlx5: support basic flow items and actions
  net/mlx5: support VLAN flow item
  net/mlx5: support VXLAN flow item
  net/mlx5: support mark flow action
  net/mlx5: extend IPv4 flow item

 drivers/net/mlx5/Makefile       |    1 +
 drivers/net/mlx5/mlx5.h         |   19 +
 drivers/net/mlx5/mlx5_fdir.c    |   15 +
 drivers/net/mlx5/mlx5_flow.c    | 1192 +++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_prm.h     |   70 ++-
 drivers/net/mlx5/mlx5_rxtx.c    |   12 +-
 drivers/net/mlx5/mlx5_rxtx.h    |    3 +-
 drivers/net/mlx5/mlx5_trigger.c |    2 +
 8 files changed, 1311 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/mlx5/mlx5_flow.c

-- 
2.1.4
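
The "graph" in the changelog replaces the hand-written if/else chains of
v3: each item type carries the list of item types allowed to follow it,
and validation becomes a walk over the pattern checking every transition
(the real table is mlx5_flow_items[] in patch 2/6). A minimal standalone
sketch of the idea, with a toy item set:

#include <stdio.h>

enum item { ITEM_END, ITEM_VOID, ITEM_ETH, ITEM_IPV4, ITEM_UDP, ITEM_MAX };

/* Successor lists, END-terminated. */
static const enum item *const graph[ITEM_MAX] = {
	[ITEM_VOID] = (const enum item []){ ITEM_ETH, ITEM_END },
	[ITEM_ETH]  = (const enum item []){ ITEM_IPV4, ITEM_END },
	[ITEM_IPV4] = (const enum item []){ ITEM_UDP, ITEM_END },
	[ITEM_UDP]  = (const enum item []){ ITEM_END },
};

static int
pattern_valid(const enum item *pattern)
{
	enum item cur = ITEM_VOID; /* implicit start node */

	for (; *pattern != ITEM_END; ++pattern) {
		const enum item *next = graph[cur];
		int ok = 0;

		if (*pattern == ITEM_VOID)
			continue;
		for (; *next != ITEM_END; ++next)
			if (*next == *pattern)
				ok = 1;
		if (!ok)
			return 0;
		cur = *pattern;
	}
	return 1;
}

int
main(void)
{
	const enum item good[] = { ITEM_ETH, ITEM_IPV4, ITEM_UDP, ITEM_END };
	const enum item bad[] = { ITEM_UDP, ITEM_ETH, ITEM_END };

	printf("eth/ipv4/udp: %s\n", pattern_valid(good) ? "valid" : "invalid");
	printf("udp/eth:      %s\n", pattern_valid(bad) ? "valid" : "invalid");
	return 0;
}

Adding a new item (VLAN, VXLAN, ...) then means adding one table entry
instead of touching every branch of the validation loop.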


* [PATCH v4 1/6] net/mlx5: add preliminary flow API support
  2016-12-21 15:19   ` [PATCH v3 " Nelio Laranjeiro
  2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
@ 2016-12-28 10:37     ` Nelio Laranjeiro
  2016-12-28 10:37     ` [PATCH v4 2/6] net/mlx5: support basic flow items and actions Nelio Laranjeiro
                       ` (4 subsequent siblings)
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-28 10:37 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/Makefile    |   1 +
 drivers/net/mlx5/mlx5.h      |  16 ++++++
 drivers/net/mlx5/mlx5_fdir.c |  15 ++++++
 drivers/net/mlx5/mlx5_flow.c | 124 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 156 insertions(+)
 create mode 100644 drivers/net/mlx5/mlx5_flow.c

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index cf87f0b..6d1338a 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -48,6 +48,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
 
 # Dependencies.
 DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 79b7a60..04f4eaa 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -59,6 +59,7 @@
 #include <rte_spinlock.h>
 #include <rte_interrupts.h>
 #include <rte_errno.h>
+#include <rte_flow.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -268,4 +269,19 @@ void priv_fdir_enable(struct priv *);
 int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
 			 enum rte_filter_op, void *);
 
+/* mlx5_flow.c */
+
+int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
+		       const struct rte_flow_item [],
+		       const struct rte_flow_action [],
+		       struct rte_flow_error *);
+struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
+				  const struct rte_flow_attr *,
+				  const struct rte_flow_item [],
+				  const struct rte_flow_action [],
+				  struct rte_flow_error *);
+int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
+		      struct rte_flow_error *);
+int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index 1acf682..f80c58b 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -55,6 +55,8 @@
 #include <rte_malloc.h>
 #include <rte_ethdev.h>
 #include <rte_common.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -1042,6 +1044,14 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
 	return ret;
 }
 
+static const struct rte_flow_ops mlx5_flow_ops = {
+	.validate = mlx5_flow_validate,
+	.create = mlx5_flow_create,
+	.destroy = mlx5_flow_destroy,
+	.flush = mlx5_flow_flush,
+	.query = NULL,
+};
+
 /**
  * Manage filter operations.
  *
@@ -1067,6 +1077,11 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 	struct priv *priv = dev->data->dev_private;
 
 	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &mlx5_flow_ops;
+		return 0;
 	case RTE_ETH_FILTER_FDIR:
 		priv_lock(priv);
 		ret = priv_fdir_ctrl_func(priv, filter_op, arg);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
new file mode 100644
index 0000000..4fdefa0
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -0,0 +1,124 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 6WIND S.A.
+ *   Copyright 2016 Mellanox.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of 6WIND S.A. nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "mlx5.h"
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)attr;
+	(void)items;
+	(void)actions;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx5_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)attr;
+	(void)items;
+	(void)actions;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL, "not implemented yet");
+	return NULL;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)flow;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
-- 
2.1.4
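
With RTE_ETH_FILTER_GENERIC wired up above, the ethdev layer can hand the
PMD's rte_flow_ops to the generic API, so applications reach
mlx5_flow_validate()/mlx5_flow_create() through plain rte_flow calls. A
sketch of such usage against the 17.02-era API (port_id was a uint8_t at
the time; the helper name is ours, not a DPDK function), assuming an
already initialized and started port:

#include <stdint.h>
#include <stdio.h>

#include <rte_flow.h>

static struct rte_flow *
install_ipv4_to_queue(uint8_t port_id, uint16_t queue_index)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_index };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow *flow;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &error)) {
		fprintf(stderr, "rule rejected: %s\n",
			error.message ? error.message : "(no message)");
		return NULL;
	}
	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	if (!flow)
		fprintf(stderr, "rule creation failed: %s\n",
			error.message ? error.message : "(no message)");
	return flow;
}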


* [PATCH v4 2/6] net/mlx5: support basic flow items and actions
  2016-12-21 15:19   ` [PATCH v3 " Nelio Laranjeiro
  2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
  2016-12-28 10:37     ` [PATCH v4 1/6] net/mlx5: add preliminary flow API support Nelio Laranjeiro
@ 2016-12-28 10:37     ` Nelio Laranjeiro
  2016-12-28 10:37     ` [PATCH v4 3/6] net/mlx5: support VLAN flow item Nelio Laranjeiro
                       ` (3 subsequent siblings)
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-28 10:37 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Introduce initial software support for rte_flow rules.

VLAN and VXLAN are still not supported.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5.h         |   3 +
 drivers/net/mlx5/mlx5_flow.c    | 928 ++++++++++++++++++++++++++++++++++++++--
 drivers/net/mlx5/mlx5_trigger.c |   2 +
 3 files changed, 904 insertions(+), 29 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 04f4eaa..c415ce3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -136,6 +136,7 @@ struct priv {
 	unsigned int reta_idx_n; /* RETA index size. */
 	struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
 	struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
+	LIST_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	rte_spinlock_t lock; /* Lock for control functions. */
 };
@@ -283,5 +284,7 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
 int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
 		      struct rte_flow_error *);
 int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+int priv_flow_start(struct priv *);
+void priv_flow_stop(struct priv *);
 
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4fdefa0..ebae2b5 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -31,12 +31,380 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <sys/queue.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
 #include <rte_ethdev.h>
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
+#include <rte_malloc.h>
 
 #include "mlx5.h"
 
+static int
+mlx5_flow_create_eth(const struct rte_flow_item *item, void *data);
+
+static int
+mlx5_flow_create_ipv4(const struct rte_flow_item *item, void *data);
+
+static int
+mlx5_flow_create_ipv6(const struct rte_flow_item *item, void *data);
+
+static int
+mlx5_flow_create_udp(const struct rte_flow_item *item, void *data);
+
+static int
+mlx5_flow_create_tcp(const struct rte_flow_item *item, void *data);
+
+struct rte_flow {
+	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+	struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+	struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+	struct ibv_qp *qp; /**< Verbs queue pair. */
+	struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
+	struct ibv_exp_wq *wq; /**< Verbs work queue. */
+	struct ibv_cq *cq; /**< Verbs completion queue. */
+	struct rxq *rxq; /**< Pointer to the queue, NULL if drop queue. */
+};
+
+/** Static initializer for items. */
+#define ITEMS(...) \
+	(const enum rte_flow_item_type []){ \
+		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+	}
+
+/** Structure to generate a simple graph of layers supported by the NIC. */
+struct mlx5_flow_items {
+	/** List of possible following items.  */
+	const enum rte_flow_item_type *const items;
+	/** List of possible actions for these items. */
+	const enum rte_flow_action_type *const actions;
+	/** Bit-masks corresponding to the possibilities for the item. */
+	const void *mask;
+	/** Bit-masks size in bytes. */
+	const unsigned int mask_sz;
+	/**
+	 * Conversion function from rte_flow to NIC specific flow.
+	 *
+	 * @param item
+	 *   rte_flow item to convert.
+	 * @param data
+	 *   Internal structure to store the conversion.
+	 *
+	 * @return
+	 *   0 on success, negative value otherwise.
+	 */
+	int (*convert)(const struct rte_flow_item *item, void *data);
+	/** Size in bytes of the destination structure. */
+	const unsigned int dst_sz;
+};
+
+/** Valid actions for this PMD. */
+static const enum rte_flow_action_type valid_actions[] = {
+	RTE_FLOW_ACTION_TYPE_DROP,
+	RTE_FLOW_ACTION_TYPE_QUEUE,
+	RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Graph of supported items and associated actions. */
+static const struct mlx5_flow_items mlx5_flow_items[] = {
+	[RTE_FLOW_ITEM_TYPE_VOID] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VOID,
+			       RTE_FLOW_ITEM_TYPE_ETH),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_eth){
+			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+			.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+		},
+		.mask_sz = sizeof(struct rte_flow_item_eth),
+		.convert = mlx5_flow_create_eth,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
+	},
+	[RTE_FLOW_ITEM_TYPE_ETH] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+			       RTE_FLOW_ITEM_TYPE_IPV6),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_eth){
+			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+			.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+		},
+		.mask_sz = sizeof(struct rte_flow_item_eth),
+		.convert = mlx5_flow_create_eth,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
+	},
+	[RTE_FLOW_ITEM_TYPE_IPV4] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+			       RTE_FLOW_ITEM_TYPE_TCP),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_ipv4){
+			.hdr = {
+				.src_addr = -1,
+				.dst_addr = -1,
+			},
+		},
+		.mask_sz = sizeof(struct rte_flow_item_ipv4),
+		.convert = mlx5_flow_create_ipv4,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4),
+	},
+	[RTE_FLOW_ITEM_TYPE_IPV6] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+			       RTE_FLOW_ITEM_TYPE_TCP),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_ipv6){
+			.hdr = {
+				.src_addr = {
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+				},
+				.dst_addr = {
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+				},
+			},
+		},
+		.mask_sz = sizeof(struct rte_flow_item_ipv6),
+		.convert = mlx5_flow_create_ipv6,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6),
+	},
+	[RTE_FLOW_ITEM_TYPE_UDP] = {
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_udp){
+			.hdr = {
+				.src_port = -1,
+				.dst_port = -1,
+			},
+		},
+		.mask_sz = sizeof(struct rte_flow_item_udp),
+		.convert = mlx5_flow_create_udp,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+	},
+	[RTE_FLOW_ITEM_TYPE_TCP] = {
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_tcp){
+			.hdr = {
+				.src_port = -1,
+				.dst_port = -1,
+			},
+		},
+		.mask_sz = sizeof(struct rte_flow_item_tcp),
+		.convert = mlx5_flow_create_tcp,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+	},
+};
+
+/** Structure to pass to the conversion function. */
+struct mlx5_flow {
+	struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
+	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+};
+
+struct mlx5_flow_action {
+	uint32_t queue:1; /**< Target is a receive queue. */
+	uint32_t drop:1; /**< Target is a drop queue. */
+	uint32_t queue_id; /**< Identifier of the queue. */
+};
+
+/**
+ * Check support for a given item.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param mask[in]
+ *   Bit-masks covering supported fields to compare with spec, last and mask in
+ *   \p item.
+ * @param size
+ *   Bit-mask size in bytes.
+ *
+ * @return
+ *   0 on success.
+ */
+static int
+mlx5_flow_item_validate(const struct rte_flow_item *item,
+			const uint8_t *mask, unsigned int size)
+{
+	int ret = 0;
+
+	if (item->spec && !item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->spec;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->last && !item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->last;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->mask;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->spec && item->last) {
+		uint8_t spec[size];
+		uint8_t last[size];
+		const uint8_t *apply = mask;
+		unsigned int i;
+
+		if (item->mask)
+			apply = item->mask;
+		for (i = 0; i < size; ++i) {
+			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+		}
+		ret = memcmp(spec, last, size);
+	}
+	return ret;
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @param[in, out] flow
+ *   Flow structure to update.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_validate(struct priv *priv,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error,
+		   struct mlx5_flow *flow)
+{
+	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
+	struct mlx5_flow_action action = {
+		.queue = 0,
+		.drop = 0,
+	};
+
+	(void)priv;
+	if (attr->group) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   NULL,
+				   "groups are not supported");
+		return -rte_errno;
+	}
+	if (attr->priority) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   NULL,
+				   "priorities are not supported");
+		return -rte_errno;
+	}
+	if (attr->egress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   NULL,
+				   "egress is not supported");
+		return -rte_errno;
+	}
+	if (!attr->ingress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   NULL,
+				   "only ingress is supported");
+		return -rte_errno;
+	}
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		const struct mlx5_flow_items *token = NULL;
+		unsigned int i;
+		int err;
+
+		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+			continue;
+		for (i = 0;
+		     cur_item->items &&
+		     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+		     ++i) {
+			if (cur_item->items[i] == items->type) {
+				token = &mlx5_flow_items[items->type];
+				break;
+			}
+		}
+		if (!token)
+			goto exit_item_not_supported;
+		cur_item = token;
+		err = mlx5_flow_item_validate(items,
+					      (const uint8_t *)cur_item->mask,
+					      cur_item->mask_sz);
+		if (err)
+			goto exit_item_not_supported;
+		if (flow->ibv_attr && cur_item->convert) {
+			err = cur_item->convert(items, flow);
+			if (err)
+				goto exit_item_not_supported;
+		}
+		flow->offset += cur_item->dst_sz;
+	}
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+			continue;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+			action.drop = 1;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			const struct rte_flow_action_queue *queue =
+				(const struct rte_flow_action_queue *)
+				actions->conf;
+
+			if (!queue || (queue->index >= priv->rxqs_n))
+				goto exit_action_not_supported;
+			action.queue = 1;
+		} else {
+			goto exit_action_not_supported;
+		}
+	}
+	if (!action.queue && !action.drop) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "no valid action");
+		return -rte_errno;
+	}
+	return 0;
+exit_item_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+			   items, "item not supported");
+	return -rte_errno;
+exit_action_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+			   actions, "action not supported");
+	return -rte_errno;
+}
+
 /**
  * Validate a flow supported by the NIC.
  *
@@ -50,15 +418,417 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attr;
-	(void)items;
-	(void)actions;
-	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	struct priv *priv = dev->data->dev_private;
+	int ret;
+	struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };
+
+	priv_lock(priv);
+	ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
+	priv_unlock(priv);
+	return ret;
+}
+
+/**
+ * Convert Ethernet item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param data[in, out]
+ *   User structure.
+ */
+static int
+mlx5_flow_create_eth(const struct rte_flow_item *item, void *data)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_eth *eth;
+	const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+	unsigned int i;
+
+	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*eth = (struct ibv_exp_flow_spec_eth) {
+		.type = IBV_EXP_FLOW_SPEC_ETH,
+		.size = eth_size,
+	};
+	if (spec) {
+		memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+	}
+	if (mask) {
+		memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+	}
+	/* Remove unwanted bits from values. */
+	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
+		eth->val.src_mac[i] &= eth->mask.src_mac[i];
+	}
+	/* Finalise the flow. */
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 2;
+	return 0;
+}
+
+/**
+ * Convert IPv4 item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param data[in, out]
+ *   User structure.
+ */
+static int
+mlx5_flow_create_ipv4(const struct rte_flow_item *item, void *data)
+{
+	const struct rte_flow_item_ipv4 *spec = item->spec;
+	const struct rte_flow_item_ipv4 *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_ipv4 *ipv4;
+	unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4);
+
+	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*ipv4 = (struct ibv_exp_flow_spec_ipv4) {
+		.type = IBV_EXP_FLOW_SPEC_IPV4,
+		.size = ipv4_size,
+	};
+	if (spec) {
+		ipv4->val = (struct ibv_exp_flow_ipv4_filter){
+			.src_ip = spec->hdr.src_addr,
+			.dst_ip = spec->hdr.dst_addr,
+		};
+	}
+	if (mask) {
+		ipv4->mask = (struct ibv_exp_flow_ipv4_filter){
+			.src_ip = mask->hdr.src_addr,
+			.dst_ip = mask->hdr.dst_addr,
+		};
+	}
+	/* Remove unwanted bits from values. */
+	ipv4->val.src_ip &= ipv4->mask.src_ip;
+	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 1;
+	return 0;
+}
+
+/**
+ * Convert IPv6 item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param data[in, out]
+ *   User structure.
+ */
+static int
+mlx5_flow_create_ipv6(const struct rte_flow_item *item, void *data)
+{
+	const struct rte_flow_item_ipv6 *spec = item->spec;
+	const struct rte_flow_item_ipv6 *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_ipv6 *ipv6;
+	unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6);
+	unsigned int i;
+
+	ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*ipv6 = (struct ibv_exp_flow_spec_ipv6) {
+		.type = IBV_EXP_FLOW_SPEC_IPV6,
+		.size = ipv6_size,
+	};
+	if (spec) {
+		memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
+		       RTE_DIM(ipv6->val.src_ip));
+		memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
+		       RTE_DIM(ipv6->val.dst_ip));
+	}
+	if (mask) {
+		memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
+		       RTE_DIM(ipv6->mask.src_ip));
+		memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
+		       RTE_DIM(ipv6->mask.dst_ip));
+	}
+	/* Remove unwanted bits from values. */
+	for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
+		ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
+		ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
+	}
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 1;
+	return 0;
+}
+
+/**
+ * Convert UDP item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param data[in, out]
+ *   User structure.
+ */
+static int
+mlx5_flow_create_udp(const struct rte_flow_item *item, void *data)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_tcp_udp *udp;
+	unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*udp = (struct ibv_exp_flow_spec_tcp_udp) {
+		.type = IBV_EXP_FLOW_SPEC_UDP,
+		.size = udp_size,
+	};
+	if (spec) {
+		udp->val.dst_port = spec->hdr.dst_port;
+		udp->val.src_port = spec->hdr.src_port;
+	}
+	if (mask) {
+		udp->mask.dst_port = mask->hdr.dst_port;
+		udp->mask.src_port = mask->hdr.src_port;
+	}
+	/* Remove unwanted bits from values. */
+	udp->val.src_port &= udp->mask.src_port;
+	udp->val.dst_port &= udp->mask.dst_port;
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 0;
+	return 0;
+}
+
+/**
+ * Convert TCP item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param data[in, out]
+ *   User structure.
+ */
+static int
+mlx5_flow_create_tcp(const struct rte_flow_item *item, void *data)
+{
+	const struct rte_flow_item_tcp *spec = item->spec;
+	const struct rte_flow_item_tcp *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_tcp_udp *tcp;
+	unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*tcp = (struct ibv_exp_flow_spec_tcp_udp) {
+		.type = IBV_EXP_FLOW_SPEC_TCP,
+		.size = tcp_size,
+	};
+	if (spec) {
+		tcp->val.dst_port = spec->hdr.dst_port;
+		tcp->val.src_port = spec->hdr.src_port;
+	}
+	if (mask) {
+		tcp->mask.dst_port = mask->hdr.dst_port;
+		tcp->mask.src_port = mask->hdr.src_port;
+	}
+	/* Remove unwanted bits from values. */
+	tcp->val.src_port &= tcp->mask.src_port;
+	tcp->val.dst_port &= tcp->mask.dst_port;
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 0;
+	return 0;
+}
+
+/**
+ * Complete flow rule creation.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param ibv_attr
+ *   Verbs flow attributes.
+ * @param action
+ *   Target action structure.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow if the rule could be created, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue(struct priv *priv,
+			      struct ibv_exp_flow_attr *ibv_attr,
+			      struct mlx5_flow_action *action,
+			      struct rte_flow_error *error)
+{
+	struct rxq_ctrl *rxq;
+	struct rte_flow *rte_flow;
+
+	assert(priv->pd);
+	assert(priv->ctx);
+	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+	if (!rte_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate flow memory");
+		return NULL;
+	}
+	if (action->drop) {
+		rte_flow->cq =
+			ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+					  &(struct ibv_exp_cq_init_attr){
+						  .comp_mask = 0,
+					  });
+		if (!rte_flow->cq) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL, "cannot allocate CQ");
+			goto error;
+		}
+		rte_flow->wq = ibv_exp_create_wq(priv->ctx,
+						 &(struct ibv_exp_wq_init_attr){
+						 .wq_type = IBV_EXP_WQT_RQ,
+						 .max_recv_wr = 1,
+						 .max_recv_sge = 1,
+						 .pd = priv->pd,
+						 .cq = rte_flow->cq,
+						 });
+	} else {
+		rxq = container_of((*priv->rxqs)[action->queue_id],
+				   struct rxq_ctrl, rxq);
+		rte_flow->rxq = &rxq->rxq;
+		rte_flow->wq = rxq->wq;
+	}
+	rte_flow->ibv_attr = ibv_attr;
+	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
+		priv->ctx,
+		&(struct ibv_exp_rwq_ind_table_init_attr){
+			.pd = priv->pd,
+			.log_ind_tbl_size = 0,
+			.ind_tbl = &rte_flow->wq,
+			.comp_mask = 0,
+		});
+	if (!rte_flow->ind_table) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate indirection table");
+		goto error;
+	}
+	rte_flow->qp = ibv_exp_create_qp(
+		priv->ctx,
+		&(struct ibv_exp_qp_init_attr){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_EXP_QP_INIT_ATTR_PD |
+				IBV_EXP_QP_INIT_ATTR_PORT |
+				IBV_EXP_QP_INIT_ATTR_RX_HASH,
+			.pd = priv->pd,
+			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+				.rx_hash_function =
+					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_hash_default_key_len,
+				.rx_hash_key = rss_hash_default_key,
+				.rx_hash_fields_mask = 0,
+				.rwq_ind_tbl = rte_flow->ind_table,
+			},
+			.port_num = priv->port,
+		});
+	if (!rte_flow->qp) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate QP");
+		goto error;
+	}
+	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+						 rte_flow->ibv_attr);
+	if (!rte_flow->ibv_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "flow rule creation failure");
+		goto error;
+	}
+	return rte_flow;
+error:
+	assert(rte_flow);
+	if (rte_flow->qp)
+		ibv_destroy_qp(rte_flow->qp);
+	if (rte_flow->ind_table)
+		ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
+	if (!rte_flow->rxq && rte_flow->wq)
+		ibv_exp_destroy_wq(rte_flow->wq);
+	if (!rte_flow->rxq && rte_flow->cq)
+		ibv_destroy_cq(rte_flow->cq);
+	rte_free(rte_flow->ibv_attr);
+	rte_free(rte_flow);
+	return NULL;
+}
+
+/**
+ * Convert a flow.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow on success, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create(struct priv *priv,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct rte_flow *rte_flow;
+	struct mlx5_flow_action action;
+	struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };
+	int err;
+
+	err = priv_flow_validate(priv, attr, items, actions, error, &flow);
+	if (err)
+		goto exit;
+	flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
+	flow.offset = sizeof(struct ibv_exp_flow_attr);
+	if (!flow.ibv_attr) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate ibv_attr memory");
+		goto exit;
+	}
+	*flow.ibv_attr = (struct ibv_exp_flow_attr){
+		.type = IBV_EXP_FLOW_ATTR_NORMAL,
+		.size = sizeof(struct ibv_exp_flow_attr),
+		.priority = attr->priority,
+		.num_of_specs = 0,
+		.port = 0,
+		.flags = 0,
+		.reserved = 0,
+	};
+	claim_zero(priv_flow_validate(priv, attr, items, actions,
+				      error, &flow));
+	action = (struct mlx5_flow_action){
+		.queue = 0,
+		.drop = 0,
+	};
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+			continue;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			action.queue = 1;
+			action.queue_id =
+				((const struct rte_flow_action_queue *)
+				 actions->conf)->index;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+			action.drop = 1;
+		} else {
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "unsupported action");
+			goto exit;
+		}
+	}
+	rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
+						 &action, error);
+	return rte_flow;
+exit:
+	rte_free(flow.ibv_attr);
+	return NULL;
 }
 
 /**
@@ -74,15 +844,46 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attr;
-	(void)items;
-	(void)actions;
-	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			   NULL, "not implemented yet");
-	return NULL;
+	struct priv *priv = dev->data->dev_private;
+	struct rte_flow *flow;
+
+	priv_lock(priv);
+	flow = priv_flow_create(priv, attr, items, actions, error);
+	if (flow) {
+		LIST_INSERT_HEAD(&priv->flows, flow, next);
+		DEBUG("Flow created %p", (void *)flow);
+	}
+	priv_unlock(priv);
+	return flow;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] flow
+ *   Flow to destroy.
+ */
+static void
+priv_flow_destroy(struct priv *priv,
+		  struct rte_flow *flow)
+{
+	(void)priv;
+	LIST_REMOVE(flow, next);
+	if (flow->ibv_flow)
+		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+	if (flow->qp)
+		claim_zero(ibv_destroy_qp(flow->qp));
+	if (flow->ind_table)
+		claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
+	if (!flow->rxq && flow->wq)
+		claim_zero(ibv_exp_destroy_wq(flow->wq));
+	if (!flow->rxq && flow->cq)
+		claim_zero(ibv_destroy_cq(flow->cq));
+	rte_free(flow->ibv_attr);
+	DEBUG("Flow destroyed %p", (void *)flow);
+	rte_free(flow);
 }
 
 /**
@@ -96,13 +897,30 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow *flow,
 		  struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)flow;
+	struct priv *priv = dev->data->dev_private;
+
 	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	priv_lock(priv);
+	priv_flow_destroy(priv, flow);
+	priv_unlock(priv);
+	return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+static void
+priv_flow_flush(struct priv *priv)
+{
+	while (!LIST_EMPTY(&priv->flows)) {
+		struct rte_flow *flow;
+
+		flow = LIST_FIRST(&priv->flows);
+		priv_flow_destroy(priv, flow);
+	}
 }
 
 /**
@@ -115,10 +933,62 @@ int
 mlx5_flow_flush(struct rte_eth_dev *dev,
 		struct rte_flow_error *error)
 {
-	(void)dev;
+	struct priv *priv = dev->data->dev_private;
+
 	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	priv_lock(priv);
+	priv_flow_flush(priv);
+	priv_unlock(priv);
+	return 0;
+}
+
+/**
+ * Remove all flows.
+ *
+ * Called by dev_stop() to remove all flows without destroying them.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+void
+priv_flow_stop(struct priv *priv)
+{
+	struct rte_flow *flow;
+
+	for (flow = LIST_FIRST(&priv->flows);
+	     flow;
+	     flow = LIST_NEXT(flow, next)) {
+		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+		flow->ibv_flow = NULL;
+		DEBUG("Flow %p removed", (void *)flow);
+	}
+}
+
+/**
+ * Add all flows.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   0 on success, an errno value otherwise and rte_errno is set.
+ */
+int
+priv_flow_start(struct priv *priv)
+{
+	struct rte_flow *flow;
+
+	for (flow = LIST_FIRST(&priv->flows);
+	     flow;
+	     flow = LIST_NEXT(flow, next)) {
+		flow->ibv_flow = ibv_exp_create_flow(flow->qp,
+						     flow->ibv_attr);
+		if (!flow->ibv_flow) {
+			DEBUG("Flow %p cannot be applied", (void *)flow);
+			rte_errno = EINVAL;
+			return rte_errno;
+		}
+		DEBUG("Flow %p applied", (void *)flow);
+	}
+	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index d4dccd8..2399243 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -90,6 +90,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
 		priv_fdir_enable(priv);
 	priv_dev_interrupt_handler_install(priv, dev);
+	err = priv_flow_start(priv);
 	priv_unlock(priv);
 	return -err;
 }
@@ -120,6 +121,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
 	priv_fdir_disable(priv);
+	priv_flow_stop(priv);
 	priv_dev_interrupt_handler_uninstall(priv, dev);
 	priv->started = 0;
 	priv_unlock(priv);
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
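
A detail worth spelling out in priv_flow_create() above: priv_flow_validate()
runs twice on purpose. The first pass, with flow.ibv_attr still NULL, only
accumulates each item's dst_sz into flow.offset to size the attribute buffer;
after rte_malloc(), the offset is reset and the second pass actually writes
the Verbs specifications. A minimal standalone sketch of the same
size-then-fill idiom (hypothetical names, not driver code):

#include <stdlib.h>
#include <string.h>

struct buf {
	char *base;	/* NULL during the measuring pass */
	size_t off;
};

static void emit(struct buf *b, const void *src, size_t len)
{
	if (b->base)
		memcpy(b->base + b->off, src, len);
	b->off += len;
}

static char *build(const void *hdr, size_t hdr_len,
		   const void *spec, size_t spec_len)
{
	struct buf b = { NULL, 0 };

	emit(&b, hdr, hdr_len);		/* pass 1: measure only */
	emit(&b, spec, spec_len);
	b.base = malloc(b.off);
	if (!b.base)
		return NULL;
	b.off = 0;			/* pass 2: fill for real */
	emit(&b, hdr, hdr_len);
	emit(&b, spec, spec_len);
	return b.base;
}

The benefit is a single allocation sized exactly for the pattern, at the cost
of walking the item list twice.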

* [PATCH v4 3/6] net/mlx5: support VLAN flow item
  2016-12-21 15:19   ` [PATCH v3 " Nelio Laranjeiro
                       ` (2 preceding siblings ...)
  2016-12-28 10:37     ` [PATCH v4 2/6] net/mlx5: support basic flow items and actions Nelio Laranjeiro
@ 2016-12-28 10:37     ` Nelio Laranjeiro
  2016-12-28 10:37     ` [PATCH v4 4/6] net/mlx5: support VXLAN " Nelio Laranjeiro
                       ` (2 subsequent siblings)
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-28 10:37 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 54 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 53 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ebae2b5..549da6c 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -55,6 +55,9 @@ static int
 mlx5_flow_create_eth(const struct rte_flow_item *item, void *data);
 
 static int
+mlx5_flow_create_vlan(const struct rte_flow_item *item, void *data);
+
+static int
 mlx5_flow_create_ipv4(const struct rte_flow_item *item, void *data);
 
 static int
@@ -131,7 +134,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
 	},
 	[RTE_FLOW_ITEM_TYPE_ETH] = {
-		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
+			       RTE_FLOW_ITEM_TYPE_IPV4,
 			       RTE_FLOW_ITEM_TYPE_IPV6),
 		.actions = valid_actions,
 		.mask = &(const struct rte_flow_item_eth){
@@ -142,6 +146,17 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.convert = mlx5_flow_create_eth,
 		.dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
 	},
+	[RTE_FLOW_ITEM_TYPE_VLAN] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+			       RTE_FLOW_ITEM_TYPE_IPV6),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_vlan){
+			.tci = -1,
+		},
+		.mask_sz = sizeof(struct rte_flow_item_vlan),
+		.convert = mlx5_flow_create_vlan,
+		.dst_sz = 0,
+	},
 	[RTE_FLOW_ITEM_TYPE_IPV4] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
 			       RTE_FLOW_ITEM_TYPE_TCP),
@@ -348,6 +363,17 @@ priv_flow_validate(struct priv *priv,
 
 		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
 			continue;
+		/* Handle special situation for VLAN. */
+		if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (((const struct rte_flow_item_vlan *)items)->tci >
+			    ETHER_MAX_VLAN_ID) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   items,
+						   "wrong VLAN id value");
+				return -rte_errno;
+			}
+		}
 		for (i = 0;
 		     cur_item->items &&
 		     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
@@ -471,6 +497,32 @@ mlx5_flow_create_eth(const struct rte_flow_item *item, void *data)
 }
 
 /**
+ * Convert VLAN item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] data
+ *   User structure.
+ */
+static int
+mlx5_flow_create_vlan(const struct rte_flow_item *item, void *data)
+{
+	const struct rte_flow_item_vlan *spec = item->spec;
+	const struct rte_flow_item_vlan *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_eth *eth;
+	const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+
+	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
+	if (spec)
+		eth->val.vlan_tag = spec->tci;
+	if (mask)
+		eth->mask.vlan_tag = mask->tci;
+	eth->val.vlan_tag &= eth->mask.vlan_tag;
+	return 0;
+}
+
+/**
  * Convert IPv4 item to Verbs specification.
  *
  * @param item[in]
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
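
One subtlety in the hunks above: RTE_FLOW_ITEM_TYPE_VLAN declares .dst_sz = 0
because Verbs has no separate VLAN specification; mlx5_flow_create_vlan()
instead steps back sizeof(struct ibv_exp_flow_spec_eth) bytes from the current
offset and patches the vlan_tag fields of the Ethernet spec emitted by the
preceding item. A toy sketch of that back-patching, with a stand-in struct
(hypothetical layout, not the Verbs one):

#include <stddef.h>
#include <stdint.h>

struct toy_eth_spec {
	uint8_t dst[6];
	uint8_t src[6];
	uint16_t vlan_tag;
};

/* buf/off play the role of flow->ibv_attr/flow->offset; off already
 * sits just past the Ethernet spec written by the previous item. */
static void patch_vlan(uint8_t *buf, size_t off, uint16_t tci)
{
	struct toy_eth_spec *eth =
		(void *)(buf + off - sizeof(*eth));

	eth->vlan_tag = tci;	/* merge VLAN match into existing spec */
}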

* [PATCH v4 4/6] net/mlx5: support VXLAN flow item
  2016-12-21 15:19   ` [PATCH v3 " Nelio Laranjeiro
                       ` (3 preceding siblings ...)
  2016-12-28 10:37     ` [PATCH v4 3/6] net/mlx5: support VLAN flow item Nelio Laranjeiro
@ 2016-12-28 10:37     ` Nelio Laranjeiro
  2016-12-28 10:37     ` [PATCH v4 5/6] net/mlx5: support mark flow action Nelio Laranjeiro
  2016-12-28 10:37     ` [PATCH v4 6/6] net/mlx5: extend IPv4 flow item Nelio Laranjeiro
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-28 10:37 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 72 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 66 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 549da6c..1ec0ef5 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -69,6 +69,9 @@ mlx5_flow_create_udp(const struct rte_flow_item *item, void *data);
 static int
 mlx5_flow_create_tcp(const struct rte_flow_item *item, void *data);
 
+static int
+mlx5_flow_create_vxlan(const struct rte_flow_item *item, void *data);
+
 struct rte_flow {
 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
 	struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
@@ -123,7 +126,8 @@ static const enum rte_flow_action_type valid_actions[] = {
 static const struct mlx5_flow_items mlx5_flow_items[] = {
 	[RTE_FLOW_ITEM_TYPE_VOID] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VOID,
-			       RTE_FLOW_ITEM_TYPE_ETH),
+			       RTE_FLOW_ITEM_TYPE_ETH,
+			       RTE_FLOW_ITEM_TYPE_VXLAN),
 		.actions = valid_actions,
 		.mask = &(const struct rte_flow_item_eth){
 			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
@@ -196,6 +200,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6),
 	},
 	[RTE_FLOW_ITEM_TYPE_UDP] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
 		.actions = valid_actions,
 		.mask = &(const struct rte_flow_item_udp){
 			.hdr = {
@@ -219,12 +224,23 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.convert = mlx5_flow_create_tcp,
 		.dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
 	},
+	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_vxlan){
+			.vni = "\xff\xff\xff",
+		},
+		.mask_sz = sizeof(struct rte_flow_item_vxlan),
+		.convert = mlx5_flow_create_vxlan,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_tunnel),
+	},
 };
 
 /** Structure to pass to the conversion function. */
 struct mlx5_flow {
 	struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
 	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+	uint32_t inner; /**< Set once VXLAN is encountered. */
 };
 
 struct mlx5_flow_action {
@@ -474,7 +490,7 @@ mlx5_flow_create_eth(const struct rte_flow_item *item, void *data)
 
 	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*eth = (struct ibv_exp_flow_spec_eth) {
-		.type = IBV_EXP_FLOW_SPEC_ETH,
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
 		.size = eth_size,
 	};
 	if (spec) {
@@ -541,7 +557,7 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, void *data)
 
 	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*ipv4 = (struct ibv_exp_flow_spec_ipv4) {
-		.type = IBV_EXP_FLOW_SPEC_IPV4,
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4,
 		.size = ipv4_size,
 	};
 	if (spec) {
@@ -584,7 +600,7 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, void *data)
 
 	ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*ipv6 = (struct ibv_exp_flow_spec_ipv6) {
-		.type = IBV_EXP_FLOW_SPEC_IPV6,
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6,
 		.size = ipv6_size,
 	};
 	if (spec) {
@@ -628,7 +644,7 @@ mlx5_flow_create_udp(const struct rte_flow_item *item, void *data)
 
 	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*udp = (struct ibv_exp_flow_spec_tcp_udp) {
-		.type = IBV_EXP_FLOW_SPEC_UDP,
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
 		.size = udp_size,
 	};
 	if (spec) {
@@ -666,7 +682,7 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item, void *data)
 
 	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*tcp = (struct ibv_exp_flow_spec_tcp_udp) {
-		.type = IBV_EXP_FLOW_SPEC_TCP,
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
 		.size = tcp_size,
 	};
 	if (spec) {
@@ -686,6 +702,49 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item, void *data)
 }
 
 /**
+ * Convert VXLAN item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] data
+ *   User structure.
+ */
+static int
+mlx5_flow_create_vxlan(const struct rte_flow_item *item, void *data)
+{
+	const struct rte_flow_item_vxlan *spec = item->spec;
+	const struct rte_flow_item_vxlan *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_tunnel *vxlan;
+	unsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel);
+	union vni {
+		uint32_t vlan_id;
+		uint8_t vni[4];
+	} id;
+
+	id.vni[0] = 0;
+	vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*vxlan = (struct ibv_exp_flow_spec_tunnel) {
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL,
+		.size = size,
+	};
+	if (spec) {
+		memcpy(&id.vni[1], spec->vni, 3);
+		vxlan->val.tunnel_id = id.vlan_id;
+	}
+	if (mask) {
+		memcpy(&id.vni[1], mask->vni, 3);
+		vxlan->mask.tunnel_id = id.vlan_id;
+	}
+	/* Remove unwanted bits from values. */
+	vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 0;
+	flow->inner = IBV_EXP_FLOW_SPEC_INNER;
+	return 0;
+}
+
+/**
  * Complete flow rule creation.
  *
  * @param priv
@@ -852,6 +911,7 @@ priv_flow_create(struct priv *priv,
 		.flags = 0,
 		.reserved = 0,
 	};
+	flow.inner = 0;
 	claim_zero(priv_flow_validate(priv, attr, items, actions,
 				      error, &flow));
 	action = (struct mlx5_flow_action){
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
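
The union in mlx5_flow_create_vxlan() above copies the 3-byte, network-order
VNI into bytes 1..3 of a 32-bit word while keeping byte 0 zero, so the bytes
reach the device in wire order regardless of host endianness. A self-contained
illustration (example value only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0 };
	const uint8_t wire_vni[3] = { 0x12, 0x34, 0x56 }; /* VNI 0x123456 */

	memcpy(&id.vni[1], wire_vni, 3);
	/* On a little-endian host this prints 0x56341200: the bytes
	 * stay in wire order inside the 32-bit tunnel_id. */
	printf("tunnel_id = 0x%08x\n", id.vlan_id);
	return 0;
}

Note also that the converter sets flow->inner = IBV_EXP_FLOW_SPEC_INNER at the
end, which is why every other hunk in this patch ORs flow->inner into the spec
type: items parsed after VXLAN describe the inner headers.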

* [PATCH v4 5/6] net/mlx5: support mark flow action
  2016-12-21 15:19   ` [PATCH v3 " Nelio Laranjeiro
                       ` (4 preceding siblings ...)
  2016-12-28 10:37     ` [PATCH v4 4/6] net/mlx5: support VXLAN " Nelio Laranjeiro
@ 2016-12-28 10:37     ` Nelio Laranjeiro
  2016-12-28 10:37     ` [PATCH v4 6/6] net/mlx5: extend IPv4 flow item Nelio Laranjeiro
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-28 10:37 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 78 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_prm.h  | 70 ++++++++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_rxtx.c | 12 ++++++-
 drivers/net/mlx5/mlx5_rxtx.h |  3 +-
 4 files changed, 160 insertions(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 1ec0ef5..01f7a77 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -50,6 +50,7 @@
 #include <rte_malloc.h>
 
 #include "mlx5.h"
+#include "mlx5_prm.h"
 
 static int
 mlx5_flow_create_eth(const struct rte_flow_item *item, void *data);
@@ -81,6 +82,7 @@ struct rte_flow {
 	struct ibv_exp_wq *wq; /**< Verbs work queue. */
 	struct ibv_cq *cq; /**< Verbs completion queue. */
 	struct rxq *rxq; /**< Pointer to the queue, NULL if drop queue. */
+	uint32_t mark:1; /**< Set if the flow is marked. */
 };
 
 /** Static initializer for items. */
@@ -119,6 +121,7 @@ struct mlx5_flow_items {
 static const enum rte_flow_action_type valid_actions[] = {
 	RTE_FLOW_ACTION_TYPE_DROP,
 	RTE_FLOW_ACTION_TYPE_QUEUE,
+	RTE_FLOW_ACTION_TYPE_MARK,
 	RTE_FLOW_ACTION_TYPE_END,
 };
 
@@ -246,7 +249,9 @@ struct mlx5_flow {
 struct mlx5_flow_action {
 	uint32_t queue:1; /**< Target is a receive queue. */
 	uint32_t drop:1; /**< Target is a drop queue. */
+	uint32_t mark:1; /**< Mark is present in the flow. */
 	uint32_t queue_id; /**< Identifier of the queue. */
+	uint32_t mark_id; /**< Mark identifier. */
 };
 
 /**
@@ -341,6 +346,7 @@ priv_flow_validate(struct priv *priv,
 	struct mlx5_flow_action action = {
 		.queue = 0,
 		.drop = 0,
+		.mark = 0,
 	};
 
 	(void)priv;
@@ -427,10 +433,26 @@ priv_flow_validate(struct priv *priv,
 			if (!queue || (queue->index > (priv->rxqs_n - 1)))
 				goto exit_action_not_supported;
 			action.queue = 1;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
+			const struct rte_flow_action_mark *mark =
+				(const struct rte_flow_action_mark *)
+				actions->conf;
+
+			if (mark && (mark->id >= MLX5_FLOW_MARK_MAX)) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "mark must be between 0"
+						   " and 16777199");
+				return -rte_errno;
+			}
+			action.mark = 1;
 		} else {
 			goto exit_action_not_supported;
 		}
 	}
+	if (action.mark && !flow->ibv_attr)
+		flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
 	if (!action.queue && !action.drop) {
 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "no valid action");
@@ -745,6 +767,30 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, void *data)
 }
 
 /**
+ * Convert mark/flag action to Verbs specification.
+ *
+ * @param flow
+ *   Pointer to MLX5 flow structure.
+ * @param mark_id
+ *   Mark identifier.
+ */
+static int
+mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
+{
+	struct ibv_exp_flow_spec_action_tag *tag;
+	unsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag);
+
+	tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*tag = (struct ibv_exp_flow_spec_action_tag){
+		.type = IBV_EXP_FLOW_SPEC_ACTION_TAG,
+		.size = size,
+		.tag_id = mlx5_flow_mark_set(mark_id),
+	};
+	++flow->ibv_attr->num_of_specs;
+	return 0;
+}
+
+/**
  * Complete flow rule creation.
  *
  * @param priv
@@ -800,8 +846,10 @@ priv_flow_create_action_queue(struct priv *priv,
 		rxq = container_of((*priv->rxqs)[action->queue_id],
 				   struct rxq_ctrl, rxq);
 		rte_flow->rxq = &rxq->rxq;
+		rxq->rxq.mark |= action->mark;
 		rte_flow->wq = rxq->wq;
 	}
+	rte_flow->mark = action->mark;
 	rte_flow->ibv_attr = ibv_attr;
 	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
 		priv->ctx,
@@ -917,6 +965,8 @@ priv_flow_create(struct priv *priv,
 	action = (struct mlx5_flow_action){
 		.queue = 0,
 		.drop = 0,
+		.mark = 0,
+		.mark_id = MLX5_FLOW_MARK_DEFAULT,
 	};
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
 		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
@@ -928,6 +978,14 @@ priv_flow_create(struct priv *priv,
 				 actions->conf)->index;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
 			action.drop = 1;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
+			const struct rte_flow_action_mark *mark =
+				(const struct rte_flow_action_mark *)
+				actions->conf;
+
+			if (mark)
+				action.mark_id = mark->id;
+			action.mark = 1;
 		} else {
 			rte_flow_error_set(error, ENOTSUP,
 					   RTE_FLOW_ERROR_TYPE_ACTION,
@@ -935,6 +993,10 @@ priv_flow_create(struct priv *priv,
 			goto exit;
 		}
 	}
+	if (action.mark) {
+		mlx5_flow_create_flag_mark(&flow, action.mark_id);
+		flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
+	}
 	rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
 						 &action, error);
 	return rte_flow;
@@ -993,6 +1055,18 @@ priv_flow_destroy(struct priv *priv,
 		claim_zero(ibv_exp_destroy_wq(flow->wq));
 	if (!flow->rxq && flow->cq)
 		claim_zero(ibv_destroy_cq(flow->cq));
+	if (flow->mark) {
+		struct rte_flow *tmp;
+		uint32_t mark_n = 0;
+
+		for (tmp = LIST_FIRST(&priv->flows);
+		     tmp;
+		     tmp = LIST_NEXT(tmp, next)) {
+			if ((flow->rxq == tmp->rxq) && tmp->mark)
+				++mark_n;
+		}
+		flow->rxq->mark = !!mark_n;
+	}
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
@@ -1072,6 +1146,8 @@ priv_flow_stop(struct priv *priv)
 	     flow = LIST_NEXT(flow, next)) {
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
 		flow->ibv_flow = NULL;
+		if (flow->mark)
+			flow->rxq->mark = 0;
 		DEBUG("Flow %p removed", (void *)flow);
 	}
 }
@@ -1101,6 +1177,8 @@ priv_flow_start(struct priv *priv)
 			return rte_errno;
 		}
 		DEBUG("Flow %p applied", (void *)flow);
+		if (flow->rxq)
+			flow->rxq->mark |= flow->mark;
 	}
 	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 9cd9fdf..d9bb332 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -34,6 +34,8 @@
 #ifndef RTE_PMD_MLX5_PRM_H_
 #define RTE_PMD_MLX5_PRM_H_
 
+#include <assert.h>
+
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
 #ifdef PEDANTIC
@@ -106,6 +108,15 @@
 /* Outer UDP header and checksum OK. */
 #define MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK (1u << 6)
 
+/* INVALID is used by packets matching no flow rules. */
+#define MLX5_FLOW_MARK_INVALID 0
+
+/* Maximum allowed value to mark a packet. */
+#define MLX5_FLOW_MARK_MAX 0xfffff0
+
+/* Default mark value used when none is provided. */
+#define MLX5_FLOW_MARK_DEFAULT 0xffffff
+
 /* Subset of struct mlx5_wqe_eth_seg. */
 struct mlx5_wqe_eth_seg_small {
 	uint32_t rsvd0;
@@ -183,10 +194,67 @@ struct mlx5_cqe {
 	uint8_t rsvd2[12];
 	uint32_t byte_cnt;
 	uint64_t timestamp;
-	uint8_t rsvd3[4];
+	uint32_t sop_drop_qpn;
 	uint16_t wqe_counter;
 	uint8_t rsvd4;
 	uint8_t op_own;
 };
 
+/**
+ * Convert a user mark to flow mark.
+ *
+ * @param val
+ *   Mark value to convert.
+ *
+ * @return
+ *   Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_set(uint32_t val)
+{
+	uint32_t ret;
+
+	/*
+	 * Add one to the user value to differentiate un-marked flows from
+	 * marked flows.
+	 */
+	++val;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+	/*
+	 * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
+	 * word, byte-swapped by the kernel on little-endian systems. In this
+	 * case, right-shifting the resulting big-endian value ensures the
+	 * least significant 24 bits are retained when converting it back.
+	 */
+	ret = rte_cpu_to_be_32(val) >> 8;
+#else
+	ret = val;
+#endif
+	assert(ret <= MLX5_FLOW_MARK_MAX);
+	return ret;
+}
+
+/**
+ * Convert a mark to user mark.
+ *
+ * @param val
+ *   Mark value to convert.
+ *
+ * @return
+ *   Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_get(uint32_t val)
+{
+	/*
+	 * Subtract one from the retrieved value. It was added by
+	 * mlx5_flow_mark_set() to distinguish unmarked flows.
+	 */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+	return (val >> 8) - 1;
+#else
+	return val - 1;
+#endif
+}
+
 #endif /* RTE_PMD_MLX5_PRM_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 6f86ded..8f0b4a6 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -113,7 +113,7 @@ static inline int
 check_cqe_seen(volatile struct mlx5_cqe *cqe)
 {
 	static const uint8_t magic[] = "seen";
-	volatile uint8_t (*buf)[sizeof(cqe->rsvd3)] = &cqe->rsvd3;
+	volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
 	int ret = 1;
 	unsigned int i;
 
@@ -1357,6 +1357,16 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				pkt->hash.rss = rss_hash_res;
 				pkt->ol_flags = PKT_RX_RSS_HASH;
 			}
+			if (rxq->mark &&
+			    ((cqe->sop_drop_qpn !=
+			      htonl(MLX5_FLOW_MARK_INVALID)) &&
+			     (cqe->sop_drop_qpn !=
+			      htonl(MLX5_FLOW_MARK_DEFAULT)))) {
+				pkt->hash.fdir.hi =
+					mlx5_flow_mark_get(cqe->sop_drop_qpn);
+				pkt->ol_flags &= ~PKT_RX_RSS_HASH;
+				pkt->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+			}
 			if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
 			    rxq->crc_present) {
 				if (rxq->csum) {
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index e244c48..302ca49 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -114,7 +114,8 @@ struct rxq {
 	unsigned int elts_n:4; /* Log 2 of Mbufs. */
 	unsigned int port_id:8;
 	unsigned int rss_hash:1; /* RSS hash result is enabled. */
-	unsigned int :9; /* Remaining bits. */
+	unsigned int mark:1; /* Marked flow available on the queue. */
+	unsigned int :8; /* Remaining bits. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t rq_ci;
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
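
To make the byte-order gymnastics of mlx5_flow_mark_set() and
mlx5_flow_mark_get() above concrete, here is a little-endian walk-through for
user mark 5. The bswap step stands in for the kernel byte swap mentioned in
the comment; it is an assumption about what happens between tag_id and the
CQE word, not code from this series:

#include <assert.h>
#include <arpa/inet.h>	/* htonl() stands in for rte_cpu_to_be_32() */
#include <stdint.h>

int main(void)
{
	uint32_t user = 5;
	uint32_t tag = htonl(user + 1) >> 8;	/* mlx5_flow_mark_set() */
	uint32_t cqe = __builtin_bswap32(tag);	/* kernel swap (assumed) */
	uint32_t back = (cqe >> 8) - 1;		/* mlx5_flow_mark_get() */

	/* These asserts assume a little-endian host. */
	assert(tag == 0x00060000);
	assert(cqe == 0x00000600);
	assert(back == user);
	return 0;
}

On big-endian hosts both helpers reduce to a plain +1/-1, as the #else
branches show.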

* [PATCH v4 6/6] net/mlx5: extend IPv4 flow item
  2016-12-21 15:19   ` [PATCH v3 " Nelio Laranjeiro
                       ` (5 preceding siblings ...)
  2016-12-28 10:37     ` [PATCH v4 5/6] net/mlx5: support mark flow action Nelio Laranjeiro
@ 2016-12-28 10:37     ` Nelio Laranjeiro
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-28 10:37 UTC (permalink / raw)
  To: dev; +Cc: Adrien Mazarguil

This commit adds:
- Type of service
- Next protocol ID

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 01f7a77..77021b5 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -172,11 +172,13 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 			.hdr = {
 				.src_addr = -1,
 				.dst_addr = -1,
+				.type_of_service = -1,
+				.next_proto_id = -1,
 			},
 		},
 		.mask_sz = sizeof(struct rte_flow_item_ipv4),
 		.convert = mlx5_flow_create_ipv4,
-		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4),
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4_ext),
 	},
 	[RTE_FLOW_ITEM_TYPE_IPV6] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
@@ -574,29 +576,35 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, void *data)
 	const struct rte_flow_item_ipv4 *spec = item->spec;
 	const struct rte_flow_item_ipv4 *mask = item->mask;
 	struct mlx5_flow *flow = (struct mlx5_flow *)data;
-	struct ibv_exp_flow_spec_ipv4 *ipv4;
-	unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4);
+	struct ibv_exp_flow_spec_ipv4_ext *ipv4;
+	unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext);
 
 	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
-	*ipv4 = (struct ibv_exp_flow_spec_ipv4) {
-		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4,
+	*ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
 		.size = ipv4_size,
 	};
 	if (spec) {
-		ipv4->val = (struct ibv_exp_flow_ipv4_filter){
+		ipv4->val = (struct ibv_exp_flow_ipv4_ext_filter){
 			.src_ip = spec->hdr.src_addr,
 			.dst_ip = spec->hdr.dst_addr,
+			.proto = spec->hdr.next_proto_id,
+			.tos = spec->hdr.type_of_service,
 		};
 	}
 	if (mask) {
-		ipv4->mask = (struct ibv_exp_flow_ipv4_filter){
+		ipv4->mask = (struct ibv_exp_flow_ipv4_ext_filter){
 			.src_ip = mask->hdr.src_addr,
 			.dst_ip = mask->hdr.dst_addr,
+			.proto = mask->hdr.next_proto_id,
+			.tos = mask->hdr.type_of_service,
 		};
 	}
 	/* Remove unwanted bits from values. */
 	ipv4->val.src_ip &= ipv4->mask.src_ip;
 	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+	ipv4->val.proto &= ipv4->mask.proto;
+	ipv4->val.tos &= ipv4->mask.tos;
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 1;
 	return 0;
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
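
The val &= mask trimming kept at the end of the hunk is what makes
prefix-style matches well-defined: if an application passes spec bits outside
the mask (say 10.1.2.3 with a /8 mask), the converter canonicalizes the value
before handing it to Verbs. A plain-C illustration of the same trimming, not
driver code:

#include <assert.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	/* User asks for 10.1.2.3/8: spec carries stray host bits. */
	uint32_t spec = htonl(0x0a010203);
	uint32_t mask = htonl(0xff000000);

	spec &= mask;	/* same trimming as ipv4->val.src_ip &= mask */
	assert(spec == htonl(0x0a000000));	/* 10.0.0.0 */
	return 0;
}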

* [PATCH v5 0/6] net/mlx5: support flow API
  2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
@ 2016-12-29 15:15       ` Nelio Laranjeiro
  2017-01-03 16:19         ` Ferruh Yigit
  2017-01-04 14:48         ` Ferruh Yigit
  2016-12-29 15:15       ` [PATCH v5 1/6] net/mlx5: add preliminary flow API support Nelio Laranjeiro
                         ` (5 subsequent siblings)
  6 siblings, 2 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-29 15:15 UTC (permalink / raw)
  To: dev, Ferruh Yigit; +Cc: Adrien Mazarguil

Changes in v5:

 - Fix masking when only spec is present in item structure.
 - Fix first element of flow items array.

Changes in v4:

 - Simplify flow parsing by using a graph.
 - Add VXLAN flow item.
 - Add mark flow action.
 - Extend IPv4 filter item (Type of service, Next Protocol ID).

Changes in v3:

 - Fix Ethernet ether type issue.

Changes in v2:

 - Fix several issues.
 - Support VLAN filtering.

Nelio Laranjeiro (6):
  net/mlx5: add preliminary flow API support
  net/mlx5: support basic flow items and actions
  net/mlx5: support VLAN flow item
  net/mlx5: support VXLAN flow item
  net/mlx5: support mark flow action
  net/mlx5: extend IPv4 flow item

 drivers/net/mlx5/Makefile       |    1 +
 drivers/net/mlx5/mlx5.h         |   19 +
 drivers/net/mlx5/mlx5_fdir.c    |   15 +
 drivers/net/mlx5/mlx5_flow.c    | 1248 +++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_prm.h     |   70 ++-
 drivers/net/mlx5/mlx5_rxtx.c    |   12 +-
 drivers/net/mlx5/mlx5_rxtx.h    |    3 +-
 drivers/net/mlx5/mlx5_trigger.c |    2 +
 8 files changed, 1367 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/mlx5/mlx5_flow.c

-- 
2.1.4

^ permalink raw reply	[flat|nested] 38+ messages in thread
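
For readers jumping straight into the patches: the "graph" mentioned in the
v4 changelog is an adjacency list indexed by item type, walked once per
pattern to check that each item may legally follow the previous one. A
compact generic sketch of that traversal (toy item set, not the driver's
tables):

#include <stddef.h>

enum item { END, ETH, VLAN, IPV4, UDP };

/* Adjacency list: which item types may follow each item type. */
static const enum item *const next_items[] = {
	[END]  = (const enum item []){ ETH, END },
	[ETH]  = (const enum item []){ VLAN, IPV4, END },
	[VLAN] = (const enum item []){ IPV4, END },
	[IPV4] = (const enum item []){ UDP, END },
	[UDP]  = (const enum item []){ END },
};

/* Return 0 when the pattern follows the graph, -1 otherwise. */
static int validate(const enum item *pattern)
{
	enum item cur = END;	/* start node, as in mlx5_flow_items[] */

	for (; *pattern != END; ++pattern) {
		const enum item *n = next_items[cur];
		int ok = 0;

		for (; *n != END; ++n)
			if (*n == *pattern)
				ok = 1;
		if (!ok)
			return -1;
		cur = *pattern;
	}
	return 0;
}

For example, validate() accepts { ETH, IPV4, UDP, END } but rejects
{ IPV4, ETH, END }, exactly like priv_flow_validate() in patch 2/6 below.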

* [PATCH v5 1/6] net/mlx5: add preliminary flow API support
  2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
  2016-12-29 15:15       ` [PATCH v5 " Nelio Laranjeiro
@ 2016-12-29 15:15       ` Nelio Laranjeiro
  2016-12-29 15:15       ` [PATCH v5 2/6] net/mlx5: support basic flow items and actions Nelio Laranjeiro
                         ` (4 subsequent siblings)
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-29 15:15 UTC (permalink / raw)
  To: dev, Ferruh Yigit; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/Makefile    |   1 +
 drivers/net/mlx5/mlx5.h      |  16 ++++++
 drivers/net/mlx5/mlx5_fdir.c |  15 ++++++
 drivers/net/mlx5/mlx5_flow.c | 124 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 156 insertions(+)
 create mode 100644 drivers/net/mlx5/mlx5_flow.c

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index cf87f0b..6d1338a 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -48,6 +48,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
 
 # Dependencies.
 DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 79b7a60..04f4eaa 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -59,6 +59,7 @@
 #include <rte_spinlock.h>
 #include <rte_interrupts.h>
 #include <rte_errno.h>
+#include <rte_flow.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -268,4 +269,19 @@ void priv_fdir_enable(struct priv *);
 int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
 			 enum rte_filter_op, void *);
 
+/* mlx5_flow.c */
+
+int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
+		       const struct rte_flow_item [],
+		       const struct rte_flow_action [],
+		       struct rte_flow_error *);
+struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
+				  const struct rte_flow_attr *,
+				  const struct rte_flow_item [],
+				  const struct rte_flow_action [],
+				  struct rte_flow_error *);
+int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
+		      struct rte_flow_error *);
+int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index 1acf682..f80c58b 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -55,6 +55,8 @@
 #include <rte_malloc.h>
 #include <rte_ethdev.h>
 #include <rte_common.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -1042,6 +1044,14 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
 	return ret;
 }
 
+static const struct rte_flow_ops mlx5_flow_ops = {
+	.validate = mlx5_flow_validate,
+	.create = mlx5_flow_create,
+	.destroy = mlx5_flow_destroy,
+	.flush = mlx5_flow_flush,
+	.query = NULL,
+};
+
 /**
  * Manage filter operations.
  *
@@ -1067,6 +1077,11 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 	struct priv *priv = dev->data->dev_private;
 
 	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &mlx5_flow_ops;
+		return 0;
 	case RTE_ETH_FILTER_FDIR:
 		priv_lock(priv);
 		ret = priv_fdir_ctrl_func(priv, filter_op, arg);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
new file mode 100644
index 0000000..4fdefa0
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -0,0 +1,124 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 6WIND S.A.
+ *   Copyright 2016 Mellanox.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of 6WIND S.A. nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "mlx5.h"
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)attr;
+	(void)items;
+	(void)actions;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx5_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)attr;
+	(void)items;
+	(void)actions;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL, "not implemented yet");
+	return NULL;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)flow;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)error;
+	rte_flow_error_set(error, ENOTSUP,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL, "not implemented yet");
+	return -rte_errno;
+}
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
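
With the RTE_ETH_FILTER_GENERIC case above in place, applications never call
these mlx5_* functions directly; rte_flow_validate() and rte_flow_create()
look the ops table up through the filter_ctrl callback. A hedged sketch of a
caller building the simplest rule this series aims at (match-all Ethernet to
queue 0, on an already-configured port; the function name is made up, and the
uint8_t port id matches the ethdev API of this period):

#include <rte_flow.h>

static struct rte_flow *
match_all_to_queue0(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

Until patch 2/6 lands, both calls return -ENOTSUP through the stubs above.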

* [PATCH v5 2/6] net/mlx5: support basic flow items and actions
  2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
  2016-12-29 15:15       ` [PATCH v5 " Nelio Laranjeiro
  2016-12-29 15:15       ` [PATCH v5 1/6] net/mlx5: add preliminary flow API support Nelio Laranjeiro
@ 2016-12-29 15:15       ` Nelio Laranjeiro
  2017-01-04 17:49         ` Ferruh Yigit
  2016-12-29 15:15       ` [PATCH v5 3/6] net/mlx5: support VLAN flow item Nelio Laranjeiro
                         ` (3 subsequent siblings)
  6 siblings, 1 reply; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-29 15:15 UTC (permalink / raw)
  To: dev, Ferruh Yigit; +Cc: Adrien Mazarguil

Introduce initial software support for rte_flow rules.

VLAN and VXLAN are still not supported.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5.h         |   3 +
 drivers/net/mlx5/mlx5_flow.c    | 954 ++++++++++++++++++++++++++++++++++++++--
 drivers/net/mlx5/mlx5_trigger.c |   2 +
 3 files changed, 930 insertions(+), 29 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 04f4eaa..c415ce3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -136,6 +136,7 @@ struct priv {
 	unsigned int reta_idx_n; /* RETA index size. */
 	struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
 	struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
+	LIST_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	rte_spinlock_t lock; /* Lock for control functions. */
 };
@@ -283,5 +284,7 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
 int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
 		      struct rte_flow_error *);
 int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+int priv_flow_start(struct priv *);
+void priv_flow_stop(struct priv *);
 
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4fdefa0..4f6696e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -31,12 +31,387 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <sys/queue.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
 #include <rte_ethdev.h>
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
+#include <rte_malloc.h>
 
 #include "mlx5.h"
 
+static int
+mlx5_flow_create_eth(const struct rte_flow_item *item,
+		     const void *default_mask,
+		     void *data);
+
+static int
+mlx5_flow_create_ipv4(const struct rte_flow_item *item,
+		      const void *default_mask,
+		      void *data);
+
+static int
+mlx5_flow_create_ipv6(const struct rte_flow_item *item,
+		      const void *default_mask,
+		      void *data);
+
+static int
+mlx5_flow_create_udp(const struct rte_flow_item *item,
+		     const void *default_mask,
+		     void *data);
+
+static int
+mlx5_flow_create_tcp(const struct rte_flow_item *item,
+		     const void *default_mask,
+		     void *data);
+
+struct rte_flow {
+	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+	struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+	struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+	struct ibv_qp *qp; /**< Verbs queue pair. */
+	struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
+	struct ibv_exp_wq *wq; /**< Verbs work queue. */
+	struct ibv_cq *cq; /**< Verbs completion queue. */
+	struct rxq *rxq; /**< Pointer to the queue, NULL if drop queue. */
+};
+
+/** Static initializer for items. */
+#define ITEMS(...) \
+	(const enum rte_flow_item_type []){ \
+		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+	}
+
+/** Structure to generate a simple graph of layers supported by the NIC. */
+struct mlx5_flow_items {
+	/** List of possible actions for these items. */
+	const enum rte_flow_action_type *const actions;
+	/** Bit-masks corresponding to the possibilities for the item. */
+	const void *mask;
+	/** Bit-masks size in bytes. */
+	const unsigned int mask_sz;
+	/**
+	 * Conversion function from rte_flow to NIC specific flow.
+	 *
+	 * @param item
+	 *   rte_flow item to convert.
+	 * @param default_mask
+	 *   Default bit-masks to use when item->mask is not provided.
+	 * @param data
+	 *   Internal structure to store the conversion.
+	 *
+	 * @return
+	 *   0 on success, negative value otherwise.
+	 */
+	int (*convert)(const struct rte_flow_item *item,
+		       const void *default_mask,
+		       void *data);
+	/** Size in bytes of the destination structure. */
+	const unsigned int dst_sz;
+	/** List of possible following items. */
+	const enum rte_flow_item_type *const items;
+};
+
+/** Valid action for this PMD. */
+static const enum rte_flow_action_type valid_actions[] = {
+	RTE_FLOW_ACTION_TYPE_DROP,
+	RTE_FLOW_ACTION_TYPE_QUEUE,
+	RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Graph of supported items and associated actions. */
+static const struct mlx5_flow_items mlx5_flow_items[] = {
+	[RTE_FLOW_ITEM_TYPE_END] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+	},
+	[RTE_FLOW_ITEM_TYPE_ETH] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+			       RTE_FLOW_ITEM_TYPE_IPV6),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_eth){
+			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+			.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+		},
+		.mask_sz = sizeof(struct rte_flow_item_eth),
+		.convert = mlx5_flow_create_eth,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
+	},
+	[RTE_FLOW_ITEM_TYPE_IPV4] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+			       RTE_FLOW_ITEM_TYPE_TCP),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_ipv4){
+			.hdr = {
+				.src_addr = -1,
+				.dst_addr = -1,
+			},
+		},
+		.mask_sz = sizeof(struct rte_flow_item_ipv4),
+		.convert = mlx5_flow_create_ipv4,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4),
+	},
+	[RTE_FLOW_ITEM_TYPE_IPV6] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+			       RTE_FLOW_ITEM_TYPE_TCP),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_ipv6){
+			.hdr = {
+				.src_addr = {
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+				},
+				.dst_addr = {
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+					0xff, 0xff, 0xff, 0xff,
+				},
+			},
+		},
+		.mask_sz = sizeof(struct rte_flow_item_ipv6),
+		.convert = mlx5_flow_create_ipv6,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6),
+	},
+	[RTE_FLOW_ITEM_TYPE_UDP] = {
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_udp){
+			.hdr = {
+				.src_port = -1,
+				.dst_port = -1,
+			},
+		},
+		.mask_sz = sizeof(struct rte_flow_item_udp),
+		.convert = mlx5_flow_create_udp,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+	},
+	[RTE_FLOW_ITEM_TYPE_TCP] = {
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_tcp){
+			.hdr = {
+				.src_port = -1,
+				.dst_port = -1,
+			},
+		},
+		.mask_sz = sizeof(struct rte_flow_item_tcp),
+		.convert = mlx5_flow_create_tcp,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+	},
+};
+
+/** Structure to pass to the conversion function. */
+struct mlx5_flow {
+	struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
+	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+};
+
+struct mlx5_flow_action {
+	uint32_t queue:1; /**< Target is a receive queue. */
+	uint32_t drop:1; /**< Target is a drop queue. */
+	uint32_t queue_id; /**< Identifier of the queue. */
+};
+
+/**
+ * Check support for a given item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] mask
+ *   Bit-masks covering supported fields to compare with spec, last and
+ *   mask in \p item.
+ * @param size
+ *   Bit-Mask size in bytes.
+ *
+ * @return
+ *   0 on success.
+ */
+static int
+mlx5_flow_item_validate(const struct rte_flow_item *item,
+			const uint8_t *mask, unsigned int size)
+{
+	int ret = 0;
+
+	if (!item->spec && (item->mask || item->last))
+		return -1;
+	if (item->spec && !item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->spec;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->last && !item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->last;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->mask) {
+		unsigned int i;
+		const uint8_t *spec = item->mask;
+
+		for (i = 0; i < size; ++i)
+			if ((spec[i] | mask[i]) != mask[i])
+				return -1;
+	}
+	if (item->spec && item->last) {
+		uint8_t spec[size];
+		uint8_t last[size];
+		const uint8_t *apply = mask;
+		unsigned int i;
+
+		if (item->mask)
+			apply = item->mask;
+		for (i = 0; i < size; ++i) {
+			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+		}
+		ret = memcmp(spec, last, size);
+	}
+	return ret;
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @param[in, out] flow
+ *   Flow structure to update.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_validate(struct priv *priv,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error,
+		   struct mlx5_flow *flow)
+{
+	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
+	struct mlx5_flow_action action = {
+		.queue = 0,
+		.drop = 0,
+	};
+
+	(void)priv;
+	if (attr->group) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   NULL,
+				   "groups are not supported");
+		return -rte_errno;
+	}
+	if (attr->priority) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   NULL,
+				   "priorities are not supported");
+		return -rte_errno;
+	}
+	if (attr->egress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   NULL,
+				   "egress is not supported");
+		return -rte_errno;
+	}
+	if (!attr->ingress) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   NULL,
+				   "only ingress is supported");
+		return -rte_errno;
+	}
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+		const struct mlx5_flow_items *token = NULL;
+		unsigned int i;
+		int err;
+
+		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+			continue;
+		for (i = 0;
+		     cur_item->items &&
+		     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+		     ++i) {
+			if (cur_item->items[i] == items->type) {
+				token = &mlx5_flow_items[items->type];
+				break;
+			}
+		}
+		if (!token)
+			goto exit_item_not_supported;
+		cur_item = token;
+		err = mlx5_flow_item_validate(items,
+					      (const uint8_t *)cur_item->mask,
+					      cur_item->mask_sz);
+		if (err)
+			goto exit_item_not_supported;
+		if (flow->ibv_attr && cur_item->convert) {
+			err = cur_item->convert(items, cur_item->mask, flow);
+			if (err)
+				goto exit_item_not_supported;
+		}
+		flow->offset += cur_item->dst_sz;
+	}
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+			continue;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+			action.drop = 1;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			const struct rte_flow_action_queue *queue =
+				(const struct rte_flow_action_queue *)
+				actions->conf;
+
+			if (!queue || (queue->index > (priv->rxqs_n - 1)))
+				goto exit_action_not_supported;
+			action.queue = 1;
+		} else {
+			goto exit_action_not_supported;
+		}
+	}
+	if (!action.queue && !action.drop) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "no valid action");
+		return -rte_errno;
+	}
+	return 0;
+exit_item_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+			   items, "item not supported");
+	return -rte_errno;
+exit_action_not_supported:
+	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+			   actions, "action not supported");
+	return -rte_errno;
+}
+
 /**
  * Validate a flow supported by the NIC.
  *
@@ -50,15 +425,436 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attr;
-	(void)items;
-	(void)actions;
-	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	struct priv *priv = dev->data->dev_private;
+	int ret;
+	struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };
+
+	priv_lock(priv);
+	ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
+	priv_unlock(priv);
+	return ret;
+}
+
+/**
+ * Convert Ethernet item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] default_mask
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param[in, out] data
+ *   User structure.
+ */
+static int
+mlx5_flow_create_eth(const struct rte_flow_item *item,
+		     const void *default_mask,
+		     void *data)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_eth *eth;
+	const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+	unsigned int i;
+
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 2;
+	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*eth = (struct ibv_exp_flow_spec_eth) {
+		.type = IBV_EXP_FLOW_SPEC_ETH,
+		.size = eth_size,
+	};
+	if (!spec)
+		return 0;
+	if (!mask)
+		mask = default_mask;
+	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+	memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+	memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+	/* Remove unwanted bits from values. */
+	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
+		eth->val.src_mac[i] &= eth->mask.src_mac[i];
+	}
+	return 0;
+}
+
+/**
+ * Convert IPv4 item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] default_mask
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param[in, out] data
+ *   User structure.
+ */
+static int
+mlx5_flow_create_ipv4(const struct rte_flow_item *item,
+		      const void *default_mask,
+		      void *data)
+{
+	const struct rte_flow_item_ipv4 *spec = item->spec;
+	const struct rte_flow_item_ipv4 *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_ipv4 *ipv4;
+	unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4);
+
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 1;
+	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*ipv4 = (struct ibv_exp_flow_spec_ipv4) {
+		.type = IBV_EXP_FLOW_SPEC_IPV4,
+		.size = ipv4_size,
+	};
+	if (!spec)
+		return 0;
+	if (!mask)
+		mask = default_mask;
+	ipv4->val = (struct ibv_exp_flow_ipv4_filter){
+		.src_ip = spec->hdr.src_addr,
+		.dst_ip = spec->hdr.dst_addr,
+	};
+	ipv4->mask = (struct ibv_exp_flow_ipv4_filter){
+		.src_ip = mask->hdr.src_addr,
+		.dst_ip = mask->hdr.dst_addr,
+	};
+	/* Remove unwanted bits from values. */
+	ipv4->val.src_ip &= ipv4->mask.src_ip;
+	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+	return 0;
+}
+
+/**
+ * Convert IPv6 item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] default_mask
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param[in, out] data
+ *   User structure.
+ */
+static int
+mlx5_flow_create_ipv6(const struct rte_flow_item *item,
+		      const void *default_mask,
+		      void *data)
+{
+	const struct rte_flow_item_ipv6 *spec = item->spec;
+	const struct rte_flow_item_ipv6 *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_ipv6 *ipv6;
+	unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6);
+	unsigned int i;
+
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 1;
+	ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*ipv6 = (struct ibv_exp_flow_spec_ipv6) {
+		.type = IBV_EXP_FLOW_SPEC_IPV6,
+		.size = ipv6_size,
+	};
+	if (!spec)
+		return 0;
+	if (!mask)
+		mask = default_mask;
+	memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
+	       RTE_DIM(ipv6->val.src_ip));
+	memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
+	       RTE_DIM(ipv6->val.dst_ip));
+	memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
+	       RTE_DIM(ipv6->mask.src_ip));
+	memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
+	       RTE_DIM(ipv6->mask.dst_ip));
+	/* Remove unwanted bits from values. */
+	for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
+		ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
+		ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
+	}
+	return 0;
+}
+
+/**
+ * Convert UDP item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] default_mask
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param[in, out] data
+ *   User structure.
+ */
+static int
+mlx5_flow_create_udp(const struct rte_flow_item *item,
+		     const void *default_mask,
+		     void *data)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_tcp_udp *udp;
+	unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 0;
+	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*udp = (struct ibv_exp_flow_spec_tcp_udp) {
+		.type = IBV_EXP_FLOW_SPEC_UDP,
+		.size = udp_size,
+	};
+	if (!spec)
+		return 0;
+	if (!mask)
+		mask = default_mask;
+	udp->val.dst_port = spec->hdr.dst_port;
+	udp->val.src_port = spec->hdr.src_port;
+	udp->mask.dst_port = mask->hdr.dst_port;
+	udp->mask.src_port = mask->hdr.src_port;
+	/* Remove unwanted bits from values. */
+	udp->val.src_port &= udp->mask.src_port;
+	udp->val.dst_port &= udp->mask.dst_port;
+	return 0;
+}
+
+/**
+ * Convert TCP item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] default_mask
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param[in, out] data
+ *   User structure.
+ */
+static int
+mlx5_flow_create_tcp(const struct rte_flow_item *item,
+		     const void *default_mask,
+		     void *data)
+{
+	const struct rte_flow_item_tcp *spec = item->spec;
+	const struct rte_flow_item_tcp *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_tcp_udp *tcp;
+	unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 0;
+	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*tcp = (struct ibv_exp_flow_spec_tcp_udp) {
+		.type = IBV_EXP_FLOW_SPEC_TCP,
+		.size = tcp_size,
+	};
+	if (!spec)
+		return 0;
+	if (!mask)
+		mask = default_mask;
+	tcp->val.dst_port = spec->hdr.dst_port;
+	tcp->val.src_port = spec->hdr.src_port;
+	tcp->mask.dst_port = mask->hdr.dst_port;
+	tcp->mask.src_port = mask->hdr.src_port;
+	/* Remove unwanted bits from values. */
+	tcp->val.src_port &= tcp->mask.src_port;
+	tcp->val.dst_port &= tcp->mask.dst_port;
+	return 0;
+}
+
+/**
+ * Complete flow rule creation.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param ibv_attr
+ *   Verbs flow attributes.
+ * @param action
+ *   Target action structure.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow if the rule could be created.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue(struct priv *priv,
+			      struct ibv_exp_flow_attr *ibv_attr,
+			      struct mlx5_flow_action *action,
+			      struct rte_flow_error *error)
+{
+	struct rxq_ctrl *rxq;
+	struct rte_flow *rte_flow;
+
+	assert(priv->pd);
+	assert(priv->ctx);
+	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+	if (!rte_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate flow memory");
+		return NULL;
+	}
+	if (action->drop) {
+		rte_flow->cq =
+			ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+					  &(struct ibv_exp_cq_init_attr){
+						  .comp_mask = 0,
+					  });
+		if (!rte_flow->cq) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL, "cannot allocate CQ");
+			goto error;
+		}
+		rte_flow->wq = ibv_exp_create_wq(priv->ctx,
+						 &(struct ibv_exp_wq_init_attr){
+						 .wq_type = IBV_EXP_WQT_RQ,
+						 .max_recv_wr = 1,
+						 .max_recv_sge = 1,
+						 .pd = priv->pd,
+						 .cq = rte_flow->cq,
+						 });
+		if (!rte_flow->wq) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL, "cannot allocate WQ");
+			goto error;
+		}
+	} else {
+		rxq = container_of((*priv->rxqs)[action->queue_id],
+				   struct rxq_ctrl, rxq);
+		rte_flow->rxq = &rxq->rxq;
+		rte_flow->wq = rxq->wq;
+	}
+	rte_flow->ibv_attr = ibv_attr;
+	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
+		priv->ctx,
+		&(struct ibv_exp_rwq_ind_table_init_attr){
+			.pd = priv->pd,
+			.log_ind_tbl_size = 0,
+			.ind_tbl = &rte_flow->wq,
+			.comp_mask = 0,
+		});
+	if (!rte_flow->ind_table) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate indirection table");
+		goto error;
+	}
+	rte_flow->qp = ibv_exp_create_qp(
+		priv->ctx,
+		&(struct ibv_exp_qp_init_attr){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_EXP_QP_INIT_ATTR_PD |
+				IBV_EXP_QP_INIT_ATTR_PORT |
+				IBV_EXP_QP_INIT_ATTR_RX_HASH,
+			.pd = priv->pd,
+			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+				.rx_hash_function =
+					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_hash_default_key_len,
+				.rx_hash_key = rss_hash_default_key,
+				.rx_hash_fields_mask = 0,
+				.rwq_ind_tbl = rte_flow->ind_table,
+			},
+			.port_num = priv->port,
+		});
+	if (!rte_flow->qp) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate QP");
+		goto error;
+	}
+	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+						 rte_flow->ibv_attr);
+	if (!rte_flow->ibv_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "flow rule creation failure");
+		goto error;
+	}
+	return rte_flow;
+error:
+	assert(rte_flow);
+	if (rte_flow->qp)
+		ibv_destroy_qp(rte_flow->qp);
+	if (rte_flow->ind_table)
+		ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
+	if (!rte_flow->rxq && rte_flow->wq)
+		ibv_exp_destroy_wq(rte_flow->wq);
+	if (!rte_flow->rxq && rte_flow->cq)
+		ibv_destroy_cq(rte_flow->cq);
+	rte_free(rte_flow->ibv_attr);
+	rte_free(rte_flow);
+	return NULL;
+}
+
+/**
+ * Convert a flow.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow on success, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create(struct priv *priv,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct rte_flow *rte_flow;
+	struct mlx5_flow_action action;
+	struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };
+	int err;
+
+	err = priv_flow_validate(priv, attr, items, actions, error, &flow);
+	if (err)
+		goto exit;
+	flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
+	if (!flow.ibv_attr) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate ibv_attr memory");
+		goto exit;
+	}
+	flow.offset = sizeof(struct ibv_exp_flow_attr);
+	*flow.ibv_attr = (struct ibv_exp_flow_attr){
+		.type = IBV_EXP_FLOW_ATTR_NORMAL,
+		.size = sizeof(struct ibv_exp_flow_attr),
+		.priority = attr->priority,
+		.num_of_specs = 0,
+		.port = 0,
+		.flags = 0,
+		.reserved = 0,
+	};
+	claim_zero(priv_flow_validate(priv, attr, items, actions,
+				      error, &flow));
+	action = (struct mlx5_flow_action){
+		.queue = 0,
+		.drop = 0,
+	};
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+			continue;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+			action.queue = 1;
+			action.queue_id =
+				((const struct rte_flow_action_queue *)
+				 actions->conf)->index;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+			action.drop = 1;
+		} else {
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "unsupported action");
+			goto exit;
+		}
+	}
+	rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
+						 &action, error);
+	return rte_flow;
+exit:
+	rte_free(flow.ibv_attr);
+	return NULL;
 }
 
 /**
@@ -74,15 +870,46 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)attr;
-	(void)items;
-	(void)actions;
-	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			   NULL, "not implemented yet");
-	return NULL;
+	struct priv *priv = dev->data->dev_private;
+	struct rte_flow *flow;
+
+	priv_lock(priv);
+	flow = priv_flow_create(priv, attr, items, actions, error);
+	if (flow) {
+		LIST_INSERT_HEAD(&priv->flows, flow, next);
+		DEBUG("Flow created %p", (void *)flow);
+	}
+	priv_unlock(priv);
+	return flow;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param[in] flow
+ *   Flow to destroy.
+ */
+static void
+priv_flow_destroy(struct priv *priv,
+		  struct rte_flow *flow)
+{
+	(void)priv;
+	LIST_REMOVE(flow, next);
+	if (flow->ibv_flow)
+		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+	if (flow->qp)
+		claim_zero(ibv_destroy_qp(flow->qp));
+	if (flow->ind_table)
+		claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
+	if (!flow->rxq && flow->wq)
+		claim_zero(ibv_exp_destroy_wq(flow->wq));
+	if (!flow->rxq && flow->cq)
+		claim_zero(ibv_destroy_cq(flow->cq));
+	rte_free(flow->ibv_attr);
+	DEBUG("Flow destroyed %p", (void *)flow);
+	rte_free(flow);
 }
 
 /**
@@ -96,13 +923,30 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow *flow,
 		  struct rte_flow_error *error)
 {
-	(void)dev;
-	(void)flow;
+	struct priv *priv = dev->data->dev_private;
+
 	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	priv_lock(priv);
+	priv_flow_destroy(priv, flow);
+	priv_unlock(priv);
+	return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+static void
+priv_flow_flush(struct priv *priv)
+{
+	while (!LIST_EMPTY(&priv->flows)) {
+		struct rte_flow *flow;
+
+		flow = LIST_FIRST(&priv->flows);
+		priv_flow_destroy(priv, flow);
+	}
 }
 
 /**
@@ -115,10 +959,62 @@ int
 mlx5_flow_flush(struct rte_eth_dev *dev,
 		struct rte_flow_error *error)
 {
-	(void)dev;
+	struct priv *priv = dev->data->dev_private;
+
 	(void)error;
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			   NULL, "not implemented yet");
-	return -rte_errno;
+	priv_lock(priv);
+	priv_flow_flush(priv);
+	priv_unlock(priv);
+	return 0;
+}
+
+/**
+ * Remove all flows.
+ *
+ * Called by dev_stop() to remove all flows.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+void
+priv_flow_stop(struct priv *priv)
+{
+	struct rte_flow *flow;
+
+	for (flow = LIST_FIRST(&priv->flows);
+	     flow;
+	     flow = LIST_NEXT(flow, next)) {
+		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+		flow->ibv_flow = NULL;
+		DEBUG("Flow %p removed", (void *)flow);
+	}
+}
+
+/**
+ * Add all flows.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   0 on success, an errno value otherwise and rte_errno is set.
+ */
+int
+priv_flow_start(struct priv *priv)
+{
+	struct rte_flow *flow;
+
+	for (flow = LIST_FIRST(&priv->flows);
+	     flow;
+	     flow = LIST_NEXT(flow, next)) {
+		flow->ibv_flow = ibv_exp_create_flow(flow->qp,
+						     flow->ibv_attr);
+		if (!flow->ibv_flow) {
+			DEBUG("Flow %p cannot be applied", (void *)flow);
+			rte_errno = EINVAL;
+			return rte_errno;
+		}
+		DEBUG("Flow %p applied", (void *)flow);
+	}
+	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index d4dccd8..2399243 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -90,6 +90,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
 		priv_fdir_enable(priv);
 	priv_dev_interrupt_handler_install(priv, dev);
+	err = priv_flow_start(priv);
 	priv_unlock(priv);
 	return -err;
 }
@@ -120,6 +121,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
 	priv_fdir_disable(priv);
+	priv_flow_stop(priv);
 	priv_dev_interrupt_handler_uninstall(priv, dev);
 	priv->started = 0;
 	priv_unlock(priv);
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
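
For readers who want to see the other side of these entry points, below is a
minimal sketch (not part of the patch) of how an application could validate
and create a rule steering IPv4 traffic for one destination address to RX
queue 1. The port, queue index and address are illustrative assumptions; the
rte_flow calls themselves are the public API this series plugs into.

	#include <rte_flow.h>
	#include <rte_byteorder.h>

	static struct rte_flow *
	example_ipv4_to_queue(uint8_t port_id, struct rte_flow_error *error)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		/* Match destination IPv4 address 192.168.0.1 exactly. */
		struct rte_flow_item_ipv4 ip_spec = {
			.hdr.dst_addr = rte_cpu_to_be_32(0xc0a80001),
		};
		struct rte_flow_item_ipv4 ip_mask = {
			.hdr.dst_addr = rte_cpu_to_be_32(0xffffffff),
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{
				.type = RTE_FLOW_ITEM_TYPE_IPV4,
				.spec = &ip_spec,
				.mask = &ip_mask,
			},
			/* Mandatory terminator, see the validation loops. */
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_queue queue = { .index = 1 };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		if (rte_flow_validate(port_id, &attr, pattern, actions, error))
			return NULL;
		return rte_flow_create(port_id, &attr, pattern, actions, error);
	}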

* [PATCH v5 3/6] net/mlx5: support VLAN flow item
  2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
                         ` (2 preceding siblings ...)
  2016-12-29 15:15       ` [PATCH v5 2/6] net/mlx5: support basic flow items and actions Nelio Laranjeiro
@ 2016-12-29 15:15       ` Nelio Laranjeiro
  2016-12-29 15:15       ` [PATCH v5 4/6] net/mlx5: support VXLAN " Nelio Laranjeiro
                         ` (2 subsequent siblings)
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-29 15:15 UTC (permalink / raw)
  To: dev, Ferruh Yigit; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 62 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 61 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4f6696e..8f2f4d5 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -57,6 +57,11 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
 		     void *data);
 
 static int
+mlx5_flow_create_vlan(const struct rte_flow_item *item,
+		      const void *default_mask,
+		      void *data);
+
+static int
 mlx5_flow_create_ipv4(const struct rte_flow_item *item,
 		      const void *default_mask,
 		      void *data);
@@ -136,7 +141,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
 	},
 	[RTE_FLOW_ITEM_TYPE_ETH] = {
-		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
+			       RTE_FLOW_ITEM_TYPE_IPV4,
 			       RTE_FLOW_ITEM_TYPE_IPV6),
 		.actions = valid_actions,
 		.mask = &(const struct rte_flow_item_eth){
@@ -147,6 +153,17 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.convert = mlx5_flow_create_eth,
 		.dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
 	},
+	[RTE_FLOW_ITEM_TYPE_VLAN] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+			       RTE_FLOW_ITEM_TYPE_IPV6),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_vlan){
+			.tci = -1,
+		},
+		.mask_sz = sizeof(struct rte_flow_item_vlan),
+		.convert = mlx5_flow_create_vlan,
+		.dst_sz = 0,
+	},
 	[RTE_FLOW_ITEM_TYPE_IPV4] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
 			       RTE_FLOW_ITEM_TYPE_TCP),
@@ -355,6 +372,17 @@ priv_flow_validate(struct priv *priv,
 
 		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
 			continue;
+		/* Handle special situation for VLAN. */
+		if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+			const struct rte_flow_item_vlan *vlan =
+				items->spec;
+
+			if (vlan && (vlan->tci > ETHER_MAX_VLAN_ID)) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   items,
+						   "wrong VLAN id value");
+				return -rte_errno;
+			}
+		}
 		for (i = 0;
 		     cur_item->items &&
 		     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
@@ -481,6 +509,38 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
 }
 
 /**
+ * Convert VLAN item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] default_mask
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param[in, out] data
+ *   User structure.
+ */
+static int
+mlx5_flow_create_vlan(const struct rte_flow_item *item,
+		      const void *default_mask,
+		      void *data)
+{
+	const struct rte_flow_item_vlan *spec = item->spec;
+	const struct rte_flow_item_vlan *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_eth *eth;
+	const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+
+	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
+	if (!spec)
+		return 0;
+	if (!mask)
+		mask = default_mask;
+	eth->val.vlan_tag = spec->tci;
+	eth->mask.vlan_tag = mask->tci;
+	eth->val.vlan_tag &= eth->mask.vlan_tag;
+	return 0;
+}
+
+/**
  * Convert IPv4 item to Verbs specification.
  *
  * @param[in] item
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
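
As an illustration (a sketch, not part of the patch), a pattern matching
VLAN ID 42 under the item graph above could look as follows. Note the VLAN
item consumes no specification space of its own (dst_sz is 0): the
conversion function folds the TCI into the Ethernet specification written
just before it. Giving the TCI in host byte order matches the
ETHER_MAX_VLAN_ID check in priv_flow_validate() and is an assumption of
this sketch.

	struct rte_flow_item_vlan vlan_spec = { .tci = 42 };
	struct rte_flow_item_vlan vlan_mask = { .tci = 0x0fff }; /* VID bits */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{
			.type = RTE_FLOW_ITEM_TYPE_VLAN,
			.spec = &vlan_spec,
			.mask = &vlan_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};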

* [PATCH v5 4/6] net/mlx5: support VXLAN flow item
  2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
                         ` (3 preceding siblings ...)
  2016-12-29 15:15       ` [PATCH v5 3/6] net/mlx5: support VLAN flow item Nelio Laranjeiro
@ 2016-12-29 15:15       ` Nelio Laranjeiro
  2016-12-29 15:15       ` [PATCH v5 5/6] net/mlx5: support mark flow action Nelio Laranjeiro
  2016-12-29 15:15       ` [PATCH v5 6/6] net/mlx5: extend IPv4 flow item Nelio Laranjeiro
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-29 15:15 UTC (permalink / raw)
  To: dev, Ferruh Yigit; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 78 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 72 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8f2f4d5..093c140 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -81,6 +81,11 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
 		     const void *default_mask,
 		     void *data);
 
+static int
+mlx5_flow_create_vxlan(const struct rte_flow_item *item,
+		       const void *default_mask,
+		       void *data);
+
 struct rte_flow {
 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
 	struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
@@ -138,7 +143,8 @@ static const enum rte_flow_action_type valid_actions[] = {
 /** Graph of supported items and associated actions. */
 static const struct mlx5_flow_items mlx5_flow_items[] = {
 	[RTE_FLOW_ITEM_TYPE_END] = {
-		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+			       RTE_FLOW_ITEM_TYPE_VXLAN),
 	},
 	[RTE_FLOW_ITEM_TYPE_ETH] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
@@ -203,6 +209,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6),
 	},
 	[RTE_FLOW_ITEM_TYPE_UDP] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
 		.actions = valid_actions,
 		.mask = &(const struct rte_flow_item_udp){
 			.hdr = {
@@ -226,12 +233,23 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.convert = mlx5_flow_create_tcp,
 		.dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
 	},
+	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_vxlan){
+			.vni = "\xff\xff\xff",
+		},
+		.mask_sz = sizeof(struct rte_flow_item_vxlan),
+		.convert = mlx5_flow_create_vxlan,
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_tunnel),
+	},
 };
 
 /** Structure to pass to the conversion function. */
 struct mlx5_flow {
 	struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
 	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+	uint32_t inner; /**< Set once VXLAN is encountered. */
 };
 
 struct mlx5_flow_action {
@@ -489,7 +507,7 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
 	flow->ibv_attr->priority = 2;
 	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*eth = (struct ibv_exp_flow_spec_eth) {
-		.type = IBV_EXP_FLOW_SPEC_ETH,
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
 		.size = eth_size,
 	};
 	if (!spec)
@@ -565,7 +583,7 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
 	flow->ibv_attr->priority = 1;
 	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*ipv4 = (struct ibv_exp_flow_spec_ipv4) {
-		.type = IBV_EXP_FLOW_SPEC_IPV4,
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4,
 		.size = ipv4_size,
 	};
 	if (!spec)
@@ -612,7 +630,7 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
 	flow->ibv_attr->priority = 1;
 	ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*ipv6 = (struct ibv_exp_flow_spec_ipv6) {
-		.type = IBV_EXP_FLOW_SPEC_IPV6,
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6,
 		.size = ipv6_size,
 	};
 	if (!spec)
@@ -660,7 +678,7 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
 	flow->ibv_attr->priority = 0;
 	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*udp = (struct ibv_exp_flow_spec_tcp_udp) {
-		.type = IBV_EXP_FLOW_SPEC_UDP,
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
 		.size = udp_size,
 	};
 	if (!spec)
@@ -702,7 +720,7 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
 	flow->ibv_attr->priority = 0;
 	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*tcp = (struct ibv_exp_flow_spec_tcp_udp) {
-		.type = IBV_EXP_FLOW_SPEC_TCP,
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
 		.size = tcp_size,
 	};
 	if (!spec)
@@ -720,6 +738,53 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
 }
 
 /**
+ * Convert VXLAN item to Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] default_mask
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param[in, out] data
+ *   User structure.
+ */
+static int
+mlx5_flow_create_vxlan(const struct rte_flow_item *item,
+		       const void *default_mask,
+		       void *data)
+{
+	const struct rte_flow_item_vxlan *spec = item->spec;
+	const struct rte_flow_item_vxlan *mask = item->mask;
+	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct ibv_exp_flow_spec_tunnel *vxlan;
+	unsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel);
+	union vni {
+		uint32_t vlan_id;
+		uint8_t vni[4];
+	} id;
+
+	++flow->ibv_attr->num_of_specs;
+	flow->ibv_attr->priority = 0;
+	id.vni[0] = 0;
+	vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*vxlan = (struct ibv_exp_flow_spec_tunnel) {
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL,
+		.size = size,
+	};
+	flow->inner = IBV_EXP_FLOW_SPEC_INNER;
+	if (!spec)
+		return 0;
+	if (!mask)
+		mask = default_mask;
+	memcpy(&id.vni[1], spec->vni, 3);
+	vxlan->val.tunnel_id = id.vlan_id;
+	memcpy(&id.vni[1], mask->vni, 3);
+	vxlan->mask.tunnel_id = id.vlan_id;
+	/* Remove unwanted bits from values. */
+	vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
+	return 0;
+}
+
+/**
  * Complete flow rule creation.
  *
  * @param priv
@@ -886,6 +951,7 @@ priv_flow_create(struct priv *priv,
 		.flags = 0,
 		.reserved = 0,
 	};
+	flow.inner = 0;
 	claim_zero(priv_flow_validate(priv, attr, items, actions,
 				      error, &flow));
 	action = (struct mlx5_flow_action){
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
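
To illustrate the inner/outer split introduced here, a pattern matching
VXLAN VNI 42 and its encapsulated Ethernet frame could be written as below
(a sketch, not part of the patch; outer addresses and ports are left
wildcarded). Every item after the VXLAN one is tagged with
IBV_EXP_FLOW_SPEC_INNER by the conversion functions.

	struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x00, 0x00, 0x2a }, /* VNI 42, 24-bit big-endian */
	};
	struct rte_flow_item_vxlan vxlan_mask = {
		.vni = "\xff\xff\xff",
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{
			.type = RTE_FLOW_ITEM_TYPE_VXLAN,
			.spec = &vxlan_spec,
			.mask = &vxlan_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_ETH }, /* inner Ethernet */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};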

* [PATCH v5 5/6] net/mlx5: support mark flow action
  2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
                         ` (4 preceding siblings ...)
  2016-12-29 15:15       ` [PATCH v5 4/6] net/mlx5: support VXLAN " Nelio Laranjeiro
@ 2016-12-29 15:15       ` Nelio Laranjeiro
  2016-12-29 15:15       ` [PATCH v5 6/6] net/mlx5: extend IPv4 flow item Nelio Laranjeiro
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-29 15:15 UTC (permalink / raw)
  To: dev, Ferruh Yigit; +Cc: Adrien Mazarguil

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 78 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_prm.h  | 70 ++++++++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_rxtx.c | 12 ++++++-
 drivers/net/mlx5/mlx5_rxtx.h |  3 +-
 4 files changed, 160 insertions(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 093c140..0e7ea99 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -50,6 +50,7 @@
 #include <rte_malloc.h>
 
 #include "mlx5.h"
+#include "mlx5_prm.h"
 
 static int
 mlx5_flow_create_eth(const struct rte_flow_item *item,
@@ -95,6 +96,7 @@ struct rte_flow {
 	struct ibv_exp_wq *wq; /**< Verbs work queue. */
 	struct ibv_cq *cq; /**< Verbs completion queue. */
 	struct rxq *rxq; /**< Pointer to the queue, NULL if drop queue. */
+	uint32_t mark:1; /**< Set if the flow is marked. */
 };
 
 /** Static initializer for items. */
@@ -137,6 +139,7 @@ struct mlx5_flow_items {
 static const enum rte_flow_action_type valid_actions[] = {
 	RTE_FLOW_ACTION_TYPE_DROP,
 	RTE_FLOW_ACTION_TYPE_QUEUE,
+	RTE_FLOW_ACTION_TYPE_MARK,
 	RTE_FLOW_ACTION_TYPE_END,
 };
 
@@ -255,7 +258,9 @@ struct mlx5_flow {
 struct mlx5_flow_action {
 	uint32_t queue:1; /**< Target is a receive queue. */
 	uint32_t drop:1; /**< Target is a drop queue. */
+	uint32_t mark:1; /**< Mark is present in the flow. */
 	uint32_t queue_id; /**< Identifier of the queue. */
+	uint32_t mark_id; /**< Mark identifier. */
 };
 
 /**
@@ -352,6 +357,7 @@ priv_flow_validate(struct priv *priv,
 	struct mlx5_flow_action action = {
 		.queue = 0,
 		.drop = 0,
+		.mark = 0,
 	};
 
 	(void)priv;
@@ -438,10 +444,26 @@ priv_flow_validate(struct priv *priv,
 			if (!queue || (queue->index >= priv->rxqs_n))
 				goto exit_action_not_supported;
 			action.queue = 1;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
+			const struct rte_flow_action_mark *mark =
+				(const struct rte_flow_action_mark *)
+				actions->conf;
+
+			if (mark && (mark->id >= MLX5_FLOW_MARK_MAX)) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "mark must be between 0"
+						   " and 16777199");
+				return -rte_errno;
+			}
+			action.mark = 1;
 		} else {
 			goto exit_action_not_supported;
 		}
 	}
+	if (action.mark && !flow->ibv_attr)
+		flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
 	if (!action.queue && !action.drop) {
 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "no valid action");
@@ -785,6 +807,30 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 }
 
 /**
+ * Convert mark/flag action to Verbs specification.
+ *
+ * @param flow
+ *   Pointer to MLX5 flow structure.
+ * @param mark_id
+ *   Mark identifier.
+ */
+static int
+mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
+{
+	struct ibv_exp_flow_spec_action_tag *tag;
+	unsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag);
+
+	tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	*tag = (struct ibv_exp_flow_spec_action_tag){
+		.type = IBV_EXP_FLOW_SPEC_ACTION_TAG,
+		.size = size,
+		.tag_id = mlx5_flow_mark_set(mark_id),
+	};
+	++flow->ibv_attr->num_of_specs;
+	return 0;
+}
+
+/**
  * Complete flow rule creation.
  *
  * @param priv
@@ -840,8 +886,10 @@ priv_flow_create_action_queue(struct priv *priv,
 		rxq = container_of((*priv->rxqs)[action->queue_id],
 				   struct rxq_ctrl, rxq);
 		rte_flow->rxq = &rxq->rxq;
+		rxq->rxq.mark |= action->mark;
 		rte_flow->wq = rxq->wq;
 	}
+	rte_flow->mark = action->mark;
 	rte_flow->ibv_attr = ibv_attr;
 	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
 		priv->ctx,
@@ -957,6 +1005,8 @@ priv_flow_create(struct priv *priv,
 	action = (struct mlx5_flow_action){
 		.queue = 0,
 		.drop = 0,
+		.mark = 0,
+		.mark_id = MLX5_FLOW_MARK_DEFAULT,
 	};
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
 		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
@@ -968,6 +1018,14 @@ priv_flow_create(struct priv *priv,
 				 actions->conf)->index;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
 			action.drop = 1;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
+			const struct rte_flow_action_mark *mark =
+				(const struct rte_flow_action_mark *)
+				actions->conf;
+
+			if (mark)
+				action.mark_id = mark->id;
+			action.mark = 1;
 		} else {
 			rte_flow_error_set(error, ENOTSUP,
 					   RTE_FLOW_ERROR_TYPE_ACTION,
@@ -975,6 +1033,10 @@ priv_flow_create(struct priv *priv,
 			goto exit;
 		}
 	}
+	if (action.mark) {
+		mlx5_flow_create_flag_mark(&flow, action.mark_id);
+		flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
+	}
 	rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
 						 &action, error);
 	return rte_flow;
@@ -1033,6 +1095,18 @@ priv_flow_destroy(struct priv *priv,
 		claim_zero(ibv_exp_destroy_wq(flow->wq));
 	if (!flow->rxq && flow->cq)
 		claim_zero(ibv_destroy_cq(flow->cq));
+	if (flow->mark) {
+		struct rte_flow *tmp;
+		uint32_t mark_n = 0;
+
+		for (tmp = LIST_FIRST(&priv->flows);
+		     tmp;
+		     tmp = LIST_NEXT(tmp, next)) {
+			if ((flow->rxq == tmp->rxq) && tmp->mark)
+				++mark_n;
+		}
+		flow->rxq->mark = !!mark_n;
+	}
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
@@ -1112,6 +1186,8 @@ priv_flow_stop(struct priv *priv)
 	     flow = LIST_NEXT(flow, next)) {
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
 		flow->ibv_flow = NULL;
+		if (flow->mark)
+			flow->rxq->mark = 0;
 		DEBUG("Flow %p removed", (void *)flow);
 	}
 }
@@ -1141,6 +1217,8 @@ priv_flow_start(struct priv *priv)
 			return rte_errno;
 		}
 		DEBUG("Flow %p applied", (void *)flow);
+		if (flow->rxq)
+			flow->rxq->mark |= flow->mark;
 	}
 	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 9cd9fdf..d9bb332 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -34,6 +34,8 @@
 #ifndef RTE_PMD_MLX5_PRM_H_
 #define RTE_PMD_MLX5_PRM_H_
 
+#include <assert.h>
+
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
 #ifdef PEDANTIC
@@ -106,6 +108,15 @@
 /* Outer UDP header and checksum OK. */
 #define MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK (1u << 6)
 
+/* INVALID is used by packets matching no flow rules. */
+#define MLX5_FLOW_MARK_INVALID 0
+
+/* Maximum allowed value to mark a packet. */
+#define MLX5_FLOW_MARK_MAX 0xfffff0
+
+/* Default mark value used when none is provided. */
+#define MLX5_FLOW_MARK_DEFAULT 0xffffff
+
 /* Subset of struct mlx5_wqe_eth_seg. */
 struct mlx5_wqe_eth_seg_small {
 	uint32_t rsvd0;
@@ -183,10 +194,67 @@ struct mlx5_cqe {
 	uint8_t rsvd2[12];
 	uint32_t byte_cnt;
 	uint64_t timestamp;
-	uint8_t rsvd3[4];
+	uint32_t sop_drop_qpn;
 	uint16_t wqe_counter;
 	uint8_t rsvd4;
 	uint8_t op_own;
 };
 
+/**
+ * Convert a user mark to flow mark.
+ *
+ * @param val
+ *   Mark value to convert.
+ *
+ * @return
+ *   Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_set(uint32_t val)
+{
+	uint32_t ret;
+
+	/*
+	 * Add one to the user value to differentiate un-marked flows from
+	 * marked flows.
+	 */
+	++val;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+	/*
+	 * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
+	 * word, byte-swapped by the kernel on little-endian systems. In this
+	 * case, left-shifting the resulting big-endian value ensures the
+	 * least significant 24 bits are retained when converting it back.
+	 */
+	ret = rte_cpu_to_be_32(val) >> 8;
+#else
+	ret = val;
+#endif
+	assert(ret <= MLX5_FLOW_MARK_MAX);
+	return ret;
+}
+
+/**
+ * Convert a mark to user mark.
+ *
+ * @param val
+ *   Mark value to convert.
+ *
+ * @return
+ *   Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_get(uint32_t val)
+{
+	/*
+	 * Subtract one from the retrieved value. It was added by
+	 * mlx5_flow_mark_set() to distinguish unmarked flows.
+	 */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+	return (val >> 8) - 1;
+#else
+	return val - 1;
+#endif
+}
+
 #endif /* RTE_PMD_MLX5_PRM_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 6f86ded..8f0b4a6 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -113,7 +113,7 @@ static inline int
 check_cqe_seen(volatile struct mlx5_cqe *cqe)
 {
 	static const uint8_t magic[] = "seen";
-	volatile uint8_t (*buf)[sizeof(cqe->rsvd3)] = &cqe->rsvd3;
+	volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
 	int ret = 1;
 	unsigned int i;
 
@@ -1357,6 +1357,16 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				pkt->hash.rss = rss_hash_res;
 				pkt->ol_flags = PKT_RX_RSS_HASH;
 			}
+			if (rxq->mark &&
+			    ((cqe->sop_drop_qpn !=
+			      htonl(MLX5_FLOW_MARK_INVALID)) &&
+			     (cqe->sop_drop_qpn !=
+			      htonl(MLX5_FLOW_MARK_DEFAULT)))) {
+				pkt->hash.fdir.hi =
+					mlx5_flow_mark_get(cqe->sop_drop_qpn);
+				pkt->ol_flags &= ~PKT_RX_RSS_HASH;
+				pkt->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+			}
 			if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
 			    rxq->crc_present) {
 				if (rxq->csum) {
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index e244c48..302ca49 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -114,7 +114,8 @@ struct rxq {
 	unsigned int elts_n:4; /* Log 2 of Mbufs. */
 	unsigned int port_id:8;
 	unsigned int rss_hash:1; /* RSS hash result is enabled. */
-	unsigned int :9; /* Remaining bits. */
+	unsigned int mark:1; /* Marked flow available on the queue. */
+	unsigned int :8; /* Remaining bits. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t rq_ci;
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
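
Usage-wise (a sketch, not part of the patch; the mark value and queue index
are illustrative), an application combines MARK with a fate action such as
QUEUE, then reads the value back from the mbuf on the RX path.
mlx5_flow_mark_set() adds one to the user value so that 0 can denote
"no mark" in the CQE, and mlx5_flow_mark_get() reverses this on RX.

	struct rte_flow_action_mark mark = { .id = 0x2a }; /* < MLX5_FLOW_MARK_MAX */
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* On the RX path, for an mbuf "pkt" returned by rte_eth_rx_burst(): */
	if (pkt->ol_flags & PKT_RX_FDIR_ID)
		printf("mark: %u\n", (unsigned int)pkt->hash.fdir.hi);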

* [PATCH v5 6/6] net/mlx5: extend IPv4 flow item
  2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
                         ` (5 preceding siblings ...)
  2016-12-29 15:15       ` [PATCH v5 5/6] net/mlx5: support mark flow action Nelio Laranjeiro
@ 2016-12-29 15:15       ` Nelio Laranjeiro
  6 siblings, 0 replies; 38+ messages in thread
From: Nelio Laranjeiro @ 2016-12-29 15:15 UTC (permalink / raw)
  To: dev, Ferruh Yigit; +Cc: Adrien Mazarguil

This commit adds:
- Type of service
- Next protocol ID

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 40 ++++++++++++++++++++++++++++++++--------
 1 file changed, 32 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 0e7ea99..d7ed686 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -111,6 +111,12 @@ struct mlx5_flow_items {
 	const enum rte_flow_action_type *const actions;
 	/** Bit-masks corresponding to the possibilities for the item. */
 	const void *mask;
+	/**
+	 * Default bit-masks to use when item->mask is not provided. When
+	 * default_mask is also NULL, the full supported bit-mask (mask) is
+	 * used instead.
+	 */
+	const void *default_mask;
 	/** Bit-masks size in bytes. */
 	const unsigned int mask_sz;
 	/**
@@ -181,11 +187,19 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 			.hdr = {
 				.src_addr = -1,
 				.dst_addr = -1,
+				.type_of_service = -1,
+				.next_proto_id = -1,
+			},
+		},
+		.default_mask = &(const struct rte_flow_item_ipv4){
+			.hdr = {
+				.src_addr = -1,
+				.dst_addr = -1,
 			},
 		},
 		.mask_sz = sizeof(struct rte_flow_item_ipv4),
 		.convert = mlx5_flow_create_ipv4,
-		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4),
+		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4_ext),
 	},
 	[RTE_FLOW_ITEM_TYPE_IPV6] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
@@ -425,7 +439,11 @@ priv_flow_validate(struct priv *priv,
 		if (err)
 			goto exit_item_not_supported;
 		if (flow->ibv_attr && cur_item->convert) {
-			err = cur_item->convert(items, cur_item->mask, flow);
+			err = cur_item->convert(items,
+						(cur_item->default_mask ?
+						 cur_item->default_mask :
+						 cur_item->mask),
+						flow);
 			if (err)
 				goto exit_item_not_supported;
 		}
@@ -598,31 +616,37 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
 	const struct rte_flow_item_ipv4 *spec = item->spec;
 	const struct rte_flow_item_ipv4 *mask = item->mask;
 	struct mlx5_flow *flow = (struct mlx5_flow *)data;
-	struct ibv_exp_flow_spec_ipv4 *ipv4;
-	unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4);
+	struct ibv_exp_flow_spec_ipv4_ext *ipv4;
+	unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext);
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 1;
 	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
-	*ipv4 = (struct ibv_exp_flow_spec_ipv4) {
-		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4,
+	*ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {
+		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
 		.size = ipv4_size,
 	};
 	if (!spec)
 		return 0;
 	if (!mask)
 		mask = default_mask;
-	ipv4->val = (struct ibv_exp_flow_ipv4_filter){
+	ipv4->val = (struct ibv_exp_flow_ipv4_ext_filter){
 		.src_ip = spec->hdr.src_addr,
 		.dst_ip = spec->hdr.dst_addr,
+		.proto = spec->hdr.next_proto_id,
+		.tos = spec->hdr.type_of_service,
 	};
-	ipv4->mask = (struct ibv_exp_flow_ipv4_filter){
+	ipv4->mask = (struct ibv_exp_flow_ipv4_ext_filter){
 		.src_ip = mask->hdr.src_addr,
 		.dst_ip = mask->hdr.dst_addr,
+		.proto = mask->hdr.next_proto_id,
+		.tos = mask->hdr.type_of_service,
 	};
 	/* Remove unwanted bits from values. */
 	ipv4->val.src_ip &= ipv4->mask.src_ip;
 	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+	ipv4->val.proto &= ipv4->mask.proto;
+	ipv4->val.tos &= ipv4->mask.tos;
 	return 0;
 }
 
-- 
2.1.4

^ permalink raw reply related	[flat|nested] 38+ messages in thread
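
With these extensions, L3 protocol and TOS matching become possible without
an explicit L4 item. A sketch (not part of the patch; values illustrative)
matching UDP traffic carrying DSCP CS5 (TOS 0xa0):

	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.next_proto_id = 17, /* IPPROTO_UDP */
			.type_of_service = 0xa0,
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = {
			.next_proto_id = 0xff,
			.type_of_service = 0xff,
		},
	};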

* Re: [PATCH v5 0/6] net/mlx5: support flow API
  2016-12-29 15:15       ` [PATCH v5 " Nelio Laranjeiro
@ 2017-01-03 16:19         ` Ferruh Yigit
  2017-01-04 14:48         ` Ferruh Yigit
  1 sibling, 0 replies; 38+ messages in thread
From: Ferruh Yigit @ 2017-01-03 16:19 UTC (permalink / raw)
  To: Nelio Laranjeiro, dev; +Cc: Adrien Mazarguil

On 12/29/2016 3:15 PM, Nelio Laranjeiro wrote:
> Changes in v5:
> 
>  - Fix masking when only spec is present in item structure.
>  - Fix first element of flow items array.
> 
> Changes in v4:
> 
>  - Simplify flow parsing by using a graph.
>  - Add VXLAN flow item.
>  - Add mark flow action.
>  - Extend IPv4 filter item (Type of service, Next Protocol ID).
> 
> Changes in v3:
> 
>  - Fix Ethernet ether type issue.
> 
> Changes in v2:
> 
>  - Fix several issues.
>  - Support VLAN filtering.
> 
> Nelio Laranjeiro (6):
>   net/mlx5: add preliminary flow API support
>   net/mlx5: support basic flow items and actions
>   net/mlx5: support VLAN flow item
>   net/mlx5: support VXLAN flow item
>   net/mlx5: support mark flow action
>   net/mlx5: extend IPv4 flow item

This patch is giving ICC warnings [1], but please check:
http://dpdk.org/dev/patchwork/patch/18808/



[1]
.../drivers/net/mlx5/mlx5_flow.c(550): error #188: enumerated type mixed
with another type
                .type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
                        ^

.../drivers/net/mlx5/mlx5_flow.c(626): error #188: enumerated type mixed
with another type
                .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
                        ^

.../drivers/net/mlx5/mlx5_flow.c(679): error #188: enumerated type mixed
with another type
                .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6,
                        ^

.../drivers/net/mlx5/mlx5_flow.c(727): error #188: enumerated type mixed
with another type
                .type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
                        ^

.../drivers/net/mlx5/mlx5_flow.c(769): error #188: enumerated type mixed
with another type
                .type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
                        ^

.../drivers/net/mlx5/mlx5_flow.c(816): error #188: enumerated type mixed
with another type
                .type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL,
                        ^

> 
>  drivers/net/mlx5/Makefile       |    1 +
>  drivers/net/mlx5/mlx5.h         |   19 +
>  drivers/net/mlx5/mlx5_fdir.c    |   15 +
>  drivers/net/mlx5/mlx5_flow.c    | 1248 +++++++++++++++++++++++++++++++++++++++
>  drivers/net/mlx5/mlx5_prm.h     |   70 ++-
>  drivers/net/mlx5/mlx5_rxtx.c    |   12 +-
>  drivers/net/mlx5/mlx5_rxtx.h    |    3 +-
>  drivers/net/mlx5/mlx5_trigger.c |    2 +
>  8 files changed, 1367 insertions(+), 3 deletions(-)
>  create mode 100644 drivers/net/mlx5/mlx5_flow.c
> 

^ permalink raw reply	[flat|nested] 38+ messages in thread
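
A common way to silence this class of ICC diagnostic (a suggestion only,
not necessarily the fix that was applied, and assuming the experimental
Verbs headers name the enum ibv_exp_flow_spec_type) is to make the
int-to-enum conversion explicit:

	*eth = (struct ibv_exp_flow_spec_eth) {
		.type = (enum ibv_exp_flow_spec_type)
			(flow->inner | IBV_EXP_FLOW_SPEC_ETH),
		.size = eth_size,
	};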

* Re: [PATCH v5 0/6] net/mlx5: support flow API
  2016-12-29 15:15       ` [PATCH v5 " Nelio Laranjeiro
  2017-01-03 16:19         ` Ferruh Yigit
@ 2017-01-04 14:48         ` Ferruh Yigit
  1 sibling, 0 replies; 38+ messages in thread
From: Ferruh Yigit @ 2017-01-04 14:48 UTC (permalink / raw)
  To: Nelio Laranjeiro, dev; +Cc: Adrien Mazarguil

On 12/29/2016 3:15 PM, Nelio Laranjeiro wrote:
> Changes in v5:
> 
>  - Fix masking when only spec is present in item structure.
>  - Fix first element of flow items array.
> 
> Changes in v4:
> 
>  - Simplify flow parsing by using a graph.
>  - Add VXLAN flow item.
>  - Add mark flow action.
>  - Extend IPv4 filter item (Type of service, Next Protocol ID).
> 
> Changes in v3:
> 
>  - Fix Ethernet ether type issue.
> 
> Changes in v2:
> 
>  - Fix several issues.
>  - Support VLAN filtering.
> 
> Nelio Laranjeiro (6):
>   net/mlx5: add preliminary flow API support
>   net/mlx5: support basic flow items and actions
>   net/mlx5: support VLAN flow item
>   net/mlx5: support VXLAN flow item
>   net/mlx5: support mark flow action
>   net/mlx5: extend IPv4 flow item
> 

Series applied to dpdk-next-net/master, thanks.

^ permalink raw reply	[flat|nested] 38+ messages in thread

* Re: [PATCH v5 2/6] net/mlx5: support basic flow items and actions
  2016-12-29 15:15       ` [PATCH v5 2/6] net/mlx5: support basic flow items and actions Nelio Laranjeiro
@ 2017-01-04 17:49         ` Ferruh Yigit
  2017-01-04 18:42           ` Adrien Mazarguil
  0 siblings, 1 reply; 38+ messages in thread
From: Ferruh Yigit @ 2017-01-04 17:49 UTC (permalink / raw)
  To: Nelio Laranjeiro, dev; +Cc: Adrien Mazarguil

Hi Nelio,

A quick question.

On 12/29/2016 3:15 PM, Nelio Laranjeiro wrote:
> Introduce initial software for rte_flow rules.
> 
> VLAN, VXLAN are still not supported.
> 
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>

<...>

> +static int
> +priv_flow_validate(struct priv *priv,
> +		   const struct rte_flow_attr *attr,
> +		   const struct rte_flow_item items[],
> +		   const struct rte_flow_action actions[],
> +		   struct rte_flow_error *error,
> +		   struct mlx5_flow *flow)
> +{
> +	const struct mlx5_flow_items *cur_item = mlx5_flow_items;

<...>

> +	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
<...>
> +	}
> +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
<...>
> +	}

Is it guaranteed somewhere that items or actions are terminated with
TYPE_END?
And these fields are direct inputs from the user.
Is there a way to verify that user-provided values are TYPE_END-terminated?

<...>

^ permalink raw reply	[flat|nested] 38+ messages in thread

* Re: [PATCH v5 2/6] net/mlx5: support basic flow items and actions
  2017-01-04 17:49         ` Ferruh Yigit
@ 2017-01-04 18:42           ` Adrien Mazarguil
  2017-01-06 13:52             ` Ferruh Yigit
  0 siblings, 1 reply; 38+ messages in thread
From: Adrien Mazarguil @ 2017-01-04 18:42 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: Nelio Laranjeiro, dev

Hi Ferruh,

On Wed, Jan 04, 2017 at 05:49:46PM +0000, Ferruh Yigit wrote:
> Hi Nelio,
> 
> A quick question.

I'll reply since it's related to the API.

> On 12/29/2016 3:15 PM, Nelio Laranjeiro wrote:
> > Introduce initial software for rte_flow rules.
> > 
> > VLAN, VXLAN are still not supported.
> > 
> > Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> > Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> 
> <...>
> 
> > +static int
> > +priv_flow_validate(struct priv *priv,
> > +		   const struct rte_flow_attr *attr,
> > +		   const struct rte_flow_item items[],
> > +		   const struct rte_flow_action actions[],
> > +		   struct rte_flow_error *error,
> > +		   struct mlx5_flow *flow)
> > +{
> > +	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
> 
> <...>
> 
> > +	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
> <...>
> > +	}
> > +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
> <...>
> > +	}
> 
> Is it guaranteed somewhere that items or actions are terminated with
> TYPE_END?

Yes, since it's now the only way to terminate items/actions lists [1][2].
There used to be a "max" value in the original draft but it seemed redundant
and proved annoying to use, and was therefore dropped.

END items/actions behave like a NUL terminator for C strings. They are
likewise defined with value 0 for convenience.

> And these fields are direct inputs from the user.
> Is there a way to verify that user-provided values are TYPE_END-terminated?

No, applications must check for its presence (they normally add it
themselves) before feeding these lists to PMDs. I think that's safe enough.

Note the testpmd flow command does not allow entering a flow rule without
"end" tokens in both lists, there is no way around this restriction.

[1] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#matching-pattern
[2] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#actions

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 38+ messages in thread
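
The string analogy can be made concrete: the PMD-side walk over both lists
is structurally identical to strlen() scanning for '\0'. A schematic
fragment mirroring the loops in priv_flow_validate() (the process_*()
helpers are hypothetical placeholders, not driver code):

	const struct rte_flow_item *item;
	const struct rte_flow_action *action;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; ++item)
		process_item(item); /* hypothetical helper */
	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END;
	     ++action)
		process_action(action); /* hypothetical helper */

Without the terminators, both loops run off the end of the arrays, exactly
like strlen() on an unterminated buffer.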

* Re: [PATCH v5 2/6] net/mlx5: support basic flow items and actions
  2017-01-04 18:42           ` Adrien Mazarguil
@ 2017-01-06 13:52             ` Ferruh Yigit
  2017-01-09 15:29               ` Adrien Mazarguil
  0 siblings, 1 reply; 38+ messages in thread
From: Ferruh Yigit @ 2017-01-06 13:52 UTC (permalink / raw)
  To: Adrien Mazarguil; +Cc: Nelio Laranjeiro, dev

On 1/4/2017 6:42 PM, Adrien Mazarguil wrote:
> Hi Ferruh,
> 
> On Wed, Jan 04, 2017 at 05:49:46PM +0000, Ferruh Yigit wrote:
>> Hi Nelio,
>>
>> A quick question.
> 
> I'll reply since it's related to the API.
> 
>> On 12/29/2016 3:15 PM, Nelio Laranjeiro wrote:
>>> Introduce initial software for rte_flow rules.
>>>
>>> VLAN, VXLAN are still not supported.
>>>
>>> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
>>> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
>>
>> <...>
>>
>>> +static int
>>> +priv_flow_validate(struct priv *priv,
>>> +		   const struct rte_flow_attr *attr,
>>> +		   const struct rte_flow_item items[],
>>> +		   const struct rte_flow_action actions[],
>>> +		   struct rte_flow_error *error,
>>> +		   struct mlx5_flow *flow)
>>> +{
>>> +	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
>>
>> <...>
>>
>>> +	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
>> <...>
>>> +	}
>>> +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
>> <...>
>>> +	}
>>
>> Is it guaranteed somewhere that items or actions are terminated with
>> TYPE_END?
> 
> Yes, since it's now the only way to terminate items/actions lists [1][2].
> There used to be a "max" value in the original draft but it seemed redundant
> and proved annoying to use, and was therefore dropped.
> 
> END items/actions behave like a NUL terminator for C strings. They are
> likewise defined with value 0 for convenience.

At least it is a good idea to set END values to 0, but if the user does not
set it, this will most probably crash the app.

Although this kind of error will most probably be detected easily during the
development phase, it would still be nice to return an error instead of
crashing when the user provides wrong input.

> 
>> And these fields are direct inputs from the user.
>> Is there a way to verify that user-provided values are TYPE_END-terminated?
> 
> No, applications must check for its presence (they normally add it
> themselves) before feeding these lists to PMDs. I think that's safe enough.
> 
> Note the testpmd flow command does not allow entering a flow rule without
> "end" tokens in both lists; there is no way around this restriction.
> 
> [1] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#matching-pattern
> [2] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#actions
> 

^ permalink raw reply	[flat|nested] 38+ messages in thread

* Re: [PATCH v5 2/6] net/mlx5: support basic flow items and actions
  2017-01-06 13:52             ` Ferruh Yigit
@ 2017-01-09 15:29               ` Adrien Mazarguil
  0 siblings, 0 replies; 38+ messages in thread
From: Adrien Mazarguil @ 2017-01-09 15:29 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: Nelio Laranjeiro, dev

Hi Ferruh,

On Fri, Jan 06, 2017 at 01:52:53PM +0000, Ferruh Yigit wrote:
> On 1/4/2017 6:42 PM, Adrien Mazarguil wrote:
> > Hi Ferruh,
> > 
> > On Wed, Jan 04, 2017 at 05:49:46PM +0000, Ferruh Yigit wrote:
> >> Hi Nelio,
> >>
> >> A quick question.
> > 
> > I'll reply since it's related to the API.
> > 
> >> On 12/29/2016 3:15 PM, Nelio Laranjeiro wrote:
> >>> Introduce initial software for rte_flow rules.
> >>>
> >>> VLAN, VXLAN are still not supported.
> >>>
> >>> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> >>> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> >>
> >> <...>
> >>
> >>> +static int
> >>> +priv_flow_validate(struct priv *priv,
> >>> +		   const struct rte_flow_attr *attr,
> >>> +		   const struct rte_flow_item items[],
> >>> +		   const struct rte_flow_action actions[],
> >>> +		   struct rte_flow_error *error,
> >>> +		   struct mlx5_flow *flow)
> >>> +{
> >>> +	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
> >>
> >> <...>
> >>
> >>> +	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
> >> <...>
> >>> +	}
> >>> +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
> >> <...>
> >>> +	}
> >>
> >> Is it guaranteed somewhere that items or actions are terminated with
> >> TYPE_END?
> > 
> > Yes, since it's now the only way to terminate items/actions lists [1][2].
> > There used to be a "max" value in the original draft but it seemed redundant
> > and proved annoying to use, and was therefore dropped.
> > 
> > END items/actions behave like a NUL terminator for C strings. They are
> > likewise defined with value 0 for convenience.
> 
> At least it is a good idea to set END values to 0, but if the user does not
> set it, this will most probably crash the app.
> 
> Although this kind of error will most probably be detected easily during the
> development phase, it would still be nice to return an error instead of
> crashing when the user provides wrong input.

Unfortunately I cannot think of an easy way to do that, even for debugging
purposes; this would be like checking for unterminated strings or linked
lists without a NULL ending pointer. That's the trade-off of any unbounded
data structure.

Note PMDs will likely return errors as they iterate on garbage item/action
types; crashes will also almost always occur when attempting to dereference
the related spec/last/mask/conf pointers.

> >> And these fields are direct inputs from the user.
> >> Is there a way to verify that user-provided values are TYPE_END-terminated?
> > 
> > No, applications must check for its presence (they normally add it
> > themselves) before feeding these lists to PMDs. I think that's safe enough.
> > 
> > Note the testpmd flow command does not allow entering a flow rule without
> > "end" tokens in both lists; there is no way around this restriction.
> > 
> > [1] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#matching-pattern
> > [2] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#actions

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 38+ messages in thread

end of thread

Thread overview: 38+ messages
2016-11-25 18:14 [PATCH 0/3] net/mlx5: support flow_rte Nelio Laranjeiro
2016-11-25 18:14 ` [PATCH 1/3] net/mlx5: add preliminary support for rte_flow Nelio Laranjeiro
2016-11-25 18:14 ` [PATCH 2/3] net/mlx5: add software " Nelio Laranjeiro
2016-11-25 18:14 ` [PATCH 3/3] net/mlx5: add rte_flow rule creation Nelio Laranjeiro
2016-12-21 10:01 ` [PATCH v2 0/4] net/mlx5: support flow_rte Nelio Laranjeiro
2016-12-21 15:19   ` [PATCH v3 " Nelio Laranjeiro
2016-12-28 10:37     ` [PATCH v4 0/6] net/mlx5: support flow API Nelio Laranjeiro
2016-12-29 15:15       ` [PATCH v5 " Nelio Laranjeiro
2017-01-03 16:19         ` Ferruh Yigit
2017-01-04 14:48         ` Ferruh Yigit
2016-12-29 15:15       ` [PATCH v5 1/6] net/mlx5: add preliminary flow API support Nelio Laranjeiro
2016-12-29 15:15       ` [PATCH v5 2/6] net/mlx5: support basic flow items and actions Nelio Laranjeiro
2017-01-04 17:49         ` Ferruh Yigit
2017-01-04 18:42           ` Adrien Mazarguil
2017-01-06 13:52             ` Ferruh Yigit
2017-01-09 15:29               ` Adrien Mazarguil
2016-12-29 15:15       ` [PATCH v5 3/6] net/mlx5: support VLAN flow item Nelio Laranjeiro
2016-12-29 15:15       ` [PATCH v5 4/6] net/mlx5: support VXLAN " Nelio Laranjeiro
2016-12-29 15:15       ` [PATCH v5 5/6] net/mlx5: support mark flow action Nelio Laranjeiro
2016-12-29 15:15       ` [PATCH v5 6/6] net/mlx5: extend IPv4 flow item Nelio Laranjeiro
2016-12-28 10:37     ` [PATCH v4 1/6] net/mlx5: add preliminary flow API support Nelio Laranjeiro
2016-12-28 10:37     ` [PATCH v4 2/6] net/mlx5: support basic flow items and actions Nelio Laranjeiro
2016-12-28 10:37     ` [PATCH v4 3/6] net/mlx5: support VLAN flow item Nelio Laranjeiro
2016-12-28 10:37     ` [PATCH v4 4/6] net/mlx5: support VXLAN " Nelio Laranjeiro
2016-12-28 10:37     ` [PATCH v4 5/6] net/mlx5: support mark flow action Nelio Laranjeiro
2016-12-28 10:37     ` [PATCH v4 6/6] net/mlx5: extend IPv4 flow item Nelio Laranjeiro
2016-12-21 15:19   ` [PATCH v3 1/4] net/mlx5: add preliminary support for rte_flow Nelio Laranjeiro
2016-12-21 15:19   ` [PATCH v3 2/4] net/mlx5: add software " Nelio Laranjeiro
2016-12-23 12:19     ` Ferruh Yigit
2016-12-23 13:24       ` Adrien Mazarguil
2016-12-21 15:19   ` [PATCH v3 3/4] net/mlx5: add rte_flow rule creation Nelio Laranjeiro
2016-12-23 12:21     ` Ferruh Yigit
2016-12-26 12:20       ` Nélio Laranjeiro
2016-12-21 15:19   ` [PATCH v3 4/4] net/mlx5: add VLAN filter support in rte_flow Nelio Laranjeiro
2016-12-21 10:01 ` [PATCH v2 1/4] net/mlx5: add preliminary support for rte_flow Nelio Laranjeiro
2016-12-21 10:01 ` [PATCH v2 2/4] net/mlx5: add software " Nelio Laranjeiro
2016-12-21 10:01 ` [PATCH v2 3/4] net/mlx5: add rte_flow rule creation Nelio Laranjeiro
2016-12-21 10:01 ` [PATCH v2 4/4] net/mlx5: add VLAN filter support in rte_flow Nelio Laranjeiro
