From: Ori Kam <orika@mellanox.com>
To: Matan Azrad <matan@mellanox.com>,
	Shahaf Shuler <shahafs@mellanox.com>,
	Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Cc: dev@dpdk.org, orika@mellanox.com, jingjing.wu@intel.com,
	stephen@networkplumber.org
Subject: [dpdk-dev] [PATCH v6 14/14] net/mlx5: split hairpin flows
Date: Sun, 27 Oct 2019 12:25:01 +0000
Message-ID: <1572179102-163236-15-git-send-email-orika@mellanox.com>
In-Reply-To: <1572179102-163236-1-git-send-email-orika@mellanox.com>

Since the encap action is not supported on Rx, a hairpin flow that uses
encap needs to be split into an Rx flow and a Tx flow.
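
For illustration, a rule of the kind this split applies to could look
like the following application-side sketch (not part of this patch;
port_id, hairpin_queue_id, encap_hdr and encap_hdr_len are hypothetical
placeholders). An ingress rule that directs traffic to a hairpin queue
and also requests an encap larger than the Ethernet + IPv4 headers is
internally split by the PMD into an Rx flow (match + set tag + queue)
and a Tx flow (match on the tag + encap), linked by an ID taken from
the new per-device flow ID pool:

	#include <rte_flow.h>	/* rte_flow_* definitions */

	/* Hypothetical sketch, not part of the patch. */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = hairpin_queue_id };
	struct rte_flow_action_raw_encap encap = {
		.data = encap_hdr,	/* pre-built outer headers */
		.size = encap_hdr_len,	/* > eth + ipv4, so encap moves to Tx */
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &error);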

Signed-off-by: Ori Kam <orika@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 drivers/net/mlx5/mlx5.c            |  10 ++
 drivers/net/mlx5/mlx5.h            |  10 ++
 drivers/net/mlx5/mlx5_flow.c       | 281 +++++++++++++++++++++++++++++++++++--
 drivers/net/mlx5/mlx5_flow.h       |  14 +-
 drivers/net/mlx5/mlx5_flow_dv.c    |  10 +-
 drivers/net/mlx5/mlx5_flow_verbs.c |  11 +-
 drivers/net/mlx5/mlx5_rxq.c        |  26 ++++
 drivers/net/mlx5/mlx5_rxtx.h       |   2 +
 8 files changed, 334 insertions(+), 30 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index b7a98b8..b622339 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -531,6 +531,12 @@ struct mlx5_flow_id_pool *
 			goto error;
 		}
 	}
+	sh->flow_id_pool = mlx5_flow_id_pool_alloc();
+	if (!sh->flow_id_pool) {
+		DRV_LOG(ERR, "can't create flow id pool");
+		err = ENOMEM;
+		goto error;
+	}
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 	/*
 	 * Once the device is added to the list of memory event
@@ -570,6 +576,8 @@ struct mlx5_flow_id_pool *
 		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
+	if (sh->flow_id_pool)
+		mlx5_flow_id_pool_release(sh->flow_id_pool);
 	rte_free(sh);
 	assert(err > 0);
 	rte_errno = err;
@@ -641,6 +649,8 @@ struct mlx5_flow_id_pool *
 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
+	if (sh->flow_id_pool)
+		mlx5_flow_id_pool_release(sh->flow_id_pool);
 	rte_free(sh);
 exit:
 	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 1181c1f..f644998 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -578,6 +578,15 @@ struct mlx5_devx_dbr_page {
 	uint64_t dbr_bitmap[MLX5_DBR_BITMAP_SIZE];
 };
 
+/* ID generation structure. */
+struct mlx5_flow_id_pool {
+	uint32_t *free_arr; /**< Pointer to the array of free values. */
+	uint32_t base_index;
+	/**< The next index that can be used without any free elements. */
+	uint32_t *curr; /**< Pointer to the index to pop. */
+	uint32_t *last; /**< Pointer to the last element in the empty array. */
+};
+
 /*
  * Shared Infiniband device context for Master/Representors
  * which belong to same IB device with multiple IB ports.
@@ -637,6 +646,7 @@ struct mlx5_ibv_shared {
 	struct mlx5dv_devx_cmd_comp *devx_comp; /* DEVX async comp obj. */
 	struct mlx5_devx_obj *tis; /* TIS object. */
 	struct mlx5_devx_obj *td; /* Transport domain. */
+	struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
 	struct mlx5_ibv_shared_port port[]; /* per device port data array. */
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 1148db0..5f01f9c 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -606,7 +606,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(flow->actions &
+	const int mark = !!(dev_flow->actions &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
 	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
@@ -669,7 +669,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(flow->actions &
+	const int mark = !!(dev_flow->actions &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
 	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
@@ -2527,6 +2527,210 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 }
 
 /**
+ * Check if the flow should be split due to hairpin.
+ * The reason for the split is that in the current HW we can't
+ * support encap on Rx, so if a flow has encap we move it
+ * to Tx.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ *
+ * @return
+ *   > 0 the number of actions and the flow should be split,
+ *   0 when no split is required.
+ */
+static int
+flow_check_hairpin_split(struct rte_eth_dev *dev,
+			 const struct rte_flow_attr *attr,
+			 const struct rte_flow_action actions[])
+{
+	int queue_action = 0;
+	int action_n = 0;
+	int encap = 0;
+	const struct rte_flow_action_queue *queue;
+	const struct rte_flow_action_rss *rss;
+	const struct rte_flow_action_raw_encap *raw_encap;
+
+	if (!attr->ingress)
+		return 0;
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			queue = actions->conf;
+			if (mlx5_rxq_get_type(dev, queue->index) !=
+			    MLX5_RXQ_TYPE_HAIRPIN)
+				return 0;
+			queue_action = 1;
+			action_n++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = actions->conf;
+			if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
+			    MLX5_RXQ_TYPE_HAIRPIN)
+				return 0;
+			queue_action = 1;
+			action_n++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+			encap = 1;
+			action_n++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+			raw_encap = actions->conf;
+			if (raw_encap->size >
+			    (sizeof(struct rte_flow_item_eth) +
+			     sizeof(struct rte_flow_item_ipv4)))
+				encap = 1;
+			action_n++;
+			break;
+		default:
+			action_n++;
+			break;
+		}
+	}
+	if (encap == 1 && queue_action)
+		return action_n;
+	return 0;
+}
+
+#define MLX5_MAX_SPLIT_ACTIONS 24
+#define MLX5_MAX_SPLIT_ITEMS 24
+
+/**
+ * Split the hairpin flow.
+ * Since the HW can't support encap on Rx, we move the encap to Tx.
+ * If the count action is after the encap then we also
+ * move the count action. In this case the count will also measure
+ * the outer bytes.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] actions_rx
+ *   Rx flow actions.
+ * @param[out] actions_tx
+ *   Tx flow actions.
+ * @param[out] pattern_tx
+ *   The pattern items for the Tx flow.
+ * @param[out] flow_id
+ *   The flow ID connected to this flow.
+ *
+ * @return
+ *   0 on success.
+ */
+static int
+flow_hairpin_split(struct rte_eth_dev *dev,
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_action actions_rx[],
+		   struct rte_flow_action actions_tx[],
+		   struct rte_flow_item pattern_tx[],
+		   uint32_t *flow_id)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_action_raw_encap *raw_encap;
+	const struct rte_flow_action_raw_decap *raw_decap;
+	struct mlx5_rte_flow_action_set_tag *set_tag;
+	struct rte_flow_action *tag_action;
+	struct mlx5_rte_flow_item_tag *tag_item;
+	struct rte_flow_item *item;
+	char *addr;
+	struct rte_flow_error error;
+	int encap = 0;
+
+	mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+			rte_memcpy(actions_tx, actions,
+			       sizeof(struct rte_flow_action));
+			actions_tx++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			if (encap) {
+				rte_memcpy(actions_tx, actions,
+					   sizeof(struct rte_flow_action));
+				actions_tx++;
+			} else {
+				rte_memcpy(actions_rx, actions,
+					   sizeof(struct rte_flow_action));
+				actions_rx++;
+			}
+			break;
+		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+			raw_encap = actions->conf;
+			if (raw_encap->size >
+			    (sizeof(struct rte_flow_item_eth) +
+			     sizeof(struct rte_flow_item_ipv4))) {
+				memcpy(actions_tx, actions,
+				       sizeof(struct rte_flow_action));
+				actions_tx++;
+				encap = 1;
+			} else {
+				rte_memcpy(actions_rx, actions,
+					   sizeof(struct rte_flow_action));
+				actions_rx++;
+			}
+			break;
+		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+			raw_decap = actions->conf;
+			if (raw_decap->size <
+			    (sizeof(struct rte_flow_item_eth) +
+			     sizeof(struct rte_flow_item_ipv4))) {
+				memcpy(actions_tx, actions,
+				       sizeof(struct rte_flow_action));
+				actions_tx++;
+			} else {
+				rte_memcpy(actions_rx, actions,
+					   sizeof(struct rte_flow_action));
+				actions_rx++;
+			}
+			break;
+		default:
+			rte_memcpy(actions_rx, actions,
+				   sizeof(struct rte_flow_action));
+			actions_rx++;
+			break;
+		}
+	}
+	/* Add set meta action and end action for the Rx flow. */
+	tag_action = actions_rx;
+	tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
+	actions_rx++;
+	rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
+	actions_rx++;
+	set_tag = (void *)actions_rx;
+	set_tag->id = flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, &error);
+	set_tag->data = rte_cpu_to_be_32(*flow_id);
+	tag_action->conf = set_tag;
+	/* Create Tx item list. */
+	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
+	addr = (void *)&pattern_tx[2];
+	item = pattern_tx;
+	item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
+	tag_item = (void *)addr;
+	tag_item->data = rte_cpu_to_be_32(*flow_id);
+	tag_item->id = flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, &error);
+	item->spec = tag_item;
+	addr += sizeof(struct mlx5_rte_flow_item_tag);
+	tag_item = (void *)addr;
+	tag_item->data = UINT32_MAX;
+	tag_item->id = UINT16_MAX;
+	item->mask = tag_item;
+	addr += sizeof(struct mlx5_rte_flow_item_tag);
+	item->last = NULL;
+	item++;
+	item->type = RTE_FLOW_ITEM_TYPE_END;
+	return 0;
+}
+
+/**
  * Create a flow and add it to @p list.
  *
  * @param dev
@@ -2554,6 +2758,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		 const struct rte_flow_action actions[],
 		 bool external, struct rte_flow_error *error)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = NULL;
 	struct mlx5_flow *dev_flow;
 	const struct rte_flow_action_rss *rss;
@@ -2561,16 +2766,44 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		struct rte_flow_expand_rss buf;
 		uint8_t buffer[2048];
 	} expand_buffer;
+	union {
+		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
+		uint8_t buffer[2048];
+	} actions_rx;
+	union {
+		struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
+		uint8_t buffer[2048];
+	} actions_hairpin_tx;
+	union {
+		struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
+		uint8_t buffer[2048];
+	} items_tx;
 	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+	const struct rte_flow_action *p_actions_rx = actions;
 	int ret;
 	uint32_t i;
 	uint32_t flow_size;
+	int hairpin_flow = 0;
+	uint32_t hairpin_id = 0;
+	struct rte_flow_attr attr_tx = { .priority = 0 };
 
-	ret = flow_drv_validate(dev, attr, items, actions, external, error);
+	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
+	if (hairpin_flow > 0) {
+		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
+			rte_errno = EINVAL;
+			return NULL;
+		}
+		flow_hairpin_split(dev, actions, actions_rx.actions,
+				   actions_hairpin_tx.actions, items_tx.items,
+				   &hairpin_id);
+		p_actions_rx = actions_rx.actions;
+	}
+	ret = flow_drv_validate(dev, attr, items, p_actions_rx, external,
+				error);
 	if (ret < 0)
-		return NULL;
+		goto error_before_flow;
 	flow_size = sizeof(struct rte_flow);
-	rss = flow_get_rss_action(actions);
+	rss = flow_get_rss_action(p_actions_rx);
 	if (rss)
 		flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
 					    sizeof(void *));
@@ -2579,11 +2812,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	flow = rte_calloc(__func__, 1, flow_size, 0);
 	if (!flow) {
 		rte_errno = ENOMEM;
-		return NULL;
+		goto error_before_flow;
 	}
 	flow->drv_type = flow_get_drv_type(dev, attr);
 	flow->ingress = attr->ingress;
 	flow->transfer = attr->transfer;
+	if (hairpin_id != 0)
+		flow->hairpin_flow_id = hairpin_id;
 	assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
 	       flow->drv_type < MLX5_FLOW_TYPE_MAX);
 	flow->queue = (void *)(flow + 1);
@@ -2604,7 +2839,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	}
 	for (i = 0; i < buf->entries; ++i) {
 		dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
-					    actions, error);
+					    p_actions_rx, error);
 		if (!dev_flow)
 			goto error;
 		dev_flow->flow = flow;
@@ -2612,7 +2847,24 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 		ret = flow_drv_translate(dev, dev_flow, attr,
 					 buf->entry[i].pattern,
-					 actions, error);
+					 p_actions_rx, error);
+		if (ret < 0)
+			goto error;
+	}
+	/* Create the tx flow. */
+	if (hairpin_flow) {
+		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
+		attr_tx.ingress = 0;
+		attr_tx.egress = 1;
+		dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+					    actions_hairpin_tx.actions, error);
+		if (!dev_flow)
+			goto error;
+		dev_flow->flow = flow;
+		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
+					 items_tx.items,
+					 actions_hairpin_tx.actions, error);
 		if (ret < 0)
 			goto error;
 	}
@@ -2624,8 +2876,16 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	TAILQ_INSERT_TAIL(list, flow, next);
 	flow_rxq_flags_set(dev, flow);
 	return flow;
+error_before_flow:
+	if (hairpin_id)
+		mlx5_flow_id_release(priv->sh->flow_id_pool,
+				     hairpin_id);
+	return NULL;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
+	if (flow->hairpin_flow_id)
+		mlx5_flow_id_release(priv->sh->flow_id_pool,
+				     flow->hairpin_flow_id);
 	assert(flow);
 	flow_drv_destroy(dev, flow);
 	rte_free(flow);
@@ -2715,12 +2975,17 @@ struct rte_flow *
 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
 		  struct rte_flow *flow)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
+
 	/*
 	 * Update RX queue flags only if port is started, otherwise it is
 	 * already clean.
 	 */
 	if (dev->data->dev_started)
 		flow_rxq_flags_trim(dev, flow);
+	if (flow->hairpin_flow_id)
+		mlx5_flow_id_release(priv->sh->flow_id_pool,
+				     flow->hairpin_flow_id);
 	flow_drv_destroy(dev, flow);
 	TAILQ_REMOVE(list, flow, next);
 	rte_free(flow->fdir);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f81e1b1..7559810 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -466,6 +466,8 @@ struct mlx5_flow {
 	struct rte_flow *flow; /**< Pointer to the main flow. */
 	uint64_t layers;
 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+	uint64_t actions;
+	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		struct mlx5_flow_dv dv;
@@ -487,12 +489,11 @@ struct rte_flow {
 	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
 	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
 	/**< Device flows that are part of the flow. */
-	uint64_t actions;
-	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
 	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
 	uint8_t ingress; /**< 1 if the flow is ingress. */
 	uint32_t group; /**< The group index. */
 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
+	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
 };
 
 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
@@ -536,15 +537,6 @@ struct mlx5_flow_driver_ops {
 #define MLX5_CNT_CONTAINER_UNUSED(sh, batch, thread) (&(sh)->cmng.ccont \
 	[(~((sh)->cmng.mhi[batch] >> (thread)) & 0x1) * 2 + (batch)])
 
-/* ID generation structure. */
-struct mlx5_flow_id_pool {
-	uint32_t *free_arr; /**< Pointer to the a array of free values. */
-	uint32_t base_index;
-	/**< The next index that can be used without any free elements. */
-	uint32_t *curr; /**< Pointer to the index to pop. */
-	uint32_t *last; /**< Pointer to the last element in the empty arrray. */
-};
-
 /* mlx5_flow.c */
 
 struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(void);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 13178cc..d9a7fd4 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -5843,7 +5843,7 @@ struct field_modify_info modify_tcp[] = {
 			modify_action_position = actions_n++;
 	}
 	dev_flow->dv.actions_n = actions_n;
-	flow->actions = action_flags;
+	dev_flow->actions = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -6070,7 +6070,7 @@ struct field_modify_info modify_tcp[] = {
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
 		dv = &dev_flow->dv;
 		n = dv->actions_n;
-		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
+		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
 			if (flow->transfer) {
 				dv->actions[n++] = priv->sh->esw_drop_action;
 			} else {
@@ -6085,7 +6085,7 @@ struct field_modify_info modify_tcp[] = {
 				}
 				dv->actions[n++] = dv->hrxq->action;
 			}
-		} else if (flow->actions &
+		} else if (dev_flow->actions &
 			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
 			struct mlx5_hrxq *hrxq;
 
@@ -6141,7 +6141,7 @@ struct field_modify_info modify_tcp[] = {
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
 		struct mlx5_flow_dv *dv = &dev_flow->dv;
 		if (dv->hrxq) {
-			if (flow->actions & MLX5_FLOW_ACTION_DROP)
+			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
 				mlx5_hrxq_release(dev, dv->hrxq);
@@ -6375,7 +6375,7 @@ struct field_modify_info modify_tcp[] = {
 			dv->flow = NULL;
 		}
 		if (dv->hrxq) {
-			if (flow->actions & MLX5_FLOW_ACTION_DROP)
+			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
 				mlx5_hrxq_release(dev, dv->hrxq);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 23110f2..fd27f6c 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -191,7 +191,7 @@
 {
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
-	if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
+	if (flow->counter->cs) {
 		struct rte_flow_query_count *qc = data;
 		uint64_t counters[2] = {0, 0};
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
@@ -1410,7 +1410,6 @@
 		     const struct rte_flow_action actions[],
 		     struct rte_flow_error *error)
 {
-	struct rte_flow *flow = dev_flow->flow;
 	uint64_t item_flags = 0;
 	uint64_t action_flags = 0;
 	uint64_t priority = attr->priority;
@@ -1460,7 +1459,7 @@
 						  "action not supported");
 		}
 	}
-	flow->actions = action_flags;
+	dev_flow->actions = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
@@ -1592,7 +1591,7 @@
 			verbs->flow = NULL;
 		}
 		if (verbs->hrxq) {
-			if (flow->actions & MLX5_FLOW_ACTION_DROP)
+			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
 				mlx5_hrxq_release(dev, verbs->hrxq);
@@ -1656,7 +1655,7 @@
 
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
 		verbs = &dev_flow->verbs;
-		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
+		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
 			verbs->hrxq = mlx5_hrxq_drop_new(dev);
 			if (!verbs->hrxq) {
 				rte_flow_error_set
@@ -1717,7 +1716,7 @@
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
 		verbs = &dev_flow->verbs;
 		if (verbs->hrxq) {
-			if (flow->actions & MLX5_FLOW_ACTION_DROP)
+			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
 				mlx5_hrxq_release(dev, verbs->hrxq);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 2c3d5eb..24d0eaa 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2097,6 +2097,32 @@ struct mlx5_rxq_ctrl *
 }
 
 /**
+ * Get a Rx queue type.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   Rx queue index.
+ *
+ * @return
+ *   The Rx queue type.
+ */
+enum mlx5_rxq_type
+mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+
+	if ((*priv->rxqs)[idx]) {
+		rxq_ctrl = container_of((*priv->rxqs)[idx],
+					struct mlx5_rxq_ctrl,
+					rxq);
+		return rxq_ctrl->type;
+	}
+	return MLX5_RXQ_TYPE_UNDEFINED;
+}
+
+/**
  * Create an indirection table.
  *
  * @param dev
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 271b648..d4ba25f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -166,6 +166,7 @@ enum mlx5_rxq_obj_type {
 enum mlx5_rxq_type {
 	MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */
 	MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */
+	MLX5_RXQ_TYPE_UNDEFINED,
 };
 
 /* Verbs/DevX Rx queue elements. */
@@ -406,6 +407,7 @@ struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
 				const uint16_t *queues, uint32_t queues_n);
 int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
 int mlx5_hrxq_verify(struct rte_eth_dev *dev);
+enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
 void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
 uint64_t mlx5_get_rx_port_offloads(void);
-- 
1.8.3.1


