From: Suanming Mou <suanmingm@nvidia.com>
To: Matan Azrad <matan@nvidia.com>,
	Shahaf Shuler <shahafs@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v3 23/34] net/mlx5: make Rx queue thread safe
Date: Tue, 27 Oct 2020 20:27:18 +0800
Message-ID: <1603801650-442376-24-git-send-email-suanmingm@nvidia.com>
In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>

This commit applies the cache linked list to the hash Rx queues, making
their creation and release thread safe.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
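Note: below is a minimal, standalone sketch of the cache-list pattern the
hash Rx queues are switched to. It is illustrative only, not the
mlx5_cache_list implementation introduced earlier in this series, and all
names in it (cache_register(), cache_unregister(), cache_list, ...) are
made up for the example. In the driver, priv->hrxqs plays the role of the
list, mlx5_hrxq_get() corresponds to the register step (with an
mlx5_flow_cb_ctx carrying the RSS descriptor), and mlx5_hrxq_release()
corresponds to the unregister step for non-standalone queues.

/*
 * Toy thread-safe cache list: a mutex-protected list of refcounted
 * entries with match/create/remove callbacks supplied by the user,
 * mirroring the mlx5_hrxq_{match,create,remove}_cb trio wired up via
 * mlx5_cache_list_init() in the diff below.
 */
#include <pthread.h>
#include <stddef.h>
#include <sys/queue.h>

struct cache_entry {
	LIST_ENTRY(cache_entry) next;
	unsigned int ref_cnt;
};

struct cache_list;
typedef int (cache_match_t)(struct cache_list *list,
			    struct cache_entry *entry, void *ctx);
typedef struct cache_entry *(cache_create_t)(struct cache_list *list,
					      void *ctx);
typedef void (cache_remove_t)(struct cache_list *list,
			      struct cache_entry *entry);

struct cache_list {
	pthread_mutex_t lock;
	LIST_HEAD(, cache_entry) head;
	cache_match_t *match;
	cache_create_t *create;
	cache_remove_t *remove;
};

/* Lookup-or-create: every successful call takes one reference. */
static struct cache_entry *
cache_register(struct cache_list *list, void *ctx)
{
	struct cache_entry *entry;

	pthread_mutex_lock(&list->lock);
	LIST_FOREACH(entry, &list->head, next)
		if (list->match(list, entry, ctx) == 0)
			break;
	if (entry == NULL) {
		entry = list->create(list, ctx);
		if (entry != NULL) {
			entry->ref_cnt = 0;
			LIST_INSERT_HEAD(&list->head, entry, next);
		}
	}
	if (entry != NULL)
		entry->ref_cnt++;
	pthread_mutex_unlock(&list->lock);
	return entry;
}

/* Drop one reference; remove and destroy the entry on the last one. */
static int
cache_unregister(struct cache_list *list, struct cache_entry *entry)
{
	int last;

	pthread_mutex_lock(&list->lock);
	last = (--entry->ref_cnt == 0);
	if (last)
		LIST_REMOVE(entry, next);
	pthread_mutex_unlock(&list->lock);
	if (last)
		list->remove(list, entry);
	return !last; /* 1 while still referenced, 0 when freed. */
}
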
 drivers/net/mlx5/linux/mlx5_os.c   |   5 +
 drivers/net/mlx5/mlx5.c            |   1 +
 drivers/net/mlx5/mlx5.h            |  28 +++-
 drivers/net/mlx5/mlx5_flow.h       |  16 ---
 drivers/net/mlx5/mlx5_flow_dv.c    |  74 ++++------
 drivers/net/mlx5/mlx5_flow_verbs.c |  21 +--
 drivers/net/mlx5/mlx5_rxq.c        | 281 ++++++++++++++++++++-----------------
 drivers/net/mlx5/mlx5_rxtx.h       |  20 +--
 8 files changed, 228 insertions(+), 218 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index d017c23..10fc7c5 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1468,6 +1468,10 @@
 			err = ENOTSUP;
 			goto error;
 	}
+	mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
+			     mlx5_hrxq_create_cb,
+			     mlx5_hrxq_match_cb,
+			     mlx5_hrxq_remove_cb);
 	/* Query availability of metadata reg_c's. */
 	err = mlx5_flow_discover_mreg_c(eth_dev);
 	if (err < 0) {
@@ -1520,6 +1524,7 @@
 			mlx5_drop_action_destroy(eth_dev);
 		if (own_domain_id)
 			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+		mlx5_cache_list_destroy(&priv->hrxqs);
 		mlx5_free(priv);
 		if (eth_dev != NULL)
 			eth_dev->data->dev_private = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 1d25a8e..862bd40 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1286,6 +1286,7 @@ struct mlx5_dev_ctx_shared *
 	if (ret)
 		DRV_LOG(WARNING, "port %u some flows still remain",
 			dev->data->port_id);
+	mlx5_cache_list_destroy(&priv->hrxqs);
 	/*
 	 * Free the shared context in last turn, because the cleanup
 	 * routines above may use some shared fields, like
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 417e111..99dfcd7 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -65,6 +65,13 @@ enum mlx5_reclaim_mem_mode {
 	MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
 };
 
+/* Hash and cache list callback context. */
+struct mlx5_flow_cb_ctx {
+	struct rte_eth_dev *dev;
+	struct rte_flow_error *error;
+	void *data;
+};
+
 /* Device attributes used in mlx5 PMD */
 struct mlx5_dev_attr {
 	uint64_t	device_cap_flags_ex;
@@ -688,6 +695,22 @@ struct mlx5_proc_priv {
 /* MTR list. */
 TAILQ_HEAD(mlx5_flow_meters, mlx5_flow_meter);
 
+/* RSS description. */
+struct mlx5_flow_rss_desc {
+	uint32_t level;
+	uint32_t queue_num; /**< Number of entries in @p queue. */
+	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t hash_fields; /* Verbs Hash fields. */
+	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+	uint32_t key_len; /**< RSS hash key len. */
+	uint32_t tunnel; /**< Queue in tunnel. */
+	union {
+		uint16_t *queue; /**< Destination queues. */
+		const uint16_t *const_q; /**< Const pointer to destination queues. */
+	};
+	bool standalone; /**< Queue is standalone or not. */
+};
+
 #define MLX5_PROC_PRIV(port_id) \
 	((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
 
@@ -727,7 +750,7 @@ struct mlx5_ind_table_obj {
 /* Hash Rx queue. */
 __extension__
 struct mlx5_hrxq {
-	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
+	struct mlx5_cache_entry entry; /* Cache entry. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	uint32_t standalone:1; /* This object used in shared action. */
 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
@@ -741,6 +764,7 @@ struct mlx5_hrxq {
 #endif
 	uint64_t hash_fields; /* Verbs Hash fields. */
 	uint32_t rss_key_len; /* Hash key length in bytes. */
+	uint32_t idx; /* Hash Rx queue index. */
 	uint8_t rss_key[]; /* Hash key. */
 };
 
@@ -858,7 +882,7 @@ struct mlx5_priv {
 	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
-	uint32_t hrxqs; /* Verbs Hash Rx queues. */
+	struct mlx5_cache_list hrxqs; /* Hash Rx queues. */
 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
 	LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
 	/* Indirection tables. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4bc540b..6b706e7 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -385,13 +385,6 @@ enum mlx5_flow_fate_type {
 	MLX5_FLOW_FATE_MAX,
 };
 
-/* Hash list callback context */
-struct mlx5_flow_cb_ctx {
-	struct rte_eth_dev *dev;
-	struct rte_flow_error *error;
-	void *data;
-};
-
 /* Matcher PRM representation */
 struct mlx5_flow_dv_match_params {
 	size_t size;
@@ -610,15 +603,6 @@ struct ibv_spec_header {
 	uint16_t size;
 };
 
-/* RSS description. */
-struct mlx5_flow_rss_desc {
-	uint32_t level;
-	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
-	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
-	uint16_t *queue; /**< Destination queues. */
-};
-
 /* PMD flow priority for tunnel */
 #define MLX5_TUNNEL_PRIO_GET(rss_desc) \
 	((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 1bcbe38..47dea4a 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8521,7 +8521,7 @@ struct mlx5_hlist_entry *
 }
 
 /**
- * Create an Rx Hash queue.
+ * Prepare an Rx Hash queue.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -8536,29 +8536,23 @@ struct mlx5_hlist_entry *
  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
 static struct mlx5_hrxq *
-flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
-			struct mlx5_flow *dev_flow,
-			struct mlx5_flow_rss_desc *rss_desc,
-			uint32_t *hrxq_idx)
+flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
+		     struct mlx5_flow *dev_flow,
+		     struct mlx5_flow_rss_desc *rss_desc,
+		     uint32_t *hrxq_idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_handle *dh = dev_flow->handle;
 	struct mlx5_hrxq *hrxq;
 
 	MLX5_ASSERT(rss_desc->queue_num);
-	*hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
-				  dev_flow->hash_fields,
-				  rss_desc->queue, rss_desc->queue_num);
-	if (!*hrxq_idx) {
-		*hrxq_idx = mlx5_hrxq_new
-				(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
-				 dev_flow->hash_fields,
-				 rss_desc->queue, rss_desc->queue_num,
-				 !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL),
-				 false);
-		if (!*hrxq_idx)
-			return NULL;
-	}
+	rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
+	rss_desc->hash_fields = dev_flow->hash_fields;
+	rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
+	rss_desc->standalone = false;
+	*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
+	if (!*hrxq_idx)
+		return NULL;
 	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 			      *hrxq_idx);
 	return hrxq;
@@ -8921,8 +8915,8 @@ struct mlx5_hlist_entry *
 			queue = sub_actions->conf;
 			rss_desc->queue_num = 1;
 			rss_desc->queue[0] = queue->index;
-			hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
-					rss_desc, &hrxq_idx);
+			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+						    rss_desc, &hrxq_idx);
 			if (!hrxq)
 				return rte_flow_error_set
 					(error, rte_errno,
@@ -9119,8 +9113,8 @@ struct mlx5_hlist_entry *
 	if (num_of_dest > 1) {
 		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
 			/* Handle QP action for mirroring */
-			hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
-						       rss_desc, &hrxq_idx);
+			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+						    rss_desc, &hrxq_idx);
 			if (!hrxq)
 				return rte_flow_error_set
 				     (error, rte_errno,
@@ -10254,24 +10248,8 @@ struct mlx5_hlist_entry *
 		struct mlx5_flow_rss_desc *rss_desc =
 				&wks->rss_desc[!!wks->flow_nested_idx];
 
-		MLX5_ASSERT(rss_desc->queue_num);
-		hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
-					 MLX5_RSS_HASH_KEY_LEN,
-					 dev_flow->hash_fields,
-					 rss_desc->queue, rss_desc->queue_num);
-		if (!hrxq_idx) {
-			hrxq_idx = mlx5_hrxq_new(dev,
-						 rss_desc->key,
-						 MLX5_RSS_HASH_KEY_LEN,
-						 dev_flow->hash_fields,
-						 rss_desc->queue,
-						 rss_desc->queue_num,
-						 !!(dev_flow->handle->layers &
-						 MLX5_FLOW_LAYER_TUNNEL),
-						 false);
-		}
-		*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-				       hrxq_idx);
+		*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
+					     &hrxq_idx);
 	}
 	return hrxq_idx;
 }
@@ -10325,7 +10303,6 @@ struct mlx5_hlist_entry *
 			struct mlx5_hrxq *hrxq = NULL;
 			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
 						(dev, flow, dev_flow, &hrxq);
-
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
@@ -10945,21 +10922,24 @@ struct mlx5_hlist_entry *
 			struct mlx5_shared_action_rss *action,
 			struct rte_flow_error *error)
 {
+	struct mlx5_flow_rss_desc rss_desc = { 0 };
 	size_t i;
 	int err;
 
+	memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
+	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
+	rss_desc.const_q = action->origin.queue;
+	rss_desc.queue_num = action->origin.queue_num;
+	rss_desc.standalone = true;
 	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
 		uint32_t hrxq_idx;
 		uint64_t hash_fields = mlx5_rss_hash_fields[i];
 		int tunnel;
 
 		for (tunnel = 0; tunnel < 2; tunnel++) {
-			hrxq_idx = mlx5_hrxq_new(dev, action->origin.key,
-					MLX5_RSS_HASH_KEY_LEN,
-					hash_fields,
-					action->origin.queue,
-					action->origin.queue_num,
-					tunnel, true);
+			rss_desc.tunnel = tunnel;
+			rss_desc.hash_fields = hash_fields;
+			hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
 			if (!hrxq_idx) {
 				rte_flow_error_set
 					(error, rte_errno,
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index ba6731a..9afa563 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1963,21 +1963,12 @@
 				&wks->rss_desc[!!wks->flow_nested_idx];
 
 			MLX5_ASSERT(rss_desc->queue_num);
-			hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
-						 MLX5_RSS_HASH_KEY_LEN,
-						 dev_flow->hash_fields,
-						 rss_desc->queue,
-						 rss_desc->queue_num);
-			if (!hrxq_idx)
-				hrxq_idx = mlx5_hrxq_new
-						(dev, rss_desc->key,
-						 MLX5_RSS_HASH_KEY_LEN,
-						 dev_flow->hash_fields,
-						 rss_desc->queue,
-						 rss_desc->queue_num,
-						 !!(handle->layers &
-						 MLX5_FLOW_LAYER_TUNNEL),
-						 false);
+			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
+			rss_desc->hash_fields = dev_flow->hash_fields;
+			rss_desc->tunnel = !!(handle->layers &
+					      MLX5_FLOW_LAYER_TUNNEL);
+			rss_desc->standalone = false;
+			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 					      hrxq_idx);
 			if (!hrxq) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 9c9f8c4..0b0bdcc 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2040,54 +2040,38 @@ struct mlx5_ind_table_obj *
 }
 
 /**
- * Get an Rx Hash queue.
+ * Match an Rx Hash queue.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param rss_conf
- *   RSS configuration for the Rx hash queue.
- * @param queues
- *   Queues entering in hash queue. In case of empty hash_fields only the
- *   first queue index will be taken for the indirection table.
- * @param queues_n
- *   Number of queues.
+ * @param list
+ *   Cache list pointer.
+ * @param entry
+ *   Hash queue entry pointer.
+ * @param cb_ctx
+ *   Context of the callback function.
  *
  * @return
- *   An hash Rx queue index on success.
+ *   0 if match, non-zero otherwise.
  */
-uint32_t
-mlx5_hrxq_get(struct rte_eth_dev *dev,
-	      const uint8_t *rss_key, uint32_t rss_key_len,
-	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n)
+int
+mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
+		   struct mlx5_cache_entry *entry,
+		   void *cb_ctx)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
-	uint32_t idx;
-
-	queues_n = hash_fields ? queues_n : 1;
-	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
-		      hrxq, next) {
-		struct mlx5_ind_table_obj *ind_tbl;
+	struct rte_eth_dev *dev = list->ctx;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+	struct mlx5_ind_table_obj *ind_tbl;
 
-		if (hrxq->rss_key_len != rss_key_len)
-			continue;
-		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
-			continue;
-		if (hrxq->hash_fields != hash_fields)
-			continue;
-		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
-		if (!ind_tbl)
-			continue;
-		if (ind_tbl != hrxq->ind_table) {
-			mlx5_ind_table_obj_release(dev, ind_tbl,
-						   hrxq->standalone);
-			continue;
-		}
-		rte_atomic32_inc(&hrxq->refcnt);
-		return idx;
-	}
-	return 0;
+	if (hrxq->rss_key_len != rss_desc->key_len ||
+	    memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
+	    hrxq->hash_fields != rss_desc->hash_fields)
+		return 1;
+	ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue,
+					 rss_desc->queue_num);
+	if (ind_tbl)
+		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+	return ind_tbl != hrxq->ind_table;
 }
 
 /**
@@ -2172,114 +2156,163 @@ struct mlx5_ind_table_obj *
 	return -rte_errno;
 }
 
-/**
- * Release the hash Rx queue.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param hrxq
- *   Index to Hash Rx queue to release.
- *
- * @return
- *   1 while a reference on it exists, 0 when freed.
- */
-int
-mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+static void
+__mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
 
-	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
-	if (!hrxq)
-		return 0;
-	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		mlx5_glue->destroy_flow_action(hrxq->action);
+	mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
-		priv->obj_ops.hrxq_destroy(hrxq);
-		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
-					   hrxq->standalone);
-		if (!hrxq->standalone)
-			ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-				     &priv->hrxqs, hrxq_idx, hrxq, next);
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
-		return 0;
-	}
-	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table,
-						 hrxq->standalone));
-	return 1;
+	priv->obj_ops.hrxq_destroy(hrxq);
+	mlx5_ind_table_obj_release(dev, hrxq->ind_table, hrxq->standalone);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
 }
 
 /**
- * Create an Rx Hash queue.
+ * Release the hash Rx queue.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param rss_key
- *   RSS key for the Rx hash queue.
- * @param rss_key_len
- *   RSS key length.
- * @param hash_fields
- *   Verbs protocol hash field to make the RSS on.
- * @param queues
- *   Queues entering in hash queue. In case of empty hash_fields only the
- *   first queue index will be taken for the indirection table.
- * @param queues_n
- *   Number of queues.
- * @param tunnel
- *   Tunnel type.
- * @param standalone
- *   Object of Rx Hash queue will be used in standalone shared action or not.
+ * @param hrxq
+ *   Index to Hash Rx queue to release.
  *
- * @return
- *   The DevX object initialized index, 0 otherwise and rte_errno is set.
+ * @param list
+ *   Cache list pointer.
+ * @param entry
+ *   Hash queue entry pointer.
  */
-uint32_t
-mlx5_hrxq_new(struct rte_eth_dev *dev,
-	      const uint8_t *rss_key, uint32_t rss_key_len,
-	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n,
-	      int tunnel, bool standalone)
+void
+mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
+		    struct mlx5_cache_entry *entry)
+{
+	struct rte_eth_dev *dev = list->ctx;
+	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+
+	__mlx5_hrxq_remove(dev, hrxq);
+}
+
+static struct mlx5_hrxq *
+__mlx5_hrxq_create(struct rte_eth_dev *dev,
+		   struct mlx5_flow_rss_desc *rss_desc)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	const uint8_t *rss_key = rss_desc->key;
+	uint32_t rss_key_len = rss_desc->key_len;
+	const uint16_t *queues =
+		rss_desc->standalone ? rss_desc->const_q : rss_desc->queue;
+	uint32_t queues_n = rss_desc->queue_num;
 	struct mlx5_hrxq *hrxq = NULL;
 	uint32_t hrxq_idx = 0;
 	struct mlx5_ind_table_obj *ind_tbl;
 	int ret;
 
-	queues_n = hash_fields ? queues_n : 1;
+	queues_n = rss_desc->hash_fields ? queues_n : 1;
 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
-						 standalone);
-	if (!ind_tbl) {
-		rte_errno = ENOMEM;
-		return 0;
-	}
+						 rss_desc->standalone);
+	if (!ind_tbl)
+		return NULL;
 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
 	if (!hrxq)
 		goto error;
-	hrxq->standalone = !!standalone;
+	hrxq->standalone = rss_desc->standalone;
+	hrxq->idx = hrxq_idx;
 	hrxq->ind_table = ind_tbl;
 	hrxq->rss_key_len = rss_key_len;
-	hrxq->hash_fields = hash_fields;
+	hrxq->hash_fields = rss_desc->hash_fields;
 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
-	ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);
-	if (ret < 0) {
-		rte_errno = errno;
+	ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
+	if (ret < 0)
 		goto error;
-	}
-	rte_atomic32_inc(&hrxq->refcnt);
-	if (!hrxq->standalone)
-		ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
-			     hrxq_idx, hrxq, next);
-	return hrxq_idx;
+	return hrxq;
 error:
-	ret = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
+	mlx5_ind_table_obj_release(dev, ind_tbl, rss_desc->standalone);
 	if (hrxq)
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
-	rte_errno = ret; /* Restore rte_errno. */
+	return NULL;
+}
+
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param list
+ *   Cache list pointer.
+ * @param entry
+ *   Hash queue entry pointer.
+ * @param cb_ctx
+ *   Context of the callback function.
+ *
+ * @return
+ *   Queue entry on success, NULL otherwise.
+ */
+struct mlx5_cache_entry *
+mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
+		    struct mlx5_cache_entry *entry __rte_unused,
+		    void *cb_ctx)
+{
+	struct rte_eth_dev *dev = list->ctx;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+	struct mlx5_hrxq *hrxq;
+
+	hrxq = __mlx5_hrxq_create(dev, rss_desc);
+	return hrxq ? &hrxq->entry : NULL;
+}
+
+/**
+ * Get an Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param rss_desc
+ *   RSS configuration for the Rx hash queue.
+ *
+ * @return
+ *   A hash Rx queue index on success, 0 on failure.
+ */
+uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+		       struct mlx5_flow_rss_desc *rss_desc)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq;
+	struct mlx5_cache_entry *entry;
+	struct mlx5_flow_cb_ctx ctx = {
+		.data = rss_desc,
+	};
+
+	if (rss_desc->standalone) {
+		hrxq = __mlx5_hrxq_create(dev, rss_desc);
+	} else {
+		entry = mlx5_cache_register(&priv->hrxqs, &ctx);
+		if (!entry)
+			return 0;
+		hrxq = container_of(entry, typeof(*hrxq), entry);
+	}
+	return hrxq->idx;
+}
+
+/**
+ * Release the hash Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq_idx
+ *   Index to Hash Rx queue to release.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq;
+
+	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	if (!hrxq->standalone)
+		return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
+	__mlx5_hrxq_remove(dev, hrxq);
 	return 0;
 }
 
@@ -2364,22 +2397,12 @@ struct mlx5_hrxq *
  * @return
  *   The number of object not released.
  */
-int
+uint32_t
 mlx5_hrxq_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
-	uint32_t idx;
-	int ret = 0;
 
-	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
-		      hrxq, next) {
-		DRV_LOG(DEBUG,
-			"port %u hash Rx queue %p still referenced",
-			dev->data->port_id, (void *)hrxq);
-		++ret;
-	}
-	return ret;
+	return mlx5_cache_list_get_entry_num(&priv->hrxqs);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 8fe0473..c361bbd 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -349,17 +349,19 @@ struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
 int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 			       struct mlx5_ind_table_obj *ind_tbl,
 			       bool standalone);
-uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
-		       const uint8_t *rss_key, uint32_t rss_key_len,
-		       uint64_t hash_fields,
-		       const uint16_t *queues, uint32_t queues_n,
-		       int tunnel, bool standalone);
+struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
+		struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
+int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
+		       struct mlx5_cache_entry *entry,
+		       void *cb_ctx);
+void mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
+			 struct mlx5_cache_entry *entry);
 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
-		       const uint8_t *rss_key, uint32_t rss_key_len,
-		       uint64_t hash_fields,
-		       const uint16_t *queues, uint32_t queues_n);
+		       struct mlx5_flow_rss_desc *rss_desc);
 int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
-int mlx5_hrxq_verify(struct rte_eth_dev *dev);
+uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
+
+
 enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
 const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
 	(struct rte_eth_dev *dev, uint16_t idx);
-- 
1.8.3.1

