From: Xueming Li <xuemingl@nvidia.com>
To: <dev@dpdk.org>
Cc: <xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>,
	"Slava Ovsiienko" <viacheslavo@nvidia.com>,
	Matan Azrad <matan@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 08/14] net/mlx5: move Rx queue reference count
Date: Thu, 4 Nov 2021 20:33:14 +0800
Message-ID: <20211104123320.1638915-9-xuemingl@nvidia.com>
In-Reply-To: <20211104123320.1638915-1-xuemingl@nvidia.com>

The Rx queue reference count tracks references to the RQ object. To prepare
for shared Rx queues, this patch moves the counter from rxq_ctrl to the Rx
queue private data.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Slava Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rx.h      |   8 +-
 drivers/net/mlx5/mlx5_rxq.c     | 169 +++++++++++++++++++++-----------
 drivers/net/mlx5/mlx5_trigger.c |  57 +++++------
 3 files changed, 142 insertions(+), 92 deletions(-)
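
Editor's note: below is a minimal, self-contained sketch (not the mlx5 driver
code itself) of the pattern this patch adopts: the reference counter lives in
the per-queue private structure rather than the shareable control structure,
and helpers mirroring mlx5_rxq_ref()/mlx5_rxq_deref() manipulate it with the
same atomic builtins used in the diff. The struct and function names are
simplified assumptions for illustration only.

/* refcnt_sketch.c - illustrative only; names are simplified assumptions. */
#include <stdint.h>
#include <stdio.h>

struct rxq_ctrl {                /* shareable part: no refcnt anymore */
	int dummy_cfg;
};

struct rxq_priv {                /* per-port, per-queue private part */
	uint32_t refcnt;         /* reference counter moved here */
	struct rxq_ctrl *ctrl;   /* back pointer to the shared control */
};

/* Take a reference (cf. mlx5_rxq_ref): returns the queue or NULL. */
static struct rxq_priv *
rxq_ref(struct rxq_priv *rxq)
{
	if (rxq != NULL)
		__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
	return rxq;
}

/* Drop a reference (cf. mlx5_rxq_deref): returns the updated count. */
static uint32_t
rxq_deref(struct rxq_priv *rxq)
{
	if (rxq == NULL)
		return 0;
	return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
}

int
main(void)
{
	struct rxq_ctrl ctrl = { .dummy_cfg = 0 };
	struct rxq_priv rxq = { .refcnt = 0, .ctrl = &ctrl };

	rxq_ref(&rxq);                      /* queue setup takes a reference */
	rxq_ref(&rxq);                      /* e.g. an indirection table user */
	printf("refcnt=%u\n", rxq.refcnt);  /* prints 2 */
	rxq_deref(&rxq);                    /* one user goes away */
	if (rxq_deref(&rxq) == 0)           /* last user releases resources */
		printf("release queue resources\n");
	return 0;
}

With the counter in the private structure, a lookup helper such as
mlx5_rxq_get() can return the queue without touching the count, so callers
that only peek at the queue no longer need a matching release, as the hunks
below show.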

diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index fa24f5cdf3a..eccfbf1108d 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -149,7 +149,6 @@ enum mlx5_rxq_type {
 struct mlx5_rxq_ctrl {
 	struct mlx5_rxq_data rxq; /* Data path structure. */
 	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
-	uint32_t refcnt; /* Reference counter. */
 	LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
 	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
@@ -170,6 +169,7 @@ struct mlx5_rxq_ctrl {
 /* RX queue private data. */
 struct mlx5_rxq_priv {
 	uint16_t idx; /* Queue index. */
+	uint32_t refcnt; /* Reference counter. */
 	struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
 	LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -207,7 +207,11 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
-struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);
+uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_verify(struct rte_eth_dev *dev);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 00df245a5c6..8071ddbd61c 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -386,15 +386,13 @@ mlx5_get_rx_port_offloads(void)
 static int
 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_ctrl *rxq_ctrl;
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
 
-	if (!(*priv->rxqs)[idx]) {
+	if (rxq == NULL) {
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
-	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-	return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
+	return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
 }
 
 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -874,8 +872,8 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
 
 	for (i = 0; i != n; ++i) {
 		/* This rxq obj must not be released in this function. */
-		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
-		struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
+		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+		struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
 		int rc;
 
 		/* Skip queues that cannot request interrupts. */
@@ -885,11 +883,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
 			if (rte_intr_vec_list_index_set(intr_handle, i,
 			   RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
 				return -rte_errno;
-			/* Decrease the rxq_ctrl's refcnt */
-			if (rxq_ctrl)
-				mlx5_rxq_release(dev, i);
 			continue;
 		}
+		mlx5_rxq_ref(dev, i);
 		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
 			DRV_LOG(ERR,
 				"port %u too many Rx queues for interrupt"
@@ -954,7 +950,7 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
 		 * Need to access directly the queue to release the reference
 		 * kept in mlx5_rx_intr_vec_enable().
 		 */
-		mlx5_rxq_release(dev, i);
+		mlx5_rxq_deref(dev, i);
 	}
 free:
 	rte_intr_free_epoll_fd(intr_handle);
@@ -1003,19 +999,14 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
 int
 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-	struct mlx5_rxq_ctrl *rxq_ctrl;
-
-	rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
-	if (!rxq_ctrl)
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
+	if (!rxq)
 		goto error;
-	if (rxq_ctrl->irq) {
-		if (!rxq_ctrl->obj) {
-			mlx5_rxq_release(dev, rx_queue_id);
+	if (rxq->ctrl->irq) {
+		if (!rxq->ctrl->obj)
 			goto error;
-		}
-		mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
+		mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
 	}
-	mlx5_rxq_release(dev, rx_queue_id);
 	return 0;
 error:
 	rte_errno = EINVAL;
@@ -1037,23 +1028,21 @@ int
 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_ctrl *rxq_ctrl;
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
 	int ret = 0;
 
-	rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
-	if (!rxq_ctrl) {
+	if (!rxq) {
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
-	if (!rxq_ctrl->obj)
+	if (!rxq->ctrl->obj)
 		goto error;
-	if (rxq_ctrl->irq) {
-		ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
+	if (rxq->ctrl->irq) {
+		ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
 		if (ret < 0)
 			goto error;
-		rxq_ctrl->rxq.cq_arm_sn++;
+		rxq->ctrl->rxq.cq_arm_sn++;
 	}
-	mlx5_rxq_release(dev, rx_queue_id);
 	return 0;
 error:
 	/**
@@ -1064,12 +1053,9 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		rte_errno = errno;
 	else
 		rte_errno = EINVAL;
-	ret = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_rxq_release(dev, rx_queue_id);
-	if (ret != EAGAIN)
+	if (rte_errno != EAGAIN)
 		DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
 			dev->data->port_id, rx_queue_id);
-	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
 
@@ -1657,7 +1643,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 	tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
 #endif
 	tmpl->rxq.idx = idx;
-	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	mlx5_rxq_ref(dev, idx);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
 error:
@@ -1711,11 +1697,53 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
 	tmpl->hairpin_conf = *hairpin_conf;
 	tmpl->rxq.idx = idx;
-	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	mlx5_rxq_ref(dev, idx);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
 }
 
+/**
+ * Increase Rx queue reference count.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_priv *
+mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+	if (rxq != NULL)
+		__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+	return rxq;
+}
+
+/**
+ * Dereference a Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   Updated reference count.
+ */
+uint32_t
+mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+	if (rxq == NULL)
+		return 0;
+	return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+}
+
 /**
  * Get a Rx queue.
  *
@@ -1727,18 +1755,52 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
  * @return
  *   A pointer to the queue if it exists, NULL otherwise.
  */
-struct mlx5_rxq_ctrl *
+struct mlx5_rxq_priv *
 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
 
-	if (rxq_data) {
-		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-		__atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
-	}
-	return rxq_ctrl;
+	if (priv->rxq_privs == NULL)
+		return NULL;
+	return (*priv->rxq_privs)[idx];
+}
+
+/**
+ * Get Rx queue shareable control.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   A pointer to the queue control if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_ctrl *
+mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+	return rxq == NULL ? NULL : rxq->ctrl;
+}
+
+/**
+ * Get Rx queue shareable data.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   A pointer to the queue data if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_data *
+mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+	return rxq == NULL ? NULL : &rxq->ctrl->rxq;
 }
 
 /**
@@ -1756,13 +1818,12 @@ int
 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_ctrl *rxq_ctrl;
-	struct mlx5_rxq_priv *rxq = (*priv->rxq_privs)[idx];
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
 
 	if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
 		return 0;
-	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-	if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
+	if (mlx5_rxq_deref(dev, idx) > 1)
 		return 1;
 	if (rxq_ctrl->obj) {
 		priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
@@ -1774,7 +1835,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 		rxq_free_elts(rxq_ctrl);
 		dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
-	if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+	if (!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED)) {
 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
 			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
 		LIST_REMOVE(rxq, owner_entry);
@@ -1952,7 +2013,7 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 		return 1;
 	priv->obj_ops.ind_table_destroy(ind_tbl);
 	for (i = 0; i != ind_tbl->queues_n; ++i)
-		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
+		claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
 	mlx5_free(ind_tbl);
 	return 0;
 }
@@ -2009,7 +2070,7 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
 			       log2above(priv->config.ind_table_max_size);
 
 	for (i = 0; i != queues_n; ++i) {
-		if (!mlx5_rxq_get(dev, queues[i])) {
+		if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
 			ret = -rte_errno;
 			goto error;
 		}
@@ -2022,7 +2083,7 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
 error:
 	err = rte_errno;
 	for (j = 0; j < i; j++)
-		mlx5_rxq_release(dev, ind_tbl->queues[j]);
+		mlx5_rxq_deref(dev, ind_tbl->queues[j]);
 	rte_errno = err;
 	DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
 		dev->data->port_id);
@@ -2118,7 +2179,7 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
 			  bool standalone)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	unsigned int i, j;
+	unsigned int i;
 	int ret = 0, err;
 	const unsigned int n = rte_is_power_of_2(queues_n) ?
 			       log2above(queues_n) :
@@ -2138,15 +2199,11 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
 	ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
 	if (ret)
 		goto error;
-	for (j = 0; j < ind_tbl->queues_n; j++)
-		mlx5_rxq_release(dev, ind_tbl->queues[j]);
 	ind_tbl->queues_n = queues_n;
 	ind_tbl->queues = queues;
 	return 0;
 error:
 	err = rte_errno;
-	for (j = 0; j < i; j++)
-		mlx5_rxq_release(dev, queues[j]);
 	rte_errno = err;
 	DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
 		dev->data->port_id);
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index ebeeae279e2..e5d74d275f8 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -201,10 +201,12 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 	DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
 		dev->data->port_id, priv->sh->device_attr.max_sge);
 	for (i = 0; i != priv->rxqs_n; ++i) {
-		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
+		struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);
+		struct mlx5_rxq_ctrl *rxq_ctrl;
 
-		if (!rxq_ctrl)
+		if (rxq == NULL)
 			continue;
+		rxq_ctrl = rxq->ctrl;
 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
 			/*
 			 * Pre-register the mempools. Regardless of whether
@@ -266,6 +268,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
 	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
 	struct mlx5_txq_ctrl *txq_ctrl;
+	struct mlx5_rxq_priv *rxq;
 	struct mlx5_rxq_ctrl *rxq_ctrl;
 	struct mlx5_devx_obj *sq;
 	struct mlx5_devx_obj *rq;
@@ -310,9 +313,8 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 			return -rte_errno;
 		}
 		sq = txq_ctrl->obj->sq;
-		rxq_ctrl = mlx5_rxq_get(dev,
-					txq_ctrl->hairpin_conf.peers[0].queue);
-		if (!rxq_ctrl) {
+		rxq = mlx5_rxq_get(dev, txq_ctrl->hairpin_conf.peers[0].queue);
+		if (rxq == NULL) {
 			mlx5_txq_release(dev, i);
 			rte_errno = EINVAL;
 			DRV_LOG(ERR, "port %u no rxq object found: %d",
@@ -320,6 +322,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 				txq_ctrl->hairpin_conf.peers[0].queue);
 			return -rte_errno;
 		}
+		rxq_ctrl = rxq->ctrl;
 		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
 		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
 			rte_errno = ENOMEM;
@@ -354,12 +357,10 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 		rxq_ctrl->hairpin_status = 1;
 		txq_ctrl->hairpin_status = 1;
 		mlx5_txq_release(dev, i);
-		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
 	}
 	return 0;
 error:
 	mlx5_txq_release(dev, i);
-	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
 	return -rte_errno;
 }
 
@@ -432,27 +433,26 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
 		peer_info->manual_bind = txq_ctrl->hairpin_conf.manual_bind;
 		mlx5_txq_release(dev, peer_queue);
 	} else { /* Peer port used as ingress. */
+		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, peer_queue);
 		struct mlx5_rxq_ctrl *rxq_ctrl;
 
-		rxq_ctrl = mlx5_rxq_get(dev, peer_queue);
-		if (rxq_ctrl == NULL) {
+		if (rxq == NULL) {
 			rte_errno = EINVAL;
 			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
 				dev->data->port_id, peer_queue);
 			return -rte_errno;
 		}
+		rxq_ctrl = rxq->ctrl;
 		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
 			rte_errno = EINVAL;
 			DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq",
 				dev->data->port_id, peer_queue);
-			mlx5_rxq_release(dev, peer_queue);
 			return -rte_errno;
 		}
 		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
 			rte_errno = ENOMEM;
 			DRV_LOG(ERR, "port %u no Rxq object found: %d",
 				dev->data->port_id, peer_queue);
-			mlx5_rxq_release(dev, peer_queue);
 			return -rte_errno;
 		}
 		peer_info->qp_id = rxq_ctrl->obj->rq->id;
@@ -460,7 +460,6 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
 		peer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;
 		peer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;
 		peer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;
-		mlx5_rxq_release(dev, peer_queue);
 	}
 	return 0;
 }
@@ -559,34 +558,32 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
 			txq_ctrl->hairpin_status = 1;
 		mlx5_txq_release(dev, cur_queue);
 	} else {
+		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
 		struct mlx5_rxq_ctrl *rxq_ctrl;
 		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
 
-		rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
-		if (rxq_ctrl == NULL) {
+		if (rxq == NULL) {
 			rte_errno = EINVAL;
 			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
 				dev->data->port_id, cur_queue);
 			return -rte_errno;
 		}
+		rxq_ctrl = rxq->ctrl;
 		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
 			rte_errno = EINVAL;
 			DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
 				dev->data->port_id, cur_queue);
-			mlx5_rxq_release(dev, cur_queue);
 			return -rte_errno;
 		}
 		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
 			rte_errno = ENOMEM;
 			DRV_LOG(ERR, "port %u no Rxq object found: %d",
 				dev->data->port_id, cur_queue);
-			mlx5_rxq_release(dev, cur_queue);
 			return -rte_errno;
 		}
 		if (rxq_ctrl->hairpin_status != 0) {
 			DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
 				dev->data->port_id, cur_queue);
-			mlx5_rxq_release(dev, cur_queue);
 			return 0;
 		}
 		if (peer_info->tx_explicit !=
@@ -594,7 +591,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
 			rte_errno = EINVAL;
 			DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
 				" mismatch", dev->data->port_id, cur_queue);
-			mlx5_rxq_release(dev, cur_queue);
 			return -rte_errno;
 		}
 		if (peer_info->manual_bind !=
@@ -602,7 +598,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
 			rte_errno = EINVAL;
 			DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
 				" mismatch", dev->data->port_id, cur_queue);
-			mlx5_rxq_release(dev, cur_queue);
 			return -rte_errno;
 		}
 		rq_attr.state = MLX5_SQC_STATE_RDY;
@@ -612,7 +607,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
 		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
 		if (ret == 0)
 			rxq_ctrl->hairpin_status = 1;
-		mlx5_rxq_release(dev, cur_queue);
 	}
 	return ret;
 }
@@ -677,34 +671,32 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
 			txq_ctrl->hairpin_status = 0;
 		mlx5_txq_release(dev, cur_queue);
 	} else {
+		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
 		struct mlx5_rxq_ctrl *rxq_ctrl;
 		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
 
-		rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
-		if (rxq_ctrl == NULL) {
+		if (rxq == NULL) {
 			rte_errno = EINVAL;
 			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
 				dev->data->port_id, cur_queue);
 			return -rte_errno;
 		}
+		rxq_ctrl = rxq->ctrl;
 		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
 			rte_errno = EINVAL;
 			DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
 				dev->data->port_id, cur_queue);
-			mlx5_rxq_release(dev, cur_queue);
 			return -rte_errno;
 		}
 		if (rxq_ctrl->hairpin_status == 0) {
 			DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
 				dev->data->port_id, cur_queue);
-			mlx5_rxq_release(dev, cur_queue);
 			return 0;
 		}
 		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
 			rte_errno = ENOMEM;
 			DRV_LOG(ERR, "port %u no Rxq object found: %d",
 				dev->data->port_id, cur_queue);
-			mlx5_rxq_release(dev, cur_queue);
 			return -rte_errno;
 		}
 		rq_attr.state = MLX5_SQC_STATE_RST;
@@ -712,7 +704,6 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
 		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
 		if (ret == 0)
 			rxq_ctrl->hairpin_status = 0;
-		mlx5_rxq_release(dev, cur_queue);
 	}
 	return ret;
 }
@@ -1014,7 +1005,6 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_ctrl *txq_ctrl;
-	struct mlx5_rxq_ctrl *rxq_ctrl;
 	uint32_t i;
 	uint16_t pp;
 	uint32_t bits[(RTE_MAX_ETHPORTS + 31) / 32] = {0};
@@ -1043,24 +1033,23 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 		}
 	} else {
 		for (i = 0; i < priv->rxqs_n; i++) {
-			rxq_ctrl = mlx5_rxq_get(dev, i);
-			if (!rxq_ctrl)
+			struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+			struct mlx5_rxq_ctrl *rxq_ctrl;
+
+			if (rxq == NULL)
 				continue;
-			if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
-				mlx5_rxq_release(dev, i);
+			rxq_ctrl = rxq->ctrl;
+			if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
 				continue;
-			}
 			pp = rxq_ctrl->hairpin_conf.peers[0].port;
 			if (pp >= RTE_MAX_ETHPORTS) {
 				rte_errno = ERANGE;
-				mlx5_rxq_release(dev, i);
 				DRV_LOG(ERR, "port %hu queue %u peer port "
 					"out of range %hu",
 					priv->dev_data->port_id, i, pp);
 				return -rte_errno;
 			}
 			bits[pp / 32] |= 1 << (pp % 32);
-			mlx5_rxq_release(dev, i);
 		}
 	}
 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
-- 
2.33.0


Thread overview: 266+ messages
2021-07-27  3:42 [dpdk-dev] [RFC] ethdev: introduce shared Rx queue Xueming Li
2021-07-28  7:56 ` Andrew Rybchenko
2021-07-28  8:20   ` Xueming(Steven) Li
2021-08-09 11:47 ` [dpdk-dev] [PATCH v1] " Xueming Li
2021-08-09 13:50   ` Jerin Jacob
2021-08-09 14:16     ` Xueming(Steven) Li
2021-08-11  8:02       ` Jerin Jacob
2021-08-11  8:28         ` Xueming(Steven) Li
2021-08-11 12:04           ` Ferruh Yigit
2021-08-11 12:59             ` Xueming(Steven) Li
2021-08-12 14:35               ` Xueming(Steven) Li
2021-09-15 15:34               ` Xueming(Steven) Li
2021-09-26  5:35             ` Xueming(Steven) Li
2021-09-28  9:35               ` Jerin Jacob
2021-09-28 11:36                 ` Xueming(Steven) Li
2021-09-28 11:37                 ` Xueming(Steven) Li
2021-09-28 11:37                 ` Xueming(Steven) Li
2021-09-28 12:58                   ` Jerin Jacob
2021-09-28 13:25                     ` Xueming(Steven) Li
2021-09-28 13:38                       ` Jerin Jacob
2021-09-28 13:59                         ` Ananyev, Konstantin
2021-09-28 14:40                           ` Xueming(Steven) Li
2021-09-28 14:59                             ` Jerin Jacob
2021-09-29  7:41                               ` Xueming(Steven) Li
2021-09-29  8:05                                 ` Jerin Jacob
2021-10-08  8:26                                   ` Xueming(Steven) Li
2021-10-10  9:46                                     ` Jerin Jacob
2021-10-10 13:40                                       ` Xueming(Steven) Li
2021-10-11  4:10                                         ` Jerin Jacob
2021-09-29  0:26                             ` Ananyev, Konstantin
2021-09-29  8:40                               ` Xueming(Steven) Li
2021-09-29 10:20                                 ` Ananyev, Konstantin
2021-09-29 13:25                                   ` Xueming(Steven) Li
2021-09-30  9:59                                     ` Ananyev, Konstantin
2021-10-06  7:54                                       ` Xueming(Steven) Li
2021-09-29  9:12                               ` Xueming(Steven) Li
2021-09-29  9:52                                 ` Ananyev, Konstantin
2021-09-29 11:07                                   ` Bruce Richardson
2021-09-29 11:46                                     ` Ananyev, Konstantin
2021-09-29 12:17                                       ` Bruce Richardson
2021-09-29 12:08                                   ` Xueming(Steven) Li
2021-09-29 12:35                                     ` Ananyev, Konstantin
2021-09-29 14:54                                       ` Xueming(Steven) Li
2021-09-28 14:51                         ` Xueming(Steven) Li
2021-09-28 12:59                 ` Xueming(Steven) Li
2021-08-11 14:04 ` [dpdk-dev] [PATCH v2 01/15] " Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 02/15] app/testpmd: dump port and queue info for each packet Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 03/15] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 04/15] app/testpmd: make sure shared Rx queue polled on same core Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 05/15] app/testpmd: adds common forwarding for shared Rx queue Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 06/15] app/testpmd: add common fwd wrapper function Xueming Li
2021-08-17  9:37     ` Jerin Jacob
2021-08-18 11:27       ` Xueming(Steven) Li
2021-08-18 11:47         ` Jerin Jacob
2021-08-18 14:08           ` Xueming(Steven) Li
2021-08-26 11:28             ` Jerin Jacob
2021-08-29  7:07               ` Xueming(Steven) Li
2021-09-01 14:44                 ` Xueming(Steven) Li
2021-09-28  5:54                   ` Xueming(Steven) Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 07/15] app/testpmd: support shared Rx queues for IO forwarding Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 08/15] app/testpmd: support shared Rx queue for rxonly forwarding Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 09/15] app/testpmd: support shared Rx queue for icmpecho fwd Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 10/15] app/testpmd: support shared Rx queue for csum fwd Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 11/15] app/testpmd: support shared Rx queue for flowgen Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 12/15] app/testpmd: support shared Rx queue for MAC fwd Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 13/15] app/testpmd: support shared Rx queue for macswap fwd Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 14/15] app/testpmd: support shared Rx queue for 5tuple fwd Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 15/15] app/testpmd: support shared Rx queue for ieee1588 fwd Xueming Li
2021-08-17  9:33   ` [dpdk-dev] [PATCH v2 01/15] ethdev: introduce shared Rx queue Jerin Jacob
2021-08-17 11:31     ` Xueming(Steven) Li
2021-08-17 15:11       ` Jerin Jacob
2021-08-18 11:14         ` Xueming(Steven) Li
2021-08-19  5:26           ` Jerin Jacob
2021-08-19 12:09             ` Xueming(Steven) Li
2021-08-26 11:58               ` Jerin Jacob
2021-08-28 14:16                 ` Xueming(Steven) Li
2021-08-30  9:31                   ` Jerin Jacob
2021-08-30 10:13                     ` Xueming(Steven) Li
2021-09-15 14:45                     ` Xueming(Steven) Li
2021-09-16  4:16                       ` Jerin Jacob
2021-09-28  5:50                         ` Xueming(Steven) Li
2021-09-17  8:01 ` [dpdk-dev] [PATCH v3 0/8] " Xueming Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 1/8] " Xueming Li
2021-09-27 23:53     ` Ajit Khaparde
2021-09-28 14:24       ` Xueming(Steven) Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 2/8] ethdev: new API to aggregate shared Rx queue group Xueming Li
2021-09-26 17:54     ` Ajit Khaparde
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 3/8] app/testpmd: dump port and queue info for each packet Xueming Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 4/8] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 5/8] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 6/8] app/testpmd: add common fwd wrapper Xueming Li
2021-09-17 11:24     ` Jerin Jacob
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 7/8] app/testpmd: improve forwarding cache miss Xueming Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 8/8] app/testpmd: support shared Rx queue forwarding Xueming Li
2021-09-30 14:55 ` [dpdk-dev] [PATCH v4 0/6] ethdev: introduce shared Rx queue Xueming Li
2021-09-30 14:55   ` [dpdk-dev] [PATCH v4 1/6] " Xueming Li
2021-10-11 10:47     ` Andrew Rybchenko
2021-10-11 13:12       ` Xueming(Steven) Li
2021-09-30 14:55   ` [dpdk-dev] [PATCH v4 2/6] ethdev: new API to aggregate shared Rx queue group Xueming Li
2021-09-30 14:55   ` [dpdk-dev] [PATCH v4 3/6] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-09-30 14:56   ` [dpdk-dev] [PATCH v4 4/6] app/testpmd: dump port info for " Xueming Li
2021-09-30 14:56   ` [dpdk-dev] [PATCH v4 5/6] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-09-30 14:56   ` [dpdk-dev] [PATCH v4 6/6] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-11 11:49   ` [dpdk-dev] [PATCH v4 0/6] ethdev: introduce " Andrew Rybchenko
2021-10-11 15:11     ` Xueming(Steven) Li
2021-10-12  6:37       ` Xueming(Steven) Li
2021-10-12  8:48         ` Andrew Rybchenko
2021-10-12 10:55           ` Xueming(Steven) Li
2021-10-12 11:28             ` Andrew Rybchenko
2021-10-12 11:33               ` Xueming(Steven) Li
2021-10-13  7:53               ` Xueming(Steven) Li
2021-10-11 12:37 ` [dpdk-dev] [PATCH v5 0/5] " Xueming Li
2021-10-11 12:37   ` [dpdk-dev] [PATCH v5 1/5] " Xueming Li
2021-10-11 12:37   ` [dpdk-dev] [PATCH v5 2/5] app/testpmd: new parameter to enable " Xueming Li
2021-10-11 12:37   ` [dpdk-dev] [PATCH v5 3/5] app/testpmd: dump port info for " Xueming Li
2021-10-11 12:37   ` [dpdk-dev] [PATCH v5 4/5] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-11 12:37   ` [dpdk-dev] [PATCH v5 5/5] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-12 14:39 ` [dpdk-dev] [PATCH v6 0/5] ethdev: introduce " Xueming Li
2021-10-12 14:39   ` [dpdk-dev] [PATCH v6 1/5] " Xueming Li
2021-10-15  9:28     ` Andrew Rybchenko
2021-10-15 10:54       ` Xueming(Steven) Li
2021-10-18  6:46         ` Andrew Rybchenko
2021-10-18  6:57           ` Xueming(Steven) Li
2021-10-15 17:20     ` Ferruh Yigit
2021-10-16  9:14       ` Xueming(Steven) Li
2021-10-12 14:39   ` [dpdk-dev] [PATCH v6 2/5] app/testpmd: new parameter to enable " Xueming Li
2021-10-12 14:39   ` [dpdk-dev] [PATCH v6 3/5] app/testpmd: dump port info for " Xueming Li
2021-10-12 14:39   ` [dpdk-dev] [PATCH v6 4/5] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-12 14:39   ` [dpdk-dev] [PATCH v6 5/5] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-16  8:42 ` [dpdk-dev] [PATCH v7 0/5] ethdev: introduce " Xueming Li
2021-10-16  8:42   ` [dpdk-dev] [PATCH v7 1/5] " Xueming Li
2021-10-17  5:33     ` Ajit Khaparde
2021-10-17  7:29       ` Xueming(Steven) Li
2021-10-16  8:42   ` [dpdk-dev] [PATCH v7 2/5] app/testpmd: new parameter to enable " Xueming Li
2021-10-16  8:42   ` [dpdk-dev] [PATCH v7 3/5] app/testpmd: dump port info for " Xueming Li
2021-10-16  8:42   ` [dpdk-dev] [PATCH v7 4/5] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-16  8:42   ` [dpdk-dev] [PATCH v7 5/5] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-18 12:59 ` [dpdk-dev] [PATCH v8 0/6] ethdev: introduce " Xueming Li
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 1/6] " Xueming Li
2021-10-19  0:21     ` Ajit Khaparde
2021-10-19  5:54       ` Xueming(Steven) Li
2021-10-19  6:28     ` Andrew Rybchenko
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 2/6] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 3/6] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 4/6] app/testpmd: dump port info for " Xueming Li
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 5/6] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 6/6] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-19  8:17 ` [dpdk-dev] [PATCH v9 0/6] ethdev: introduce " Xueming Li
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 1/6] " Xueming Li
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 2/6] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-19  8:33     ` Andrew Rybchenko
2021-10-19  9:10       ` Xueming(Steven) Li
2021-10-19  9:39         ` Andrew Rybchenko
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 3/6] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 4/6] app/testpmd: dump port info for " Xueming Li
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 5/6] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 6/6] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-19 15:20 ` [dpdk-dev] [PATCH v10 0/6] ethdev: introduce " Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 1/6] ethdev: new API to resolve device capability name Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 2/6] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 3/6] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 4/6] app/testpmd: dump port info for " Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 5/6] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 6/6] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-19 15:28 ` [dpdk-dev] [PATCH v10 0/7] ethdev: introduce " Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 1/7] " Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 2/7] ethdev: new API to resolve device capability name Xueming Li
2021-10-19 17:57     ` Andrew Rybchenko
2021-10-20  7:47       ` Xueming(Steven) Li
2021-10-20  7:48         ` Andrew Rybchenko
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 3/7] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 4/7] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 5/7] app/testpmd: dump port info for " Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 6/7] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 7/7] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-20  7:53 ` [dpdk-dev] [PATCH v11 0/7] ethdev: introduce " Xueming Li
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 1/7] " Xueming Li
2021-10-20 17:14     ` Ajit Khaparde
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 2/7] ethdev: new API to resolve device capability name Xueming Li
2021-10-20 10:52     ` Andrew Rybchenko
2021-10-20 17:16       ` Ajit Khaparde
2021-10-20 18:42     ` Thomas Monjalon
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 3/7] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-21  3:24     ` Li, Xiaoyun
2021-10-21  3:28       ` Ajit Khaparde
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 4/7] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-20 17:29     ` Ajit Khaparde
2021-10-20 19:14       ` Thomas Monjalon
2021-10-21  4:09         ` Xueming(Steven) Li
2021-10-21  3:49       ` Xueming(Steven) Li
2021-10-21  3:24     ` Li, Xiaoyun
2021-10-21  3:58       ` Xueming(Steven) Li
2021-10-21  5:15         ` Li, Xiaoyun
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 5/7] app/testpmd: dump port info for " Xueming Li
2021-10-21  3:24     ` Li, Xiaoyun
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 6/7] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-21  3:24     ` Li, Xiaoyun
2021-10-21  4:21       ` Xueming(Steven) Li
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 7/7] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-20 19:20     ` Thomas Monjalon
2021-10-21  3:26       ` Li, Xiaoyun
2021-10-21  4:39       ` Xueming(Steven) Li
2021-10-21  5:08 ` [dpdk-dev] [PATCH v12 0/7] ethdev: introduce " Xueming Li
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 1/7] " Xueming Li
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 2/7] ethdev: get device capability name as string Xueming Li
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 3/7] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 4/7] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-21  9:20     ` Thomas Monjalon
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 5/7] app/testpmd: dump port info for " Xueming Li
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 6/7] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-21  6:35     ` Li, Xiaoyun
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 7/7] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-21  6:33     ` Li, Xiaoyun
2021-10-21  7:58       ` Xueming(Steven) Li
2021-10-21  8:01         ` Li, Xiaoyun
2021-10-21  8:22           ` Xueming(Steven) Li
2021-10-21  9:28     ` Thomas Monjalon
2021-10-21 10:41 ` [dpdk-dev] [PATCH v13 0/7] ethdev: introduce " Xueming Li
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 1/7] " Xueming Li
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 2/7] ethdev: get device capability name as string Xueming Li
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 3/7] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 4/7] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-21 19:45     ` Ajit Khaparde
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 5/7] app/testpmd: dump port info for " Xueming Li
2021-10-21 19:48     ` Ajit Khaparde
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 6/7] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 7/7] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-21 23:41   ` [dpdk-dev] [PATCH v13 0/7] ethdev: introduce " Ferruh Yigit
2021-10-22  6:31     ` Xueming(Steven) Li
2021-11-04 15:52   ` Tom Barbette
2021-11-03  7:58 ` [dpdk-dev] [PATCH v3 00/14] net/mlx5: support " Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 01/14] common/mlx5: introduce user index field in completion Xueming Li
2021-11-04  9:14     ` Slava Ovsiienko
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 02/14] net/mlx5: fix field reference for PPC Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 03/14] common/mlx5: adds basic receive memory pool support Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 04/14] common/mlx5: support receive memory pool Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 05/14] net/mlx5: fix Rx queue memory allocation return value Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 06/14] net/mlx5: clean Rx queue code Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 07/14] net/mlx5: split Rx queue into shareable and private Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 08/14] net/mlx5: move Rx queue reference count Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 09/14] net/mlx5: move Rx queue hairpin info to private data Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 10/14] net/mlx5: remove port info from shareable Rx queue Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 11/14] net/mlx5: move Rx queue DevX resource Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 12/14] net/mlx5: remove Rx queue data list from device Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 13/14] net/mlx5: support shared Rx queue Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 14/14] net/mlx5: add shared Rx queue port datapath support Xueming Li
2021-11-04 12:33 ` [dpdk-dev] [PATCH v4 00/14] net/mlx5: support shared Rx queue Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 01/14] common/mlx5: introduce user index field in completion Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 02/14] net/mlx5: fix field reference for PPC Xueming Li
2021-11-04 17:07     ` Raslan Darawsheh
2021-11-04 17:49     ` David Christensen
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 03/14] common/mlx5: adds basic receive memory pool support Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 04/14] common/mlx5: support receive memory pool Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 05/14] net/mlx5: fix Rx queue memory allocation return value Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 06/14] net/mlx5: clean Rx queue code Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 07/14] net/mlx5: split Rx queue into shareable and private Xueming Li
2021-11-04 12:33   ` Xueming Li [this message]
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 09/14] net/mlx5: move Rx queue hairpin info to private data Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 10/14] net/mlx5: remove port info from shareable Rx queue Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 11/14] net/mlx5: move Rx queue DevX resource Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 12/14] net/mlx5: remove Rx queue data list from device Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 13/14] net/mlx5: support shared Rx queue Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 14/14] net/mlx5: add shared Rx queue port datapath support Xueming Li
2021-11-04 17:50     ` David Christensen
2021-11-05  6:40     ` Ruifeng Wang
2021-11-04 20:06   ` [dpdk-dev] [PATCH v4 00/14] net/mlx5: support shared Rx queue Raslan Darawsheh
