All of lore.kernel.org
 help / color / mirror / Atom feed
From: Li Zhang <lizh@nvidia.com>
To: <orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,
	<shahafs@nvidia.com>
Cc: <dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,
	<roniba@nvidia.com>, Yajun Wu <yajunw@nvidia.com>
Subject: [PATCH v1 16/17] vdpa/mlx5: add virtq sub-resources creation
Date: Mon, 6 Jun 2022 14:46:49 +0300	[thread overview]
Message-ID: <20220606114650.209612-17-lizh@nvidia.com> (raw)
In-Reply-To: <20220606114650.209612-1-lizh@nvidia.com>

Pre-create the virt-queue sub-resources in the device probe stage,
and then modify the virtqueue in the device config stage.
The steering table also needs to support the dummy virt-queue.
This accelerates the LM process and reduces its time by 40%.

Signed-off-by: Li Zhang <lizh@nvidia.com>
Signed-off-by: Yajun Wu <yajunw@nvidia.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 72 +++++++--------------
 drivers/vdpa/mlx5/mlx5_vdpa.h       | 17 +++--
 drivers/vdpa/mlx5/mlx5_vdpa_event.c | 11 ++--
 drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 17 +++--
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 99 +++++++++++++++++++++--------
 5 files changed, 123 insertions(+), 93 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index d000854c08..f006a9cd3f 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -627,65 +627,39 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,
 static int
 mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
 {
-	struct mlx5_vdpa_virtq *virtq;
+	uint32_t max_queues;
 	uint32_t index;
-	uint32_t i;
+	struct mlx5_vdpa_virtq *virtq;
 
-	for (index = 0; index < priv->caps.max_num_virtio_queues * 2;
+	for (index = 0; index < priv->caps.max_num_virtio_queues;
 		index++) {
 		virtq = &priv->virtqs[index];
 		pthread_mutex_init(&virtq->virtq_lock, NULL);
 	}
-	if (!priv->queues)
+	if (!priv->queues || !priv->queue_size)
 		return 0;
-	for (index = 0; index < (priv->queues * 2); ++index) {
+	max_queues = (priv->queues < priv->caps.max_num_virtio_queues) ?
+		(priv->queues * 2) : (priv->caps.max_num_virtio_queues);
+	for (index = 0; index < max_queues; ++index)
+		if (mlx5_vdpa_virtq_single_resource_prepare(priv,
+			index))
+			goto error;
+	if (mlx5_vdpa_is_modify_virtq_supported(priv))
+		if (mlx5_vdpa_steer_update(priv, true))
+			goto error;
+	return 0;
+error:
+	for (index = 0; index < max_queues; ++index) {
 		virtq = &priv->virtqs[index];
-		int ret = mlx5_vdpa_event_qp_prepare(priv, priv->queue_size,
-					-1, virtq);
-
-		if (ret) {
-			DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
-				index);
-			return -1;
-		}
-		if (priv->caps.queue_counters_valid) {
-			if (!virtq->counters)
-				virtq->counters =
-					mlx5_devx_cmd_create_virtio_q_counters
-						(priv->cdev->ctx);
-			if (!virtq->counters) {
-				DRV_LOG(ERR, "Failed to create virtq couners for virtq"
-					" %d.", index);
-				return -1;
-			}
-		}
-		for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
-			uint32_t size;
-			void *buf;
-			struct mlx5dv_devx_umem *obj;
-
-			size = priv->caps.umems[i].a * priv->queue_size +
-					priv->caps.umems[i].b;
-			buf = rte_zmalloc(__func__, size, 4096);
-			if (buf == NULL) {
-				DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
-						" %u.", i, index);
-				return -1;
-			}
-			obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx, buf,
-					size, IBV_ACCESS_LOCAL_WRITE);
-			if (obj == NULL) {
-				rte_free(buf);
-				DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
-						i, index);
-				return -1;
-			}
-			virtq->umems[i].size = size;
-			virtq->umems[i].buf = buf;
-			virtq->umems[i].obj = obj;
+		if (virtq->virtq) {
+			pthread_mutex_lock(&virtq->virtq_lock);
+			mlx5_vdpa_virtq_unset(virtq);
+			pthread_mutex_unlock(&virtq->virtq_lock);
 		}
 	}
-	return 0;
+	if (mlx5_vdpa_is_modify_virtq_supported(priv))
+		mlx5_vdpa_steer_unset(priv);
+	return -1;
 }
 
 static int
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index b6392b9d66..f353db62ac 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -277,13 +277,15 @@ int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);
  *   The guest notification file descriptor.
  * @param[in/out] virtq
  *   Pointer to the virt-queue structure.
+ * @param[in] reset
+ *   If true, it will reset event qp.
  *
  * @return
  *   0 on success, -1 otherwise and rte_errno is set.
  */
 int
 mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
-	int callfd, struct mlx5_vdpa_virtq *virtq);
+	int callfd, struct mlx5_vdpa_virtq *virtq, bool reset);
 
 /**
  * Destroy an event QP and all its related resources.
@@ -403,11 +405,13 @@ void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
  *
  * @param[in] priv
  *   The vdpa driver private structure.
+ * @param[in] is_dummy
+ *   If set, it is updated with dummy queue for prepare resource.
  *
  * @return
  *   0 on success, a negative value otherwise.
  */
-int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);
+int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv, bool is_dummy);
 
 /**
  * Setup steering and all its related resources to enable RSS traffic from the
@@ -581,9 +585,14 @@ mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
 int
 mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
 void
-mlx5_vdpa_vq_destroy(struct mlx5_vdpa_virtq *virtq);
-void
 mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv);
 void
 mlx5_vdpa_virtq_unreg_intr_handle_all(struct mlx5_vdpa_priv *priv);
+bool
+mlx5_vdpa_virtq_single_resource_prepare(struct mlx5_vdpa_priv *priv,
+		int index);
+int
+mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp);
+void
+mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq);
 #endif /* RTE_PMD_MLX5_VDPA_H_ */
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
index f782b6b832..22f0920c88 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
@@ -249,7 +249,7 @@ mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)
 {
 	unsigned int i;
 
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
 
 		mlx5_vdpa_queue_complete(cq);
@@ -618,7 +618,7 @@ mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
 	return 0;
 }
 
-static int
+int
 mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)
 {
 	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_QP_2RST,
@@ -638,7 +638,7 @@ mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)
 
 int
 mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
-	int callfd, struct mlx5_vdpa_virtq *virtq)
+	int callfd, struct mlx5_vdpa_virtq *virtq, bool reset)
 {
 	struct mlx5_vdpa_event_qp *eqp = &virtq->eqp;
 	struct mlx5_devx_qp_attr attr = {0};
@@ -649,11 +649,10 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 		/* Reuse existing resources. */
 		eqp->cq.callfd = callfd;
 		/* FW will set event qp to error state in q destroy. */
-		if (!mlx5_vdpa_qps2rst2rts(eqp)) {
+		if (reset && !mlx5_vdpa_qps2rst2rts(eqp))
 			rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
 					&eqp->sw_qp.db_rec[0]);
-			return 0;
-		}
+		return 0;
 	}
 	if (eqp->fw_qp)
 		mlx5_vdpa_event_qp_destroy(eqp);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index 4cbf09784e..c2e0a17ace 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -57,7 +57,7 @@ mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
  * -1 on error.
  */
 static int
-mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
+mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv, bool is_dummy)
 {
 	int i;
 	uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
@@ -67,15 +67,20 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 						      sizeof(uint32_t), 0);
 	uint32_t k = 0, j;
 	int ret = 0, num;
+	uint16_t nr_vring = is_dummy ?
+	(((priv->queues * 2) < priv->caps.max_num_virtio_queues) ?
+	(priv->queues * 2) : priv->caps.max_num_virtio_queues) : priv->nr_virtqs;
 
 	if (!attr) {
 		DRV_LOG(ERR, "Failed to allocate RQT attributes memory.");
 		rte_errno = ENOMEM;
 		return -ENOMEM;
 	}
-	for (i = 0; i < priv->nr_virtqs; i++) {
+	for (i = 0; i < nr_vring; i++) {
 		if (is_virtq_recvq(i, priv->nr_virtqs) &&
-		    priv->virtqs[i].enable && priv->virtqs[i].virtq) {
+			(is_dummy || (priv->virtqs[i].enable &&
+			priv->virtqs[i].configured)) &&
+			priv->virtqs[i].virtq) {
 			attr->rq_list[k] = priv->virtqs[i].virtq->id;
 			k++;
 		}
@@ -235,12 +240,12 @@ mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
 }
 
 int
-mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
+mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv, bool is_dummy)
 {
 	int ret;
 
 	pthread_mutex_lock(&priv->steer_update_lock);
-	ret = mlx5_vdpa_rqt_prepare(priv);
+	ret = mlx5_vdpa_rqt_prepare(priv, is_dummy);
 	if (ret == 0) {
 		mlx5_vdpa_steer_unset(priv);
 	} else if (ret < 0) {
@@ -261,7 +266,7 @@ mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
 int
 mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
 {
-	if (mlx5_vdpa_steer_update(priv))
+	if (mlx5_vdpa_steer_update(priv, false))
 		goto error;
 	return 0;
 error:
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index a08c854b14..20ce382487 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -146,10 +146,10 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 	}
 }
 
-static int
+void
 mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 {
-	int ret = -EAGAIN;
+	int ret;
 
 	mlx5_vdpa_virtq_unregister_intr_handle(virtq);
 	if (virtq->configured) {
@@ -157,12 +157,12 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 		if (ret)
 			DRV_LOG(WARNING, "Failed to stop virtq %d.",
 				virtq->index);
-		virtq->configured = 0;
 		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
+		virtq->index = 0;
+		virtq->virtq = NULL;
+		virtq->configured = 0;
 	}
-	virtq->virtq = NULL;
 	virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
-	return 0;
 }
 
 void
@@ -175,6 +175,9 @@ mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 		virtq = &priv->virtqs[i];
 		pthread_mutex_lock(&virtq->virtq_lock);
 		mlx5_vdpa_virtq_unset(virtq);
+		if (i < (priv->queues * 2))
+			mlx5_vdpa_virtq_single_resource_prepare(
+					priv, i);
 		pthread_mutex_unlock(&virtq->virtq_lock);
 	}
 	priv->features = 0;
@@ -258,7 +261,8 @@ mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
 static int
 mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 		struct mlx5_devx_virtq_attr *attr,
-		struct rte_vhost_vring *vq, int index)
+		struct rte_vhost_vring *vq,
+		int index, bool is_prepare)
 {
 	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
 	uint64_t gpa;
@@ -277,11 +281,15 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 			MLX5_VIRTQ_MODIFY_TYPE_Q_MKEY |
 			MLX5_VIRTQ_MODIFY_TYPE_QUEUE_FEATURE_BIT_MASK |
 			MLX5_VIRTQ_MODIFY_TYPE_EVENT_MODE;
-	attr->tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
-	attr->tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
-	attr->tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
-	attr->rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
-	attr->virtio_version_1_0 =
+	attr->tso_ipv4 = is_prepare ? 1 :
+		!!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
+	attr->tso_ipv6 = is_prepare ? 1 :
+		!!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
+	attr->tx_csum = is_prepare ? 1 :
+		!!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
+	attr->rx_csum = is_prepare ? 1 :
+		!!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
+	attr->virtio_version_1_0 = is_prepare ? 1 :
 		!!(priv->features & (1ULL << VIRTIO_F_VERSION_1));
 	attr->q_type =
 		(priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
@@ -290,12 +298,12 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 	 * No need event QPs creation when the guest in poll mode or when the
 	 * capability allows it.
 	 */
-	attr->event_mode = vq->callfd != -1 ||
+	attr->event_mode = is_prepare || vq->callfd != -1 ||
 	!(priv->caps.event_mode & (1 << MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
 	MLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
 	if (attr->event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
-		ret = mlx5_vdpa_event_qp_prepare(priv,
-				vq->size, vq->callfd, virtq);
+		ret = mlx5_vdpa_event_qp_prepare(priv, vq->size,
+				vq->callfd, virtq, !virtq->virtq);
 		if (ret) {
 			DRV_LOG(ERR,
 				"Failed to create event QPs for virtq %d.",
@@ -320,7 +328,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 		attr->counters_obj_id = virtq->counters->id;
 	}
 	/* Setup 3 UMEMs for each virtq. */
-	if (virtq->virtq) {
+	if (!virtq->virtq) {
 		for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 			uint32_t size;
 			void *buf;
@@ -345,7 +353,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 			buf = rte_zmalloc(__func__,
 				size, 4096);
 			if (buf == NULL) {
-				DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
+				DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq."
 				" %u.", i, index);
 				return -1;
 			}
@@ -366,7 +374,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 			attr->umems[i].size = virtq->umems[i].size;
 		}
 	}
-	if (attr->q_type == MLX5_VIRTQ_TYPE_SPLIT) {
+	if (!is_prepare && attr->q_type == MLX5_VIRTQ_TYPE_SPLIT) {
 		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem_info.vmem,
 					   (uint64_t)(uintptr_t)vq->desc);
 		if (!gpa) {
@@ -389,21 +397,23 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 		}
 		attr->available_addr = gpa;
 	}
-	ret = rte_vhost_get_vring_base(priv->vid,
+	if (!is_prepare) {
+		ret = rte_vhost_get_vring_base(priv->vid,
 			index, &last_avail_idx, &last_used_idx);
-	if (ret) {
-		last_avail_idx = 0;
-		last_used_idx = 0;
-		DRV_LOG(WARNING, "Couldn't get vring base, idx are set to 0.");
-	} else {
-		DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
+		if (ret) {
+			last_avail_idx = 0;
+			last_used_idx = 0;
+			DRV_LOG(WARNING, "Couldn't get vring base, idx are set to 0.");
+		} else {
+			DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
 				"virtq %d.", priv->vid, last_avail_idx,
 				last_used_idx, index);
+		}
 	}
 	attr->hw_available_index = last_avail_idx;
 	attr->hw_used_index = last_used_idx;
 	attr->q_size = vq->size;
-	attr->mkey = priv->gpa_mkey_index;
+	attr->mkey = is_prepare ? 0 : priv->gpa_mkey_index;
 	attr->tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
 	attr->queue_index = index;
 	attr->pd = priv->cdev->pdn;
@@ -416,6 +426,39 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 	return 0;
 }
 
+bool
+mlx5_vdpa_virtq_single_resource_prepare(struct mlx5_vdpa_priv *priv,
+		int index)
+{
+	struct mlx5_devx_virtq_attr attr = {0};
+	struct mlx5_vdpa_virtq *virtq;
+	struct rte_vhost_vring vq = {
+		.size = priv->queue_size,
+		.callfd = -1,
+	};
+	int ret;
+
+	virtq = &priv->virtqs[index];
+	virtq->index = index;
+	virtq->vq_size = vq.size;
+	virtq->configured = 0;
+	virtq->virtq = NULL;
+	ret = mlx5_vdpa_virtq_sub_objs_prepare(priv, &attr, &vq, index, true);
+	if (ret) {
+		DRV_LOG(ERR,
+		"Cannot prepare setup resource for virtq %d.", index);
+		return true;
+	}
+	if (mlx5_vdpa_is_modify_virtq_supported(priv)) {
+		virtq->virtq =
+		mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);
+		virtq->priv = priv;
+		if (!virtq->virtq)
+			return true;
+	}
+	return false;
+}
+
 bool
 mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv)
 {
@@ -473,7 +516,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick)
 	virtq->priv = priv;
 	virtq->stopped = 0;
 	ret = mlx5_vdpa_virtq_sub_objs_prepare(priv, &attr,
-				&vq, index);
+				&vq, index, false);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to setup update virtq attr"
 			" %d.", index);
@@ -746,7 +789,7 @@ mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
 	if (virtq->configured) {
 		virtq->enable = 0;
 		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
-			ret = mlx5_vdpa_steer_update(priv);
+			ret = mlx5_vdpa_steer_update(priv, false);
 			if (ret)
 				DRV_LOG(WARNING, "Failed to disable steering "
 					"for virtq %d.", index);
@@ -761,7 +804,7 @@ mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
 		}
 		virtq->enable = 1;
 		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
-			ret = mlx5_vdpa_steer_update(priv);
+			ret = mlx5_vdpa_steer_update(priv, false);
 			if (ret)
 				DRV_LOG(WARNING, "Failed to enable steering "
 					"for virtq %d.", index);
-- 
2.31.1


  parent reply	other threads:[~2022-06-06 11:49 UTC|newest]

Thread overview: 137+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-04-08  7:55 [RFC 00/15] Add vDPA multi-threads optiomization Li Zhang
2022-04-08  7:55 ` [RFC 01/15] examples/vdpa: fix vDPA device remove Li Zhang
2022-04-08  7:55 ` [RFC 02/15] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-04-08  7:55 ` [RFC 03/15] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-04-08  7:55 ` [RFC 04/15] vdpa/mlx5: support event qp reuse Li Zhang
2022-04-08  7:55 ` [RFC 05/15] common/mlx5: extend virtq modifiable fields Li Zhang
2022-04-08  7:55 ` [RFC 06/15] vdpa/mlx5: pre-create virtq in the prob Li Zhang
2022-04-08  7:55 ` [RFC 07/15] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-04-08  7:55 ` [RFC 08/15] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-04-08  7:55 ` [RFC 09/15] vdpa/mlx5: add task ring for MT management Li Zhang
2022-04-08  7:56 ` [RFC 10/15] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-04-08  7:56 ` [RFC 11/15] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-04-08  7:56 ` [RFC 12/15] vdpa/mlx5: add virtq LM log task Li Zhang
2022-04-08  7:56 ` [RFC 13/15] vdpa/mlx5: add device close task Li Zhang
2022-04-08  7:56 ` [RFC 14/15] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-04-08  7:56 ` [RFC 15/15] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-06 11:20 ` [PATCH v1 00/17] Add vDPA multi-threads optiomization Li Zhang
2022-06-06 11:20   ` [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-06 11:20   ` [PATCH v1 02/17] eal: add device removal in rte cleanup Li Zhang
2022-06-06 11:20   ` [PATCH 02/16] examples/vdpa: fix vDPA device remove Li Zhang
2022-06-06 11:20   ` [PATCH v1 03/17] examples/vdpa: fix devices cleanup Li Zhang
2022-06-06 11:20   ` [PATCH 03/16] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-06 11:20   ` [PATCH 04/16] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-06 11:20   ` [PATCH v1 04/17] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-06 11:20   ` [PATCH v1 05/17] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-06 11:20   ` [PATCH 05/16] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-06 11:20   ` [PATCH 06/16] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-06 11:20   ` [PATCH v1 06/17] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-06 11:20   ` [PATCH v1 07/17] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-06 11:20   ` [PATCH 07/16] vdpa/mlx5: pre-create virtq in the prob Li Zhang
2022-06-06 11:20   ` [PATCH 08/16] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-06 11:20   ` [PATCH v1 08/17] vdpa/mlx5: pre-create virtq in the prob Li Zhang
2022-06-06 11:20   ` [PATCH 09/16] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-06 11:20   ` [PATCH v1 09/17] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-06 11:20   ` [PATCH v1 10/17] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-06 11:20   ` [PATCH 10/16] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-06 11:20   ` [PATCH 11/16] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-06 11:20   ` [PATCH v1 11/17] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-06 11:20   ` [PATCH v1 12/17] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-06 11:21   ` [PATCH 12/16] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-06 11:21   ` [PATCH 13/16] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-06 11:21   ` [PATCH v1 13/17] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-06 11:21   ` [PATCH 14/16] vdpa/mlx5: add device close task Li Zhang
2022-06-06 11:21   ` [PATCH v1 14/17] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-06 11:21   ` [PATCH v1 15/17] vdpa/mlx5: add device close task Li Zhang
2022-06-06 11:21   ` [PATCH 15/16] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-06-06 11:21   ` [PATCH v1 16/17] " Li Zhang
2022-06-06 11:21   ` [PATCH 16/16] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-06 11:21   ` [PATCH v1 17/17] " Li Zhang
2022-06-06 11:46 ` [PATCH v1 00/17] Add vDPA multi-threads optiomization Li Zhang
2022-06-06 11:46   ` [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-06 11:46   ` [PATCH v1 02/17] eal: add device removal in rte cleanup Li Zhang
2022-06-06 11:46   ` [PATCH v1 03/17] examples/vdpa: fix devices cleanup Li Zhang
2022-06-06 11:46   ` [PATCH v1 04/17] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-06 11:46   ` [PATCH v1 05/17] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-06 11:46   ` [PATCH v1 06/17] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-06 11:46   ` [PATCH v1 07/17] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-06 11:46   ` [PATCH v1 08/17] vdpa/mlx5: pre-create virtq in the prob Li Zhang
2022-06-06 11:46   ` [PATCH v1 09/17] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-06 11:46   ` [PATCH v1 10/17] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-06 11:46   ` [PATCH v1 11/17] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-06 11:46   ` [PATCH v1 12/17] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-06 11:46   ` [PATCH v1 13/17] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-06 11:46   ` [PATCH v1 14/17] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-06 11:46   ` [PATCH v1 15/17] vdpa/mlx5: add device close task Li Zhang
2022-06-06 11:46   ` Li Zhang [this message]
2022-06-06 11:46   ` [PATCH v1 17/17] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-16  2:29 ` [PATCH v2 00/15] mlx5/vdpa: optimize live migration time Li Zhang
2022-06-16  2:29   ` [PATCH v2 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-17 14:27     ` Maxime Coquelin
2022-06-16  2:29   ` [PATCH v2 02/15] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-17 15:36     ` Maxime Coquelin
2022-06-18  8:04       ` Li Zhang
2022-06-16  2:30   ` [PATCH v2 03/15] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-17 15:41     ` Maxime Coquelin
2022-06-16  2:30   ` [PATCH v2 04/15] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-16  2:30   ` [PATCH v2 05/15] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-17 15:45     ` Maxime Coquelin
2022-06-16  2:30   ` [PATCH v2 06/15] vdpa/mlx5: pre-create virtq in the prob Li Zhang
2022-06-17 15:53     ` Maxime Coquelin
2022-06-18  7:54       ` Li Zhang
2022-06-16  2:30   ` [PATCH v2 07/15] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-16  2:30   ` [PATCH v2 08/15] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-16  2:30   ` [PATCH v2 09/15] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-16  2:30   ` [PATCH v2 10/15] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-16  2:30   ` [PATCH v2 11/15] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-16  2:30   ` [PATCH v2 12/15] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-16  2:30   ` [PATCH v2 13/15] vdpa/mlx5: add device close task Li Zhang
2022-06-16  2:30   ` [PATCH v2 14/15] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-06-16  2:30   ` [PATCH v2 15/15] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-16  7:24   ` [PATCH v2 00/15] mlx5/vdpa: optimize live migration time Maxime Coquelin
2022-06-16  9:02     ` Maxime Coquelin
2022-06-17  1:49       ` Li Zhang
2022-06-18  8:47 ` [PATCH v3 " Li Zhang
2022-06-18  8:47   ` [PATCH v3 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-18  8:47   ` [PATCH v3 02/15] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-18  8:47   ` [PATCH v3 03/15] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-18  8:47   ` [PATCH v3 04/15] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-18  8:47   ` [PATCH v3 05/15] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-18  8:47   ` [PATCH v3 06/15] vdpa/mlx5: pre-create virtq at probe time Li Zhang
2022-06-18  8:47   ` [PATCH v3 07/15] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-18  8:47   ` [PATCH v3 08/15] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-18  8:47   ` [PATCH v3 09/15] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-18  8:48   ` [PATCH v3 10/15] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-18  8:48   ` [PATCH v3 11/15] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-18  8:48   ` [PATCH v3 12/15] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-18  8:48   ` [PATCH v3 13/15] vdpa/mlx5: add device close task Li Zhang
2022-06-18  8:48   ` [PATCH v3 14/15] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-06-18  8:48   ` [PATCH v3 15/15] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-18  9:02 ` [PATCH v4 00/15] mlx5/vdpa: optimize live migration time Li Zhang
2022-06-18  9:02   ` [PATCH v4 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-18  9:02   ` [PATCH v4 02/15] vdpa/mlx5: support pre create virtq resource Li Zhang
2022-06-18  9:02   ` [PATCH v4 03/15] common/mlx5: add DevX API to move QP to reset state Li Zhang
2022-06-18  9:02   ` [PATCH v4 04/15] vdpa/mlx5: support event qp reuse Li Zhang
2022-06-20  8:27     ` Maxime Coquelin
2022-06-18  9:02   ` [PATCH v4 05/15] common/mlx5: extend virtq modifiable fields Li Zhang
2022-06-20  9:01     ` Maxime Coquelin
2022-06-18  9:02   ` [PATCH v4 06/15] vdpa/mlx5: pre-create virtq at probe time Li Zhang
2022-06-18  9:02   ` [PATCH v4 07/15] vdpa/mlx5: optimize datapath-control synchronization Li Zhang
2022-06-20  9:25     ` Maxime Coquelin
2022-06-18  9:02   ` [PATCH v4 08/15] vdpa/mlx5: add multi-thread management for configuration Li Zhang
2022-06-20 10:57     ` Maxime Coquelin
2022-06-18  9:02   ` [PATCH v4 09/15] vdpa/mlx5: add task ring for MT management Li Zhang
2022-06-20 15:05     ` Maxime Coquelin
2022-06-18  9:02   ` [PATCH v4 10/15] vdpa/mlx5: add MT task for VM memory registration Li Zhang
2022-06-20 15:12     ` Maxime Coquelin
2022-06-18  9:02   ` [PATCH v4 11/15] vdpa/mlx5: add virtq creation task for MT management Li Zhang
2022-06-20 15:19     ` Maxime Coquelin
2022-06-18  9:02   ` [PATCH v4 12/15] vdpa/mlx5: add virtq LM log task Li Zhang
2022-06-20 15:42     ` Maxime Coquelin
2022-06-18  9:02   ` [PATCH v4 13/15] vdpa/mlx5: add device close task Li Zhang
2022-06-20 15:54     ` Maxime Coquelin
2022-06-18  9:02   ` [PATCH v4 14/15] vdpa/mlx5: add virtq sub-resources creation Li Zhang
2022-06-20 16:01     ` Maxime Coquelin
2022-06-18  9:02   ` [PATCH v4 15/15] vdpa/mlx5: prepare virtqueue resource creation Li Zhang
2022-06-20 16:30     ` Maxime Coquelin
2022-06-21  9:29   ` [PATCH v4 00/15] mlx5/vdpa: optimize live migration time Maxime Coquelin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220606114650.209612-17-lizh@nvidia.com \
    --to=lizh@nvidia.com \
    --cc=dev@dpdk.org \
    --cc=matan@nvidia.com \
    --cc=orika@nvidia.com \
    --cc=rasland@nvidia.com \
    --cc=roniba@nvidia.com \
    --cc=shahafs@nvidia.com \
    --cc=thomas@monjalon.net \
    --cc=viacheslavo@nvidia.com \
    --cc=yajunw@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.