From: Matan Azrad <matan@mellanox.com>
To: Ferruh Yigit <ferruh.yigit@intel.com>,
	Shahaf Shuler <shahafs@mellanox.com>,
	Yongseok Koh <yskoh@mellanox.com>,
	Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Cc: dev@dpdk.org, Dekel Peled <dekelp@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 21/28] net/mlx5: create advanced RxQ using new API
Date: Mon, 22 Jul 2019 14:52:18 +0000
Message-ID: <1563807145-16577-22-git-send-email-matan@mellanox.com>
In-Reply-To: <1563807145-16577-1-git-send-email-matan@mellanox.com>

From: Dekel Peled <dekelp@mellanox.com>

Function mlx5_rxq_obj_new(), previously called mlx5_rxq_ibv_new(),
supports creating Rx queue objects using Verbs.
This patch expands the relevant functions to support creating either
Verbs or DevX Rx queue objects:
Function mlx5_rxq_obj_new() is updated to create an RQ object using DevX.
Function mlx5_ind_table_obj_new() is updated to create an RQT object
using DevX.
Function mlx5_hrxq_new() is updated to create a TIR object using DevX.
New utility functions are added to perform the specific operations:
mlx5_devx_rq_new(), mlx5_devx_wq_attr_fill() and
mlx5_devx_create_rq_attr_fill().

Signed-off-by: Dekel Peled <dekelp@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxq.c     | 547 +++++++++++++++++++++++++++++++---------
 drivers/net/mlx5/mlx5_rxtx.h    |   9 +-
 drivers/net/mlx5/mlx5_trigger.c |   3 +-
 drivers/net/mlx5/mlx5_vlan.c    |  30 ++-
 4 files changed, 452 insertions(+), 137 deletions(-)
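
For reference, a minimal caller-side sketch of the reworked entry point is
shown below. It is illustrative only and not part of the diff: the wrapper
rxq_obj_start_sketch() and its use_devx flag are hypothetical, while
mlx5_rxq_obj_new(), the mlx5_rxq_obj_type values and the NULL/rte_errno
error convention are taken from the changes in this patch (the in-tree
caller is the mlx5_trigger.c hunk below).

#include <rte_ethdev.h>
#include <rte_errno.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"

/*
 * Caller-side sketch, not part of the patch. The wrapper name and the
 * use_devx flag are hypothetical; the mlx5_rxq_obj_new() prototype and
 * the object type values come from this patch series.
 */
static int
rxq_obj_start_sketch(struct rte_eth_dev *dev, uint16_t idx, int use_devx)
{
	enum mlx5_rxq_obj_type type = use_devx ?
		MLX5_RXQ_OBJ_TYPE_DEVX_RQ : MLX5_RXQ_OBJ_TYPE_IBV;
	struct mlx5_rxq_obj *obj;

	obj = mlx5_rxq_obj_new(dev, idx, type);
	if (!obj)
		return -rte_errno; /* rte_errno is set on failure. */
	/* The stored type later selects the Verbs WQ or DevX RQ code paths. */
	return 0;
}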

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 9d859df..1e09078 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -561,6 +561,23 @@
 }
 
 /**
+ * Release the resources allocated for an RQ DevX object.
+ *
+ * @param rxq_ctrl
+ *   DevX Rx queue object.
+ */
+static void
+rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+	if (rxq_ctrl->rxq.wqes) {
+		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
+		rxq_ctrl->rxq.wqes = NULL;
+	}
+	if (rxq_ctrl->wq_umem)
+		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
+}
+
+/**
  * Release an Rx verbs/DevX queue object.
  *
  * @param rxq_obj
@@ -573,11 +590,17 @@
 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
 {
 	assert(rxq_obj);
-	assert(rxq_obj->wq);
+	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV)
+		assert(rxq_obj->wq);
 	assert(rxq_obj->cq);
 	if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
 		rxq_free_elts(rxq_obj->rxq_ctrl);
-		claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+		if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
+			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+		} else if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
+			claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
+			rxq_release_rq_resources(rxq_obj->rxq_ctrl);
+		}
 		claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
 		if (rxq_obj->channel)
 			claim_zero(mlx5_glue->destroy_comp_channel
@@ -1000,18 +1023,147 @@
 }
 
 /**
+ * Fill common fields of create RQ attributes structure.
+ *
+ * @param rxq_data
+ *   Pointer to Rx queue data.
+ * @param cqn
+ *   CQ number to use with this RQ.
+ * @param rq_attr
+ *   RQ attributes structure to fill.
+ */
+static void
+mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
+			      struct mlx5_devx_create_rq_attr *rq_attr)
+{
+	rq_attr->state = MLX5_RQC_STATE_RST;
+	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
+	rq_attr->cqn = cqn;
+	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
+}
+
+/**
+ * Fill common fields of DevX WQ attributes structure.
+ *
+ * @param priv
+ *   Pointer to device private data.
+ * @param rxq_ctrl
+ *   Pointer to Rx queue control structure.
+ * @param wq_attr
+ *   WQ attributes structure to fill.
+ */
+static void
+mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
+		       struct mlx5_devx_wq_attr *wq_attr)
+{
+	wq_attr->end_padding_mode = priv->config.cqe_pad ?
+					MLX5_WQ_END_PAD_MODE_ALIGN :
+					MLX5_WQ_END_PAD_MODE_NONE;
+	wq_attr->pd = priv->sh->pdn;
+	wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
+	wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
+	wq_attr->dbr_umem_valid = 1;
+	wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
+	wq_attr->wq_umem_valid = 1;
+}
+
+/**
+ * Create an RQ object using DevX.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   Queue index in DPDK Rx queue array.
+ * @param cqn
+ *   CQ number to use with this RQ.
+ *
+ * @return
+ *   The DevX object initialised, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_devx_obj *
+mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_devx_create_rq_attr rq_attr;
+	uint32_t wqe_n = 1 << rxq_data->elts_n;
+	uint32_t wq_size = 0;
+	uint32_t wqe_size = 0;
+	uint32_t log_wqe_size = 0;
+	void *buf = NULL;
+	struct mlx5_devx_obj *rq;
+
+	memset(&rq_attr, 0, sizeof(rq_attr));
+	/* Fill RQ attributes. */
+	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
+	rq_attr.flush_in_error_en = 1;
+	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
+	/* Fill WQ attributes for this RQ. */
+	if (mlx5_rxq_mprq_enabled(rxq_data)) {
+		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
+		/*
+		 * Number of strides in each WQE:
+		 * 512*2^single_wqe_log_num_of_strides.
+		 */
+		rq_attr.wq_attr.single_wqe_log_num_of_strides =
+				rxq_data->strd_num_n -
+				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
+		rq_attr.wq_attr.single_stride_log_num_of_bytes =
+				rxq_data->strd_sz_n -
+				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
+		wqe_size = sizeof(struct mlx5_wqe_mprq);
+	} else {
+		int max_sge = 0;
+		int num_scatter = 0;
+
+		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
+		max_sge = 1 << rxq_data->sges_n;
+		num_scatter = RTE_MAX(max_sge, 1);
+		wqe_size = sizeof(struct mlx5_wqe_data_seg) * num_scatter;
+	}
+	log_wqe_size = log2above(wqe_size);
+	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
+	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n;
+	/* Calculate and allocate WQ memory space. */
+	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
+	wq_size = wqe_n * wqe_size;
+	buf = rte_calloc_socket(__func__, 1, wq_size, RTE_CACHE_LINE_SIZE,
+				rxq_ctrl->socket);
+	if (!buf)
+		return NULL;
+	rxq_data->wqes = buf;
+	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
+						     buf, wq_size, 0);
+	if (!rxq_ctrl->wq_umem) {
+		rte_free(buf);
+		return NULL;
+	}
+	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
+	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
+	if (!rq)
+		rxq_release_rq_resources(rxq_ctrl);
+	return rq;
+}
+
+/**
  * Create the Rx queue Verbs/DevX object.
  *
  * @param dev
  *   Pointer to Ethernet device.
  * @param idx
  *   Queue index in DPDK Rx queue array
+ * @param type
+ *   Type of Rx queue object to create.
  *
  * @return
  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
 struct mlx5_rxq_obj *
-mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
+		 enum mlx5_rxq_obj_type type)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
@@ -1039,6 +1191,7 @@ struct mlx5_rxq_obj *
 		rte_errno = ENOMEM;
 		goto error;
 	}
+	tmpl->type = type;
 	tmpl->rxq_ctrl = rxq_ctrl;
 	if (rxq_ctrl->irq) {
 		tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
@@ -1060,35 +1213,9 @@ struct mlx5_rxq_obj *
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
-		dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
-	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
-		dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
-	tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n, tmpl);
-	if (!tmpl->wq) {
-		DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
-			dev->data->port_id, idx);
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	/* Change queue state to ready. */
-	mod = (struct ibv_wq_attr){
-		.attr_mask = IBV_WQ_ATTR_STATE,
-		.wq_state = IBV_WQS_RDY,
-	};
-	ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
-	if (ret) {
-		DRV_LOG(ERR,
-			"port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
-			dev->data->port_id, idx);
-		rte_errno = ret;
-		goto error;
-	}
 	obj.cq.in = tmpl->cq;
 	obj.cq.out = &cq_info;
-	obj.rwq.in = tmpl->wq;
-	obj.rwq.out = &rwq;
-	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
+	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
 	if (ret) {
 		rte_errno = ret;
 		goto error;
@@ -1101,9 +1228,73 @@ struct mlx5_rxq_obj *
 		rte_errno = EINVAL;
 		goto error;
 	}
+	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
+		dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
+	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
+		dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
+	/* Allocate door-bell for types created with DevX. */
+	if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
+		struct mlx5_devx_dbr_page *dbr_page;
+		int64_t dbr_offset;
+
+		dbr_offset = mlx5_get_dbr(dev, &dbr_page);
+		if (dbr_offset < 0)
+			goto error;
+		rxq_ctrl->dbr_offset = dbr_offset;
+		rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
+		rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
+					       (uintptr_t)rxq_ctrl->dbr_offset);
+	}
+	if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
+		tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
+					   tmpl);
+		if (!tmpl->wq) {
+			DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
+				dev->data->port_id, idx);
+			rte_errno = ENOMEM;
+			goto error;
+		}
+		/* Change queue state to ready. */
+		mod = (struct ibv_wq_attr){
+			.attr_mask = IBV_WQ_ATTR_STATE,
+			.wq_state = IBV_WQS_RDY,
+		};
+		ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
+		if (ret) {
+			DRV_LOG(ERR,
+				"port %u Rx queue %u WQ state to IBV_WQS_RDY"
+				" failed", dev->data->port_id, idx);
+			rte_errno = ret;
+			goto error;
+		}
+		obj.rwq.in = tmpl->wq;
+		obj.rwq.out = &rwq;
+		ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
+		if (ret) {
+			rte_errno = ret;
+			goto error;
+		}
+		rxq_data->wqes = rwq.buf;
+		rxq_data->rq_db = rwq.dbrec;
+	} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
+		struct mlx5_devx_modify_rq_attr rq_attr;
+
+		memset(&rq_attr, 0, sizeof(rq_attr));
+		tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
+		if (!tmpl->rq) {
+			DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
+				dev->data->port_id, idx);
+			rte_errno = ENOMEM;
+			goto error;
+		}
+		/* Change queue state to ready. */
+		rq_attr.rq_state = MLX5_RQC_STATE_RST;
+		rq_attr.state = MLX5_RQC_STATE_RDY;
+		ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
+		if (ret)
+			goto error;
+	}
 	/* Fill the rings. */
-	rxq_data->wqes = rwq.buf;
-	rxq_data->rq_db = rwq.dbrec;
 	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
 	rxq_data->cq_db = cq_info.dbrec;
 	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
@@ -1121,8 +1312,10 @@ struct mlx5_rxq_obj *
 error:
 	if (tmpl) {
 		ret = rte_errno; /* Save rte_errno before cleanup. */
-		if (tmpl->wq)
+		if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
 			claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
+		else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
+			claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
 		if (tmpl->cq)
 			claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
 		if (tmpl->channel)
@@ -1131,6 +1324,8 @@ struct mlx5_rxq_obj *
 		rte_free(tmpl);
 		rte_errno = ret; /* Restore rte_errno. */
 	}
+	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
+		rxq_release_rq_resources(rxq_ctrl);
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 	return NULL;
 }
@@ -1585,6 +1780,8 @@ struct mlx5_rxq_ctrl *
 	if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
 		rxq_ctrl->obj = NULL;
 	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
+		claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
+					    rxq_ctrl->dbr_offset));
 		mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
 		LIST_REMOVE(rxq_ctrl, next);
 		rte_free(rxq_ctrl);
@@ -1633,16 +1830,11 @@ struct mlx5_rxq_ctrl *
  */
 static struct mlx5_ind_table_obj *
 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
-		       uint32_t queues_n)
+		       uint32_t queues_n, enum mlx5_ind_tbl_type type)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ind_table_obj *ind_tbl;
-	const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
-		log2above(queues_n) :
-		log2above(priv->config.ind_table_max_size);
-	struct ibv_wq *wq[1 << wq_n];
-	unsigned int i;
-	unsigned int j;
+	unsigned int i = 0, j = 0, k = 0;
 
 	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
 			     queues_n * sizeof(uint16_t), 0);
@@ -1650,33 +1842,75 @@ struct mlx5_rxq_ctrl *
 		rte_errno = ENOMEM;
 		return NULL;
 	}
-	for (i = 0; i != queues_n; ++i) {
-		struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
+	ind_tbl->type = type;
+	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
+		const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
+			log2above(queues_n) :
+			log2above(priv->config.ind_table_max_size);
+		struct ibv_wq *wq[1 << wq_n];
+
+		for (i = 0; i != queues_n; ++i) {
+			struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
+								 queues[i]);
+			if (!rxq)
+				goto error;
+			wq[i] = rxq->obj->wq;
+			ind_tbl->queues[i] = queues[i];
+		}
+		ind_tbl->queues_n = queues_n;
+		/* Finalise indirection table. */
+		k = i; /* Retain value of i for use in error case. */
+		for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
+			wq[k] = wq[j];
+		ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+			(priv->sh->ctx,
+			 &(struct ibv_rwq_ind_table_init_attr){
+				.log_ind_tbl_size = wq_n,
+				.ind_tbl = wq,
+				.comp_mask = 0,
+			});
+		if (!ind_tbl->ind_table) {
+			rte_errno = errno;
+			goto error;
+		}
+	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
+		struct mlx5_devx_rqt_attr *rqt_attr = NULL;
 
-		if (!rxq)
+		rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
+				      queues_n * sizeof(uint16_t), 0);
+		if (!rqt_attr) {
+			DRV_LOG(ERR, "port %u cannot allocate RQT resources",
+				dev->data->port_id);
+			rte_errno = ENOMEM;
 			goto error;
-		wq[i] = rxq->obj->wq;
-		ind_tbl->queues[i] = queues[i];
-	}
-	ind_tbl->queues_n = queues_n;
-	/* Finalise indirection table. */
-	for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
-		wq[i] = wq[j];
-	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
-		(priv->sh->ctx,
-		 &(struct ibv_rwq_ind_table_init_attr){
-			.log_ind_tbl_size = wq_n,
-			.ind_tbl = wq,
-			.comp_mask = 0,
-		 });
-	if (!ind_tbl->ind_table) {
-		rte_errno = errno;
-		goto error;
+		}
+		rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
+		rqt_attr->rqt_actual_size = queues_n;
+		for (i = 0; i != queues_n; ++i) {
+			struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
+								 queues[i]);
+			if (!rxq)
+				goto error;
+			rqt_attr->rq_list[i] = rxq->obj->rq->id;
+			ind_tbl->queues[i] = queues[i];
+		}
+		ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
+							rqt_attr);
+		rte_free(rqt_attr);
+		if (!ind_tbl->rqt) {
+			DRV_LOG(ERR, "port %u cannot create DevX RQT",
+				dev->data->port_id);
+			rte_errno = errno;
+			goto error;
+		}
+		ind_tbl->queues_n = queues_n;
 	}
 	rte_atomic32_inc(&ind_tbl->refcnt);
 	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
 	return ind_tbl;
 error:
+	for (j = 0; j < i; j++)
+		mlx5_rxq_release(dev, ind_tbl->queues[j]);
 	rte_free(ind_tbl);
 	DEBUG("port %u cannot create indirection table", dev->data->port_id);
 	return NULL;
@@ -1736,9 +1970,13 @@ struct mlx5_rxq_ctrl *
 {
 	unsigned int i;
 
-	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
-		claim_zero(mlx5_glue->destroy_rwq_ind_table
-			   (ind_tbl->ind_table));
+	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
+		if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
+			claim_zero(mlx5_glue->destroy_rwq_ind_table
+							(ind_tbl->ind_table));
+		else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
+			claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
+	}
 	for (i = 0; i != ind_tbl->queues_n; ++i)
 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
 	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1805,93 +2043,145 @@ struct mlx5_hrxq *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
+	struct ibv_qp *qp = NULL;
 	struct mlx5_ind_table_obj *ind_tbl;
-	struct ibv_qp *qp;
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	struct mlx5dv_qp_init_attr qp_init_attr;
-#endif
 	int err;
+	struct mlx5_devx_obj *tir = NULL;
 
 	queues_n = hash_fields ? queues_n : 1;
 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
-	if (!ind_tbl)
-		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+	if (!ind_tbl) {
+		struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
+		struct mlx5_rxq_ctrl *rxq_ctrl =
+			container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+		enum mlx5_ind_tbl_type type;
+
+		type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
+				MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
+		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
+	}
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 		return NULL;
 	}
+	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
-	if (tunnel) {
-		qp_init_attr.comp_mask =
+		struct mlx5dv_qp_init_attr qp_init_attr;
+
+		memset(&qp_init_attr, 0, sizeof(qp_init_attr));
+		if (tunnel) {
+			qp_init_attr.comp_mask =
 				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
-		qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
-	}
+			qp_init_attr.create_flags =
+				MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
+		}
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	if (dev->data->dev_conf.lpbk_mode) {
-		/* Allow packet sent from NIC loop back w/o source MAC check. */
-		qp_init_attr.comp_mask |=
+		if (dev->data->dev_conf.lpbk_mode) {
+			/*
+			 * Allow packet sent from NIC loop back
+			 * w/o source MAC check.
+			 */
+			qp_init_attr.comp_mask |=
 				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
-		qp_init_attr.create_flags |=
+			qp_init_attr.create_flags |=
 				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
-	}
+		}
 #endif
-	qp = mlx5_glue->dv_create_qp
-		(priv->sh->ctx,
-		 &(struct ibv_qp_init_attr_ex){
-			.qp_type = IBV_QPT_RAW_PACKET,
-			.comp_mask =
-				IBV_QP_INIT_ATTR_PD |
-				IBV_QP_INIT_ATTR_IND_TABLE |
-				IBV_QP_INIT_ATTR_RX_HASH,
-			.rx_hash_conf = (struct ibv_rx_hash_conf){
-				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
-				.rx_hash_key_len = rss_key_len,
-				.rx_hash_key = (void *)(uintptr_t)rss_key,
-				.rx_hash_fields_mask = hash_fields,
-			},
-			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->sh->pd,
-		 },
-		 &qp_init_attr);
+		qp = mlx5_glue->dv_create_qp
+			(priv->sh->ctx,
+			 &(struct ibv_qp_init_attr_ex){
+				.qp_type = IBV_QPT_RAW_PACKET,
+				.comp_mask =
+					IBV_QP_INIT_ATTR_PD |
+					IBV_QP_INIT_ATTR_IND_TABLE |
+					IBV_QP_INIT_ATTR_RX_HASH,
+				.rx_hash_conf = (struct ibv_rx_hash_conf){
+					.rx_hash_function =
+						IBV_RX_HASH_FUNC_TOEPLITZ,
+					.rx_hash_key_len = rss_key_len,
+					.rx_hash_key =
+						(void *)(uintptr_t)rss_key,
+					.rx_hash_fields_mask = hash_fields,
+				},
+				.rwq_ind_tbl = ind_tbl->ind_table,
+				.pd = priv->sh->pd,
+			  },
+			  &qp_init_attr);
 #else
-	qp = mlx5_glue->create_qp_ex
-		(priv->sh->ctx,
-		 &(struct ibv_qp_init_attr_ex){
-			.qp_type = IBV_QPT_RAW_PACKET,
-			.comp_mask =
-				IBV_QP_INIT_ATTR_PD |
-				IBV_QP_INIT_ATTR_IND_TABLE |
-				IBV_QP_INIT_ATTR_RX_HASH,
-			.rx_hash_conf = (struct ibv_rx_hash_conf){
-				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
-				.rx_hash_key_len = rss_key_len,
-				.rx_hash_key = (void *)(uintptr_t)rss_key,
-				.rx_hash_fields_mask = hash_fields,
-			},
-			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->sh->pd,
-		 });
+		qp = mlx5_glue->create_qp_ex
+			(priv->sh->ctx,
+			 &(struct ibv_qp_init_attr_ex){
+				.qp_type = IBV_QPT_RAW_PACKET,
+				.comp_mask =
+					IBV_QP_INIT_ATTR_PD |
+					IBV_QP_INIT_ATTR_IND_TABLE |
+					IBV_QP_INIT_ATTR_RX_HASH,
+				.rx_hash_conf = (struct ibv_rx_hash_conf){
+					.rx_hash_function =
+						IBV_RX_HASH_FUNC_TOEPLITZ,
+					.rx_hash_key_len = rss_key_len,
+					.rx_hash_key =
+						(void *)(uintptr_t)rss_key,
+					.rx_hash_fields_mask = hash_fields,
+				},
+				.rwq_ind_tbl = ind_tbl->ind_table,
+				.pd = priv->sh->pd,
+			 });
 #endif
-	if (!qp) {
-		rte_errno = errno;
-		goto error;
+		if (!qp) {
+			rte_errno = errno;
+			goto error;
+		}
+	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
+		struct mlx5_devx_tir_attr tir_attr;
+
+		memset(&tir_attr, 0, sizeof(tir_attr));
+		tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
+		tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
+		memcpy(&tir_attr.rx_hash_field_selector_outer, &hash_fields,
+		       sizeof(uint64_t));
+		tir_attr.transport_domain = priv->sh->tdn;
+		memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
+		tir_attr.indirect_table = ind_tbl->rqt->id;
+		if (dev->data->dev_conf.lpbk_mode)
+			tir_attr.self_lb_block =
+					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+		tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
+		if (!tir) {
+			DRV_LOG(ERR, "port %u cannot create DevX TIR",
+				dev->data->port_id);
+			rte_errno = errno;
+			goto error;
+		}
 	}
 	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
 	if (!hrxq)
 		goto error;
 	hrxq->ind_table = ind_tbl;
-	hrxq->qp = qp;
+	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
+		hrxq->qp = qp;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+		hrxq->action =
+			mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
+		if (!hrxq->action) {
+			rte_errno = errno;
+			goto error;
+		}
+#endif
+	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
+		hrxq->tir = tir;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+		hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
+							(hrxq->tir->obj);
+		if (!hrxq->action) {
+			rte_errno = errno;
+			goto error;
+		}
+#endif
+	}
 	hrxq->rss_key_len = rss_key_len;
 	hrxq->hash_fields = hash_fields;
 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
-	if (!hrxq->action) {
-		rte_errno = errno;
-		goto error;
-	}
-#endif
 	rte_atomic32_inc(&hrxq->refcnt);
 	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
 	return hrxq;
@@ -1900,6 +2190,8 @@ struct mlx5_hrxq *
 	mlx5_ind_table_obj_release(dev, ind_tbl);
 	if (qp)
 		claim_zero(mlx5_glue->destroy_qp(qp));
+	else if (tir)
+		claim_zero(mlx5_devx_cmd_destroy(tir));
 	rte_errno = err; /* Restore rte_errno. */
 	return NULL;
 }
@@ -1970,7 +2262,10 @@ struct mlx5_hrxq *
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
-		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+		if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
+			claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+		else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
+			claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
 		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
 		LIST_REMOVE(hrxq, next);
 		rte_free(hrxq);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index f4f5c0d..bd4ae80 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -80,6 +80,9 @@ struct mlx5_mprq_buf {
 /* Get pointer to the first stride. */
 #define mlx5_mprq_buf_addr(ptr) ((ptr) + 1)
 
+#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
+#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
+
 enum mlx5_rxq_err_state {
 	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
 	MLX5_RXQ_ERR_STATE_NEED_RESET,
@@ -174,6 +177,9 @@ struct mlx5_rxq_ctrl {
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 	uint32_t wqn; /* WQ number. */
 	uint16_t dump_file_n; /* Number of dump files. */
+	uint32_t dbr_umem_id; /* Storing door-bell information, */
+	uint64_t dbr_offset;  /* needed when freeing door-bell. */
+	struct mlx5dv_devx_umem *wq_umem; /* WQ buffer registration info. */
 };
 
 enum mlx5_ind_tbl_type {
@@ -324,7 +330,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
 int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-struct mlx5_rxq_obj *mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_obj *mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
+				      enum mlx5_rxq_obj_type type);
 int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
 struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 54353ee..acd2902 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -123,7 +123,8 @@
 		ret = rxq_alloc_elts(rxq_ctrl);
 		if (ret)
 			goto error;
-		rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i);
+		rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i,
+						 MLX5_RXQ_OBJ_TYPE_DEVX_RQ);
 		if (!rxq_ctrl->obj)
 			goto error;
 		rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 67518c2..5f6554a 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -111,7 +111,7 @@
 	uint16_t vlan_offloads =
 		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
 		0;
-	int ret;
+	int ret = 0;
 
 	/* Validate hw support */
 	if (!priv->config.hw_vlan_strip) {
@@ -132,15 +132,27 @@
 		rxq->vlan_strip = !!on;
 		return;
 	}
-	mod = (struct ibv_wq_attr){
-		.attr_mask = IBV_WQ_ATTR_FLAGS,
-		.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
-		.flags = vlan_offloads,
-	};
-	ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
+	if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
+		mod = (struct ibv_wq_attr){
+			.attr_mask = IBV_WQ_ATTR_FLAGS,
+			.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
+			.flags = vlan_offloads,
+		};
+		ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
+	} else if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
+		struct mlx5_devx_modify_rq_attr rq_attr;
+
+		memset(&rq_attr, 0, sizeof(rq_attr));
+		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
+		rq_attr.state = MLX5_RQC_STATE_RDY;
+		rq_attr.vsd = (on ? 0 : 1);
+		rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
+		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
+	}
 	if (ret) {
-		DRV_LOG(ERR, "port %u failed to modified stripping mode: %s",
-			dev->data->port_id, strerror(rte_errno));
+		DRV_LOG(ERR, "port %u failed to modify object %d stripping "
+			"mode: %s", dev->data->port_id,
+			rxq_ctrl->obj->type, strerror(rte_errno));
 		return;
 	}
 	/* Update related bits in RX queue. */
-- 
1.8.3.1



Thread overview: 92+ messages
2019-07-22  9:12 [dpdk-dev] [PATCH 00/28] net/mlx5: support LRO Matan Azrad
2019-07-22  9:12 ` [dpdk-dev] [PATCH 01/28] net/mlx5: remove redundant item from union Matan Azrad
2019-07-22  9:17   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 02/28] net/mlx5: add LRO APIs and initial settings Matan Azrad
2019-07-22  9:25   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 03/28] net/mlx5: support LRO caps query using devx API Matan Azrad
2019-07-22  9:17   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 04/28] net/mlx5: glue func for queue query using new API Matan Azrad
2019-07-22  9:18   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 05/28] net/mlx5: glue function for action " Matan Azrad
2019-07-22  9:18   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 06/28] net/mlx5: check conditions to enable LRO Matan Azrad
2019-07-22  9:18   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 07/28] net/mlx5: support Tx interface query using new API Matan Azrad
2019-07-22  9:19   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 08/28] net/mlx5: update Tx queue create for LRO Matan Azrad
2019-07-22  9:18   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 09/28] net/mlx5: create advanced RxQ object using new API Matan Azrad
2019-07-22  9:17   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 10/28] net/mlx5: modify " Matan Azrad
2019-07-22  9:20   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 11/28] net/mlx5: create advanced Rx " Matan Azrad
2019-07-22  9:20   ` Slava Ovsiienko
2019-07-22  9:12 ` [dpdk-dev] [PATCH 12/28] net/mlx5: create advanced RxQ table " Matan Azrad
2019-07-22  9:21   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 13/28] net/mlx5: allocate door-bells " Matan Azrad
2019-07-22  9:20   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 14/28] net/mlx5: rename RxQ verbs to general RxQ object Matan Azrad
2019-07-22  9:22   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 15/28] net/mlx5: rename verbs indirection table to obj Matan Azrad
2019-07-22  9:22   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 16/28] net/mlx5: rename hash RxQ verbs to general Matan Azrad
2019-07-22  9:22   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 17/28] net/mlx5: update queue state modify function Matan Azrad
2019-07-22  9:22   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 18/28] net/mlx5: store protection domain number on create Matan Azrad
2019-07-22  9:21   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 19/28] net/mlx5: func to create Rx verbs completion queue Matan Azrad
2019-07-22  9:23   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 20/28] net/mlx5: function to create Rx verbs work queue Matan Azrad
2019-07-22  9:21   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 21/28] net/mlx5: create advanced RxQ using new API Matan Azrad
2019-07-22  9:21   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 22/28] net/mlx5: support LRO with single RxQ object Matan Azrad
2019-07-22  9:22   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 23/28] net/mlx5: replace the external mbuf shared memory Matan Azrad
2019-07-22  9:21   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 24/28] net/mlx5: update LRO fields in completion entry Matan Azrad
2019-07-22  9:23   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 25/28] net/mlx5: handle LRO packets in Rx queue Matan Azrad
2019-07-22  9:26   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 26/28] net/mlx5: zero the LRO mbuf headroom Matan Azrad
2019-07-22  9:23   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 27/28] net/mlx5: adjust the maximum LRO message size Matan Azrad
2019-07-22  9:23   ` Slava Ovsiienko
2019-07-22  9:13 ` [dpdk-dev] [PATCH 28/28] doc: update MLX5 doc and release notes with LRO Matan Azrad
2019-07-22  9:23   ` Slava Ovsiienko
2019-07-22 10:42 ` [dpdk-dev] [PATCH 00/28] net/mlx5: support LRO Raslan Darawsheh
2019-07-22 12:48 ` Ferruh Yigit
2019-07-22 13:32   ` Matan Azrad
2019-07-22 14:51 ` [dpdk-dev] [PATCH v2 " Matan Azrad
2019-07-22 14:51   ` [dpdk-dev] [PATCH v2 01/28] net/mlx5: remove redundant item from union Matan Azrad
2019-07-23 10:53     ` Ferruh Yigit
2019-07-23 12:10       ` Matan Azrad
2019-07-22 14:51   ` [dpdk-dev] [PATCH v2 02/28] net/mlx5: add LRO APIs and initial settings Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 03/28] net/mlx5: support LRO caps query using devx API Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 04/28] net/mlx5: glue func for queue query using new API Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 05/28] net/mlx5: glue function for action " Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 06/28] net/mlx5: check conditions to enable LRO Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 07/28] net/mlx5: support Tx interface query using new API Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 08/28] net/mlx5: update Tx queue create for LRO Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 09/28] net/mlx5: create advanced RxQ object using new API Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 10/28] net/mlx5: modify " Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 11/28] net/mlx5: create advanced Rx " Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 12/28] net/mlx5: create advanced RxQ table " Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 13/28] net/mlx5: allocate door-bells " Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 14/28] net/mlx5: rename RxQ verbs to general RxQ object Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 15/28] net/mlx5: rename verbs indirection table to obj Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 16/28] net/mlx5: rename hash RxQ verbs to general Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 17/28] net/mlx5: update queue state modify function Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 18/28] net/mlx5: store protection domain number on create Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 19/28] net/mlx5: func to create Rx verbs completion queue Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 20/28] net/mlx5: function to create Rx verbs work queue Matan Azrad
2019-07-22 14:52   ` Matan Azrad [this message]
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 22/28] net/mlx5: support LRO with single RxQ object Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 23/28] net/mlx5: replace the external mbuf shared memory Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 24/28] net/mlx5: update LRO fields in completion entry Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 25/28] net/mlx5: handle LRO packets in Rx queue Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 26/28] net/mlx5: zero the LRO mbuf headroom Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 27/28] net/mlx5: adjust the maximum LRO message size Matan Azrad
2019-07-22 14:52   ` [dpdk-dev] [PATCH v2 28/28] doc: update MLX5 doc and release notes with LRO Matan Azrad
2019-07-23  6:48   ` [dpdk-dev] [PATCH v2 00/28] net/mlx5: support LRO Raslan Darawsheh
