From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad <matan@nvidia.com>,
	Raslan Darawsheh <rasland@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 12/17] net/mlx5: move rearm and clock queue SQ creation to common
Date: Tue, 29 Dec 2020 08:52:19 +0000
Message-ID: <1609231944-29274-13-git-send-email-michaelba@nvidia.com>
In-Reply-To: <1609231944-29274-1-git-send-email-michaelba@nvidia.com>

Use the common function for DevX SQ creation for the rearm and clock queues.
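
This replaces the per-queue open-coded sequence (WQ buffer allocation,
umem registration, SQ object creation and doorbell mapping) with a
single helper call, and collapses the separate sq/sq_umem/sq_buf/
sq_dbrec fields into one embedded mlx5_devx_sq object. A minimal
caller-side sketch of the new pattern, built only from names visible
in this patch (error handling and surrounding context are trimmed, so
this is illustrative rather than a complete function):

	struct mlx5_devx_create_sq_attr sq_attr = {
		.state = MLX5_SQC_STATE_RST,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = sh->pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
		},
	};
	struct mlx5_wqe *wqe;

	/* One call allocates the WQ buffer, registers the umem and
	 * creates the DevX SQ object; it returns 0 on success.
	 */
	if (mlx5_devx_sq_create(sh->ctx, &wq->sq_obj,
				log2above(wq->sq_size), &sq_attr,
				sh->numa_node))
		goto error;
	/* WQEs, doorbell record and SQ id are all reached through
	 * the embedded object from now on, e.g. wq->sq_obj.sq->id
	 * for the SQ number.
	 */
	wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
	*wq->sq_obj.db_rec = rte_cpu_to_be_32(wq->sq_ci);
	...
	mlx5_devx_sq_destroy(&wq->sq_obj);	/* single-call teardown */

The rearm-queue hunk below shows the same attribute block populated
with its queue-specific fields (cd_master, tis_num and cqn).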

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.h      |   8 +--
 drivers/net/mlx5/mlx5_txpp.c | 147 +++++++++++--------------------------------
 2 files changed, 36 insertions(+), 119 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 9a59c26..192a5a7 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -611,15 +611,9 @@ struct mlx5_txpp_wq {
 	uint32_t cq_ci:24;
 	uint32_t arm_sn:2;
 	/* Send Queue related data.*/
-	struct mlx5_devx_obj *sq;
-	void *sq_umem;
-	union {
-		volatile void *sq_buf;
-		volatile struct mlx5_wqe *wqes;
-	};
+	struct mlx5_devx_sq sq_obj;
 	uint16_t sq_size; /* Number of WQEs in the queue. */
 	uint16_t sq_ci; /* Next WQE to execute. */
-	volatile uint32_t *sq_dbrec;
 };
 
 /* Tx packet pacing internal timestamp. */
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 54ea572..b6ff7e0 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -121,12 +121,7 @@
 static void
 mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
 {
-	if (wq->sq)
-		claim_zero(mlx5_devx_cmd_destroy(wq->sq));
-	if (wq->sq_umem)
-		claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
-	if (wq->sq_buf)
-		mlx5_free((void *)(uintptr_t)wq->sq_buf);
+	mlx5_devx_sq_destroy(&wq->sq_obj);
 	mlx5_devx_cq_destroy(&wq->cq_obj);
 	memset(wq, 0, sizeof(*wq));
 }
@@ -155,6 +150,7 @@
 mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
 {
 	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
+	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
 	union {
 		uint32_t w32[2];
 		uint64_t w64;
@@ -163,11 +159,11 @@
 
 	wq->sq_ci = ci + 1;
 	cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
-		   (wq->wqes[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
-	cs.w32[1] = wq->wqes[ci & (wq->sq_size - 1)].ctrl[1];
+			(wqe[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
+	cs.w32[1] = wqe[ci & (wq->sq_size - 1)].ctrl[1];
 	/* Update SQ doorbell record with new SQ ci. */
 	rte_compiler_barrier();
-	*wq->sq_dbrec = rte_cpu_to_be_32(wq->sq_ci);
+	*wq->sq_obj.db_rec = rte_cpu_to_be_32(wq->sq_ci);
 	/* Make sure the doorbell record is updated. */
 	rte_wmb();
 	/* Write to doorbel register to start processing. */
@@ -180,7 +176,7 @@
 mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
 {
 	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
-	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
+	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
 	uint32_t i;
 
 	for (i = 0; i < wq->sq_size; i += 2) {
@@ -191,7 +187,7 @@
 		/* Build SEND_EN request with slave WQE index. */
 		cs = &wqe[i + 0].cseg;
 		cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
-		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
+		cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
 		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
 				     MLX5_COMP_MODE_OFFSET);
 		cs->misc = RTE_BE32(0);
@@ -199,11 +195,12 @@
 		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
 			((1 << MLX5_WQ_INDEX_WIDTH) - 1);
 		qs->max_index = rte_cpu_to_be_32(index);
-		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
+		qs->qpn_cqn =
+			   rte_cpu_to_be_32(sh->txpp.clock_queue.sq_obj.sq->id);
 		/* Build WAIT request with slave CQE index. */
 		cs = &wqe[i + 1].cseg;
 		cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
-		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
+		cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
 		cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
 				     MLX5_COMP_MODE_OFFSET);
 		cs->misc = RTE_BE32(0);
@@ -220,7 +217,16 @@
 static int
 mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
 {
-	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
+	struct mlx5_devx_create_sq_attr sq_attr = {
+		.cd_master = 1,
+		.state = MLX5_SQC_STATE_RST,
+		.tis_lst_sz = 1,
+		.tis_num = sh->tis->id,
+		.wq_attr = (struct mlx5_devx_wq_attr){
+			.pd = sh->pdn,
+			.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+		},
+	};
 	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
 	struct mlx5_devx_cq_attr cq_attr = {
 		.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
@@ -228,15 +234,8 @@
 		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
 	};
 	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
-	size_t page_size;
-	uint32_t umem_size, umem_dbrec;
 	int ret;
 
-	page_size = rte_mem_page_size();
-	if (page_size == (size_t)-1) {
-		DRV_LOG(ERR, "Failed to get mem page size");
-		return -ENOMEM;
-	}
 	/* Create completion queue object for Rearm Queue. */
 	ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
 				  log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
@@ -247,63 +246,25 @@
 	}
 	wq->cq_ci = 0;
 	wq->arm_sn = 0;
-	/*
-	 * Allocate memory buffer for Send Queue WQEs.
-	 * There should be no WQE leftovers in the cyclic queue.
-	 */
 	wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
 	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
-	umem_size =  MLX5_WQE_SIZE * wq->sq_size;
-	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
-	umem_size += MLX5_DBR_SIZE;
-	wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
-				 page_size, sh->numa_node);
-	if (!wq->sq_buf) {
-		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	/* Register allocated buffer in user space with DevX. */
-	wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
-					       (void *)(uintptr_t)wq->sq_buf,
-					       umem_size,
-					       IBV_ACCESS_LOCAL_WRITE);
-	if (!wq->sq_umem) {
-		rte_errno = errno;
-		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
-		goto error;
-	}
 	/* Create send queue object for Rearm Queue. */
-	sq_attr.state = MLX5_SQC_STATE_RST;
-	sq_attr.tis_lst_sz = 1;
-	sq_attr.tis_num = sh->tis->id;
 	sq_attr.cqn = wq->cq_obj.cq->id;
-	sq_attr.cd_master = 1;
-	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
-	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
-	sq_attr.wq_attr.pd = sh->pdn;
-	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
-	sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
-	sq_attr.wq_attr.dbr_umem_valid = 1;
-	sq_attr.wq_attr.dbr_addr = umem_dbrec;
-	sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
-	sq_attr.wq_attr.wq_umem_valid = 1;
-	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
-	sq_attr.wq_attr.wq_umem_offset = 0;
-	wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
-	if (!wq->sq) {
+	/* There should be no WQE leftovers in the cyclic queue. */
+	ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj,
+				  log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,
+				  sh->numa_node);
+	if (ret) {
 		rte_errno = errno;
 		DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
 		goto error;
 	}
-	wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
-				   MLX5_SND_DBR * sizeof(uint32_t));
 	/* Build the WQEs in the Send Queue before goto Ready state. */
 	mlx5_txpp_fill_wqe_rearm_queue(sh);
 	/* Change queue state to ready. */
 	msq_attr.sq_state = MLX5_SQC_STATE_RST;
 	msq_attr.state = MLX5_SQC_STATE_RDY;
-	ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
+	ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
 		goto error;
@@ -320,7 +281,7 @@
 mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
 {
 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
+	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
 	struct mlx5_wqe_cseg *cs = &wqe->cseg;
 	uint32_t wqe_size, opcode, i;
 	uint8_t *dst;
@@ -338,7 +299,7 @@
 		opcode = MLX5_OPCODE_NOP;
 	}
 	cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
-	cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) |
+	cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) |
 				     (wqe_size / MLX5_WSEG_SIZE));
 	cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
 	cs->misc = RTE_BE32(0);
@@ -407,10 +368,11 @@
 	}
 wcopy:
 	/* Duplicate the pattern to the next WQEs. */
-	dst = (uint8_t *)(uintptr_t)wq->sq_buf;
+	dst = (uint8_t *)(uintptr_t)wq->sq_obj.umem_buf;
 	for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
 		dst += wqe_size;
-		rte_memcpy(dst, (void *)(uintptr_t)wq->sq_buf, wqe_size);
+		rte_memcpy(dst, (void *)(uintptr_t)wq->sq_obj.umem_buf,
+			   wqe_size);
 	}
 }
 
@@ -428,15 +390,8 @@
 		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
 	};
 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-	size_t page_size;
-	uint32_t umem_size, umem_dbrec;
 	int ret;
 
-	page_size = rte_mem_page_size();
-	if (page_size == (size_t)-1) {
-		DRV_LOG(ERR, "Failed to get mem page size");
-		return -ENOMEM;
-	}
 	sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
 				   MLX5_TXPP_REARM_SQ_SIZE *
 				   sizeof(struct mlx5_txpp_ts),
@@ -469,26 +424,6 @@
 	}
 	/* There should not be WQE leftovers in the cyclic queue. */
 	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
-	umem_size =  MLX5_WQE_SIZE * wq->sq_size;
-	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
-	umem_size += MLX5_DBR_SIZE;
-	wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
-				 page_size, sh->numa_node);
-	if (!wq->sq_buf) {
-		DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	/* Register allocated buffer in user space with DevX. */
-	wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
-					       (void *)(uintptr_t)wq->sq_buf,
-					       umem_size,
-					       IBV_ACCESS_LOCAL_WRITE);
-	if (!wq->sq_umem) {
-		rte_errno = errno;
-		DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
-		goto error;
-	}
 	/* Create send queue object for Clock Queue. */
 	if (sh->txpp.test) {
 		sq_attr.tis_lst_sz = 1;
@@ -499,37 +434,25 @@
 		sq_attr.non_wire = 1;
 		sq_attr.static_sq_wq = 1;
 	}
-	sq_attr.state = MLX5_SQC_STATE_RST;
 	sq_attr.cqn = wq->cq_obj.cq->id;
 	sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
 	sq_attr.wq_attr.cd_slave = 1;
 	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
-	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
 	sq_attr.wq_attr.pd = sh->pdn;
-	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
-	sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
-	sq_attr.wq_attr.dbr_umem_valid = 1;
-	sq_attr.wq_attr.dbr_addr = umem_dbrec;
-	sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
-	sq_attr.wq_attr.wq_umem_valid = 1;
-	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
-	/* umem_offset must be zero for static_sq_wq queue. */
-	sq_attr.wq_attr.wq_umem_offset = 0;
-	wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
-	if (!wq->sq) {
+	ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj, log2above(wq->sq_size),
+				  &sq_attr, sh->numa_node);
+	if (ret) {
 		rte_errno = errno;
 		DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
 		goto error;
 	}
-	wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
-				   MLX5_SND_DBR * sizeof(uint32_t));
 	/* Build the WQEs in the Send Queue before goto Ready state. */
 	mlx5_txpp_fill_wqe_clock_queue(sh);
 	/* Change queue state to ready. */
 	msq_attr.sq_state = MLX5_SQC_STATE_RST;
 	msq_attr.state = MLX5_SQC_STATE_RDY;
 	wq->sq_ci = 0;
-	ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
+	ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to set SQ ready state Clock Queue.");
 		goto error;
-- 
1.8.3.1


