From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad <matan@nvidia.com>,
	Raslan Darawsheh <rasland@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 05/17] net/mlx5: move rearm and clock queue CQ creation to common
Date: Tue, 29 Dec 2020 08:52:12 +0000
Message-ID: <1609231944-29274-6-git-send-email-michaelba@nvidia.com>
In-Reply-To: <1609231944-29274-1-git-send-email-michaelba@nvidia.com>

Use the common function for CQ creation at the Rearm and Clock queues.
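
The common mlx5_devx_cq_create() takes over the CQE buffer allocation,
umem registration and doorbell record setup that each queue previously
did by hand, and, judging by the removal of
mlx5_txpp_fill_cqe_rearm_queue() with no replacement in the caller, the
initial CQE invalidation as well. The caller fills only the
queue-specific attributes. A minimal sketch of the resulting pattern,
using the names from this patch (error handling trimmed):

	struct mlx5_devx_cq_attr cq_attr = {
		.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B,
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
	};
	int ret;

	/* One call replaces the malloc + devx_umem_reg + create_cq +
	 * doorbell setup sequence removed below.
	 */
	ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
				  log2above(MLX5_TXPP_REARM_CQ_SIZE),
				  &cq_attr, sh->numa_node);
	if (ret)
		return ret;
	/* Fields formerly kept separately in mlx5_txpp_wq are reached
	 * through the embedded object: wq->cq_obj.cq->id (CQ number),
	 * wq->cq_obj.cqes (CQE array), wq->cq_obj.db_rec (doorbell).
	 */

	/* Teardown is one call instead of three conditional ones. */
	mlx5_devx_cq_destroy(&wq->cq_obj);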

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.h      |   9 +--
 drivers/net/mlx5/mlx5_rxtx.c |   2 +-
 drivers/net/mlx5/mlx5_txpp.c | 147 +++++++++++--------------------------------
 3 files changed, 40 insertions(+), 118 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 121d726..00ccaee 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -26,6 +26,7 @@
 #include <mlx5_prm.h>
 #include <mlx5_common_mp.h>
 #include <mlx5_common_mr.h>
+#include <mlx5_common_devx.h>
 
 #include "mlx5_defs.h"
 #include "mlx5_utils.h"
@@ -612,13 +613,7 @@ struct mlx5_flow_id_pool {
 /* Tx pacing queue structure - for Clock and Rearm queues. */
 struct mlx5_txpp_wq {
 	/* Completion Queue related data.*/
-	struct mlx5_devx_obj *cq;
-	void *cq_umem;
-	union {
-		volatile void *cq_buf;
-		volatile struct mlx5_cqe *cqes;
-	};
-	volatile uint32_t *cq_dbrec;
+	struct mlx5_devx_cq cq_obj;
 	uint32_t cq_ci:24;
 	uint32_t arm_sn:2;
 	/* Send Queue related data.*/
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index d12d746..dad24a3 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -2277,7 +2277,7 @@ enum mlx5_txcmp_code {
 
 	qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
 	qs->max_index = rte_cpu_to_be_32(wci);
-	qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq->id);
+	qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq_obj.cq->id);
 	qs->reserved0 = RTE_BE32(0);
 	qs->reserved1 = RTE_BE32(0);
 }
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 2438bf1..54ea572 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -13,6 +13,7 @@
 #include <rte_eal_paging.h>
 
 #include <mlx5_malloc.h>
+#include <mlx5_common_devx.h>
 
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
@@ -126,12 +127,7 @@
 		claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
 	if (wq->sq_buf)
 		mlx5_free((void *)(uintptr_t)wq->sq_buf);
-	if (wq->cq)
-		claim_zero(mlx5_devx_cmd_destroy(wq->cq));
-	if (wq->cq_umem)
-		claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
-	if (wq->cq_buf)
-		mlx5_free((void *)(uintptr_t)wq->cq_buf);
+	mlx5_devx_cq_destroy(&wq->cq_obj);
 	memset(wq, 0, sizeof(*wq));
 }
 
@@ -181,19 +177,6 @@
 }
 
 static void
-mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
-{
-	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
-	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
-	uint32_t i;
-
-	for (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {
-		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
-		++cqe;
-	}
-}
-
-static void
 mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
 {
 	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
@@ -228,7 +211,8 @@
 		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
 			((1 << MLX5_CQ_INDEX_WIDTH) - 1);
 		qs->max_index = rte_cpu_to_be_32(index);
-		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);
+		qs->qpn_cqn =
+			   rte_cpu_to_be_32(sh->txpp.clock_queue.cq_obj.cq->id);
 	}
 }
 
@@ -238,7 +222,11 @@
 {
 	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
 	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
-	struct mlx5_devx_cq_attr cq_attr = { 0 };
+	struct mlx5_devx_cq_attr cq_attr = {
+		.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
+					 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B,
+		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+	};
 	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
 	size_t page_size;
 	uint32_t umem_size, umem_dbrec;
@@ -249,50 +237,16 @@
 		DRV_LOG(ERR, "Failed to get mem page size");
 		return -ENOMEM;
 	}
-	/* Allocate memory buffer for CQEs and doorbell record. */
-	umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
-	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
-	umem_size += MLX5_DBR_SIZE;
-	wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
-				 page_size, sh->numa_node);
-	if (!wq->cq_buf) {
-		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
-		return -ENOMEM;
-	}
-	/* Register allocated buffer in user space with DevX. */
-	wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
-					       (void *)(uintptr_t)wq->cq_buf,
-					       umem_size,
-					       IBV_ACCESS_LOCAL_WRITE);
-	if (!wq->cq_umem) {
-		rte_errno = errno;
-		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
-		goto error;
-	}
 	/* Create completion queue object for Rearm Queue. */
-	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
-			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
-	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
-	cq_attr.eqn = sh->eqn;
-	cq_attr.q_umem_valid = 1;
-	cq_attr.q_umem_offset = 0;
-	cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
-	cq_attr.db_umem_valid = 1;
-	cq_attr.db_umem_offset = umem_dbrec;
-	cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
-	cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);
-	cq_attr.log_page_size = rte_log2_u32(page_size);
-	wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
-	if (!wq->cq) {
-		rte_errno = errno;
+	ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+				  log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
+				  sh->numa_node);
+	if (ret) {
 		DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
-		goto error;
+		return ret;
 	}
-	wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
 	wq->cq_ci = 0;
 	wq->arm_sn = 0;
-	/* Mark all CQEs initially as invalid. */
-	mlx5_txpp_fill_cqe_rearm_queue(sh);
 	/*
 	 * Allocate memory buffer for Send Queue WQEs.
 	 * There should be no WQE leftovers in the cyclic queue.
@@ -323,7 +277,7 @@
 	sq_attr.state = MLX5_SQC_STATE_RST;
 	sq_attr.tis_lst_sz = 1;
 	sq_attr.tis_num = sh->tis->id;
-	sq_attr.cqn = wq->cq->id;
+	sq_attr.cqn = wq->cq_obj.cq->id;
 	sq_attr.cd_master = 1;
 	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
 	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
@@ -466,7 +420,13 @@
 {
 	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
 	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
-	struct mlx5_devx_cq_attr cq_attr = { 0 };
+	struct mlx5_devx_cq_attr cq_attr = {
+		.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
+					 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B,
+		.use_first_only = 1,
+		.overrun_ignore = 1,
+		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+	};
 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
 	size_t page_size;
 	uint32_t umem_size, umem_dbrec;
@@ -487,48 +447,14 @@
 	}
 	sh->txpp.ts_p = 0;
 	sh->txpp.ts_n = 0;
-	/* Allocate memory buffer for CQEs and doorbell record. */
-	umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
-	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
-	umem_size += MLX5_DBR_SIZE;
-	wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
-					page_size, sh->numa_node);
-	if (!wq->cq_buf) {
-		DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
-		return -ENOMEM;
-	}
-	/* Register allocated buffer in user space with DevX. */
-	wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
-					       (void *)(uintptr_t)wq->cq_buf,
-					       umem_size,
-					       IBV_ACCESS_LOCAL_WRITE);
-	if (!wq->cq_umem) {
-		rte_errno = errno;
-		DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
-		goto error;
-	}
 	/* Create completion queue object for Clock Queue. */
-	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
-			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
-	cq_attr.use_first_only = 1;
-	cq_attr.overrun_ignore = 1;
-	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
-	cq_attr.eqn = sh->eqn;
-	cq_attr.q_umem_valid = 1;
-	cq_attr.q_umem_offset = 0;
-	cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
-	cq_attr.db_umem_valid = 1;
-	cq_attr.db_umem_offset = umem_dbrec;
-	cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
-	cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
-	cq_attr.log_page_size = rte_log2_u32(page_size);
-	wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
-	if (!wq->cq) {
-		rte_errno = errno;
+	ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+				  log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
+				  sh->numa_node);
+	if (ret) {
 		DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
 		goto error;
 	}
-	wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
 	wq->cq_ci = 0;
 	/* Allocate memory buffer for Send Queue WQEs. */
 	if (sh->txpp.test) {
@@ -574,7 +500,7 @@
 		sq_attr.static_sq_wq = 1;
 	}
 	sq_attr.state = MLX5_SQC_STATE_RST;
-	sq_attr.cqn = wq->cq->id;
+	sq_attr.cqn = wq->cq_obj.cq->id;
 	sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
 	sq_attr.wq_attr.cd_slave = 1;
 	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
@@ -625,12 +551,13 @@
 	struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
 	uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
 	uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
-	uint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);
+	uint64_t db_be =
+		rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq_obj.cq->id);
 	base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
 	uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
 
 	rte_compiler_barrier();
-	aq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
+	aq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
 	rte_wmb();
 #ifdef RTE_ARCH_64
 	*(uint64_t *)addr = db_be;
@@ -728,7 +655,7 @@
 mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
 {
 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
+	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
 	union {
 		rte_int128_t u128;
 		struct mlx5_cqe_ts cts;
@@ -809,7 +736,7 @@
 	do {
 		volatile struct mlx5_cqe *cqe;
 
-		cqe = &wq->cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
+		cqe = &wq->cq_obj.cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
 		ret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);
 		switch (ret) {
 		case MLX5_CQE_STATUS_ERR:
@@ -841,7 +768,7 @@
 		}
 		/* Update doorbell record to notify hardware. */
 		rte_compiler_barrier();
-		*wq->cq_dbrec = rte_cpu_to_be_32(cq_ci);
+		*wq->cq_obj.db_rec = rte_cpu_to_be_32(cq_ci);
 		rte_wmb();
 		wq->cq_ci = cq_ci;
 		/* Fire new requests to Rearm Queue. */
@@ -936,9 +863,8 @@
 	}
 	/* Subscribe CQ event to the event channel controlled by the driver. */
 	ret = mlx5_glue->devx_subscribe_devx_event(sh->txpp.echan,
-						   sh->txpp.rearm_queue.cq->obj,
-						   sizeof(event_nums),
-						   event_nums, 0);
+					    sh->txpp.rearm_queue.cq_obj.cq->obj,
+					     sizeof(event_nums), event_nums, 0);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to subscribe CQE event.");
 		rte_errno = errno;
@@ -1140,7 +1066,8 @@
 
 	if (sh->txpp.refcnt) {
 		struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-		struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
+		struct mlx5_cqe *cqe =
+				(struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
 		union {
 			rte_int128_t u128;
 			struct mlx5_cqe_ts cts;
-- 
1.8.3.1

