From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
To: Olivier Matz <olivier.matz@6wind.com>,
	David Marchand <david.marchand@redhat.com>,
	Jerin Jacob <jerinj@marvell.com>,
	Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Kiran Kumar K <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>,
	Harman Kalra <hkalra@marvell.com>,
	Anoob Joseph <anoobj@marvell.com>
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 3/6] mempool: add namespace to internal but still visible API
Date: Tue, 19 Oct 2021 13:08:42 +0300
Message-ID: <20211019100845.1632332-4-andrew.rybchenko@oktetlabs.ru>
In-Reply-To: <20211019100845.1632332-1-andrew.rybchenko@oktetlabs.ru>

Add RTE_ prefix to the internal API defined in the public header.
Use the prefix instead of a double underscore.
Use uppercase for macros where the lowercase name would conflict with
an existing function.

Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
---
 drivers/event/octeontx/ssovf_worker.h      |  2 +-
 drivers/net/cnxk/cn10k_rx.h                | 12 ++--
 drivers/net/cnxk/cn10k_tx.h                | 30 ++++-----
 drivers/net/cnxk/cn9k_rx.h                 | 12 ++--
 drivers/net/cnxk/cn9k_tx.h                 | 26 ++++----
 drivers/net/octeontx/octeontx_rxtx.h       |  4 +-
 drivers/net/octeontx2/otx2_ethdev_sec_tx.h |  2 +-
 drivers/net/octeontx2/otx2_rx.c            |  8 +--
 drivers/net/octeontx2/otx2_rx.h            |  4 +-
 drivers/net/octeontx2/otx2_tx.c            | 16 ++---
 drivers/net/octeontx2/otx2_tx.h            |  4 +-
 lib/mempool/rte_mempool.c                  |  8 +--
 lib/mempool/rte_mempool.h                  | 77 +++++++++++-----------
 13 files changed, 103 insertions(+), 102 deletions(-)
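
For reviewers, a minimal sketch of the rename as seen from a driver; the
function and mbuf variable names below are hypothetical, only the mempool
identifiers come from this patch:

/* Hypothetical driver snippet, for illustration only. */
#include <rte_mbuf.h>
#include <rte_mempool.h>

static inline void
example_mark_mbuf_as_allocated(struct rte_mbuf *m)
{
	/*
	 * Previously __mempool_check_cookies(m->pool, (void **)&m, 1, 1);
	 * the behaviour is unchanged: the macro expands to
	 * rte_mempool_check_cookies() when RTE_LIBRTE_MEMPOOL_DEBUG is
	 * defined and to a no-op otherwise. It is uppercase because the
	 * lowercase name already belongs to that function.
	 */
	RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 1);

	/*
	 * Helpers whose lowercase name is free simply gain the rte_ prefix,
	 * e.g. __mempool_get_header() becomes rte_mempool_get_header().
	 */
}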

diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index f609b296ed..ba9e1cd0fa 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -83,7 +83,7 @@ ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
 
 		mbuf->data_off = sizeof(octtx_pki_buflink_t);
 
-		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
 		if (nb_segs == 1)
 			mbuf->data_len = bytes_left;
 		else
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index fcc451aa36..6b40a9d0b5 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -276,7 +276,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
 		mbuf = mbuf->next;
 
-		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
 
 		mbuf->data_len = sg & 0xFFFF;
 		sg = sg >> 16;
@@ -306,7 +306,7 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
 	uint64_t ol_flags = 0;
 
 	/* Mark mempool obj as "get" as it is alloc'ed by NIX */
-	__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+	RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
 
 	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
 		mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
@@ -905,10 +905,10 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		roc_prefetch_store_keep(mbuf3);
 
 		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
-		__mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
-		__mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
-		__mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
-		__mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);
 
 		packets += NIX_DESCS_PER_LOOP;
 
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index c6f349b352..0fd877f4ec 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -677,7 +677,7 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
 		}
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
-			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 	} else {
 		sg->seg1_size = m->data_len;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
@@ -789,7 +789,7 @@ cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 		/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	if (!(sg_u & (1ULL << 55)))
-		__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 	rte_io_wmb();
 #endif
 	m = m_next;
@@ -808,7 +808,7 @@ cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 			 */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		if (!(sg_u & (1ULL << (i + 55))))
-			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 #endif
 		slist++;
 		i++;
@@ -1177,7 +1177,7 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 		/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	if (!(sg_u & (1ULL << 55)))
-		__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 	rte_io_wmb();
 #endif
 
@@ -1194,7 +1194,7 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 			 */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		if (!(sg_u & (1ULL << (i + 55))))
-			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 		rte_io_wmb();
 #endif
 		slist++;
@@ -1235,7 +1235,7 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0,
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		sg.u = vgetq_lane_u64(cmd1[0], 0);
 		if (!(sg.u & (1ULL << 55)))
-			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 		rte_io_wmb();
 #endif
 		return;
@@ -1425,7 +1425,7 @@ cn10k_nix_xmit_store(struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr,
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		sg.u = vgetq_lane_u64(cmd1, 0);
 		if (!(sg.u & (1ULL << 55)))
-			__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1,
+			RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1,
 						0);
 		rte_io_wmb();
 #endif
@@ -2352,28 +2352,28 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0))
 				vsetq_lane_u64(0x80000, xmask01, 0);
 			else
-				__mempool_check_cookies(
+				RTE_MEMPOOL_CHECK_COOKIES(
 					((struct rte_mbuf *)mbuf0)->pool,
 					(void **)&mbuf0, 1, 0);
 
 			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1))
 				vsetq_lane_u64(0x80000, xmask01, 1);
 			else
-				__mempool_check_cookies(
+				RTE_MEMPOOL_CHECK_COOKIES(
 					((struct rte_mbuf *)mbuf1)->pool,
 					(void **)&mbuf1, 1, 0);
 
 			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2))
 				vsetq_lane_u64(0x80000, xmask23, 0);
 			else
-				__mempool_check_cookies(
+				RTE_MEMPOOL_CHECK_COOKIES(
 					((struct rte_mbuf *)mbuf2)->pool,
 					(void **)&mbuf2, 1, 0);
 
 			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3))
 				vsetq_lane_u64(0x80000, xmask23, 1);
 			else
-				__mempool_check_cookies(
+				RTE_MEMPOOL_CHECK_COOKIES(
 					((struct rte_mbuf *)mbuf3)->pool,
 					(void **)&mbuf3, 1, 0);
 			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
@@ -2389,19 +2389,19 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			/* Mark mempool object as "put" since
 			 * it is freed by NIX
 			 */
-			__mempool_check_cookies(
+			RTE_MEMPOOL_CHECK_COOKIES(
 				((struct rte_mbuf *)mbuf0)->pool,
 				(void **)&mbuf0, 1, 0);
 
-			__mempool_check_cookies(
+			RTE_MEMPOOL_CHECK_COOKIES(
 				((struct rte_mbuf *)mbuf1)->pool,
 				(void **)&mbuf1, 1, 0);
 
-			__mempool_check_cookies(
+			RTE_MEMPOOL_CHECK_COOKIES(
 				((struct rte_mbuf *)mbuf2)->pool,
 				(void **)&mbuf2, 1, 0);
 
-			__mempool_check_cookies(
+			RTE_MEMPOOL_CHECK_COOKIES(
 				((struct rte_mbuf *)mbuf3)->pool,
 				(void **)&mbuf3, 1, 0);
 		}
diff --git a/drivers/net/cnxk/cn9k_rx.h b/drivers/net/cnxk/cn9k_rx.h
index 7ab415a194..ba3c3668f7 100644
--- a/drivers/net/cnxk/cn9k_rx.h
+++ b/drivers/net/cnxk/cn9k_rx.h
@@ -151,7 +151,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
 		mbuf = mbuf->next;
 
-		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
 
 		mbuf->data_len = sg & 0xFFFF;
 		sg = sg >> 16;
@@ -288,7 +288,7 @@ cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
 	uint64_t ol_flags = 0;
 
 	/* Mark mempool obj as "get" as it is alloc'ed by NIX */
-	__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+	RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
 
 	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
 		packet_type = nix_ptype_get(lookup_mem, w1);
@@ -757,10 +757,10 @@ cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
 		roc_prefetch_store_keep(mbuf3);
 
 		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
-		__mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
-		__mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
-		__mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
-		__mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);
 
 		/* Advance head pointer and packets */
 		head += NIX_DESCS_PER_LOOP;
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index 44273eca90..83f4be84f1 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -285,7 +285,7 @@ cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
 		}
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
-			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 	}
 }
 
@@ -397,7 +397,7 @@ cn9k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 		/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		if (!(sg_u & (1ULL << (i + 55))))
-			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 		rte_io_wmb();
 #endif
 		slist++;
@@ -611,7 +611,7 @@ cn9k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 		/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	if (!(sg_u & (1ULL << 55)))
-		__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 	rte_io_wmb();
 #endif
 
@@ -628,7 +628,7 @@ cn9k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 			 */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		if (!(sg_u & (1ULL << (i + 55))))
-			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 		rte_io_wmb();
 #endif
 		slist++;
@@ -680,7 +680,7 @@ cn9k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0,
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		sg.u = vgetq_lane_u64(cmd1[0], 0);
 		if (!(sg.u & (1ULL << 55)))
-			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 		rte_io_wmb();
 #endif
 		return 2 + !!(flags & NIX_TX_NEED_EXT_HDR) +
@@ -1627,28 +1627,28 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0))
 				vsetq_lane_u64(0x80000, xmask01, 0);
 			else
-				__mempool_check_cookies(
+				RTE_MEMPOOL_CHECK_COOKIES(
 					((struct rte_mbuf *)mbuf0)->pool,
 					(void **)&mbuf0, 1, 0);
 
 			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1))
 				vsetq_lane_u64(0x80000, xmask01, 1);
 			else
-				__mempool_check_cookies(
+				RTE_MEMPOOL_CHECK_COOKIES(
 					((struct rte_mbuf *)mbuf1)->pool,
 					(void **)&mbuf1, 1, 0);
 
 			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2))
 				vsetq_lane_u64(0x80000, xmask23, 0);
 			else
-				__mempool_check_cookies(
+				RTE_MEMPOOL_CHECK_COOKIES(
 					((struct rte_mbuf *)mbuf2)->pool,
 					(void **)&mbuf2, 1, 0);
 
 			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3))
 				vsetq_lane_u64(0x80000, xmask23, 1);
 			else
-				__mempool_check_cookies(
+				RTE_MEMPOOL_CHECK_COOKIES(
 					((struct rte_mbuf *)mbuf3)->pool,
 					(void **)&mbuf3, 1, 0);
 			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
@@ -1667,19 +1667,19 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			/* Mark mempool object as "put" since
 			 * it is freed by NIX
 			 */
-			__mempool_check_cookies(
+			RTE_MEMPOOL_CHECK_COOKIES(
 				((struct rte_mbuf *)mbuf0)->pool,
 				(void **)&mbuf0, 1, 0);
 
-			__mempool_check_cookies(
+			RTE_MEMPOOL_CHECK_COOKIES(
 				((struct rte_mbuf *)mbuf1)->pool,
 				(void **)&mbuf1, 1, 0);
 
-			__mempool_check_cookies(
+			RTE_MEMPOOL_CHECK_COOKIES(
 				((struct rte_mbuf *)mbuf2)->pool,
 				(void **)&mbuf2, 1, 0);
 
-			__mempool_check_cookies(
+			RTE_MEMPOOL_CHECK_COOKIES(
 				((struct rte_mbuf *)mbuf3)->pool,
 				(void **)&mbuf3, 1, 0);
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index e0723ac26a..9af797c36c 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -344,7 +344,7 @@ __octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
 
 	/* Mark mempool object as "put" since it is freed by PKO */
 	if (!(cmd_buf[0] & (1ULL << 58)))
-		__mempool_check_cookies(m_tofree->pool, (void **)&m_tofree,
+		RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool, (void **)&m_tofree,
 					1, 0);
 	/* Get the gaura Id */
 	gaura_id =
@@ -417,7 +417,7 @@ __octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
 		 */
 		if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
 			tx_pkt->next = NULL;
-			__mempool_check_cookies(m_tofree->pool,
+			RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool,
 						(void **)&m_tofree, 1, 0);
 		}
 		nb_desc++;
diff --git a/drivers/net/octeontx2/otx2_ethdev_sec_tx.h b/drivers/net/octeontx2/otx2_ethdev_sec_tx.h
index 623a2a841e..65140b759c 100644
--- a/drivers/net/octeontx2/otx2_ethdev_sec_tx.h
+++ b/drivers/net/octeontx2/otx2_ethdev_sec_tx.h
@@ -146,7 +146,7 @@ otx2_sec_event_tx(uint64_t base, struct rte_event *ev, struct rte_mbuf *m,
 	sd->nix_iova.addr = rte_mbuf_data_iova(m);
 
 	/* Mark mempool object as "put" since it is freed by NIX */
-	__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+	RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 
 	if (!ev->sched_type)
 		otx2_ssogws_head_wait(base + SSOW_LF_GWS_TAG);
diff --git a/drivers/net/octeontx2/otx2_rx.c b/drivers/net/octeontx2/otx2_rx.c
index ffeade5952..0d85c898bf 100644
--- a/drivers/net/octeontx2/otx2_rx.c
+++ b/drivers/net/octeontx2/otx2_rx.c
@@ -296,10 +296,10 @@ nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
 		otx2_prefetch_store_keep(mbuf3);
 
 		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
-		__mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
-		__mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
-		__mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
-		__mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);
 
 		/* Advance head pointer and packets */
 		head += NIX_DESCS_PER_LOOP; head &= qmask;
diff --git a/drivers/net/octeontx2/otx2_rx.h b/drivers/net/octeontx2/otx2_rx.h
index ea29aec62f..3dcc563be1 100644
--- a/drivers/net/octeontx2/otx2_rx.h
+++ b/drivers/net/octeontx2/otx2_rx.h
@@ -199,7 +199,7 @@ nix_cqe_xtract_mseg(const struct nix_rx_parse_s *rx,
 		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
 		mbuf = mbuf->next;
 
-		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
 
 		mbuf->data_len = sg & 0xFFFF;
 		sg = sg >> 16;
@@ -309,7 +309,7 @@ otx2_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
 	uint64_t ol_flags = 0;
 
 	/* Mark mempool obj as "get" as it is alloc'ed by NIX */
-	__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+	RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
 
 	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
 		mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
diff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c
index ff299f00b9..ad704d745b 100644
--- a/drivers/net/octeontx2/otx2_tx.c
+++ b/drivers/net/octeontx2/otx2_tx.c
@@ -202,7 +202,7 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			if (otx2_nix_prefree_seg(mbuf))
 				vsetq_lane_u64(0x80000, xmask01, 0);
 			else
-				__mempool_check_cookies(mbuf->pool,
+				RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
 							(void **)&mbuf,
 							1, 0);
 
@@ -211,7 +211,7 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			if (otx2_nix_prefree_seg(mbuf))
 				vsetq_lane_u64(0x80000, xmask01, 1);
 			else
-				__mempool_check_cookies(mbuf->pool,
+				RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
 							(void **)&mbuf,
 							1, 0);
 
@@ -220,7 +220,7 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			if (otx2_nix_prefree_seg(mbuf))
 				vsetq_lane_u64(0x80000, xmask23, 0);
 			else
-				__mempool_check_cookies(mbuf->pool,
+				RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
 							(void **)&mbuf,
 							1, 0);
 
@@ -229,7 +229,7 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			if (otx2_nix_prefree_seg(mbuf))
 				vsetq_lane_u64(0x80000, xmask23, 1);
 			else
-				__mempool_check_cookies(mbuf->pool,
+				RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
 							(void **)&mbuf,
 							1, 0);
 			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
@@ -245,22 +245,22 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 */
 			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
 				offsetof(struct rte_mbuf, buf_iova));
-			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+			RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
 						1, 0);
 
 			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
 				offsetof(struct rte_mbuf, buf_iova));
-			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+			RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
 						1, 0);
 
 			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
 				offsetof(struct rte_mbuf, buf_iova));
-			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+			RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
 						1, 0);
 
 			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
 				offsetof(struct rte_mbuf, buf_iova));
-			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+			RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
 						1, 0);
 			RTE_SET_USED(mbuf);
 		}
diff --git a/drivers/net/octeontx2/otx2_tx.h b/drivers/net/octeontx2/otx2_tx.h
index 486248dff7..de1be0093c 100644
--- a/drivers/net/octeontx2/otx2_tx.h
+++ b/drivers/net/octeontx2/otx2_tx.h
@@ -372,7 +372,7 @@ otx2_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
 		}
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
-			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 	}
 }
 
@@ -450,7 +450,7 @@ otx2_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 		/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		if (!(sg_u & (1ULL << (i + 55))))
-			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 		rte_io_wmb();
 #endif
 		slist++;
diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 19210c702c..638eaa5fa2 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -167,7 +167,7 @@ mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
 
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
-	tlr = __mempool_get_trailer(obj);
+	tlr = rte_mempool_get_trailer(obj);
 	tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
 #endif
 }
@@ -1064,7 +1064,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
 			rte_panic("MEMPOOL: object is owned by another "
 				  "mempool\n");
 
-		hdr = __mempool_get_header(obj);
+		hdr = rte_mempool_get_header(obj);
 		cookie = hdr->cookie;
 
 		if (free == 0) {
@@ -1092,7 +1092,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
 				rte_panic("MEMPOOL: bad header cookie (audit)\n");
 			}
 		}
-		tlr = __mempool_get_trailer(obj);
+		tlr = rte_mempool_get_trailer(obj);
 		cookie = tlr->cookie;
 		if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
 			RTE_LOG(CRIT, MEMPOOL,
@@ -1144,7 +1144,7 @@ static void
 mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque,
 	void *obj, __rte_unused unsigned idx)
 {
-	__mempool_check_cookies(mp, &obj, 1, 2);
+	RTE_MEMPOOL_CHECK_COOKIES(mp, &obj, 1, 2);
 }
 
 static void
diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h
index d4bcb009fa..979ab071cb 100644
--- a/lib/mempool/rte_mempool.h
+++ b/lib/mempool/rte_mempool.h
@@ -299,14 +299,14 @@ struct rte_mempool {
  *   Number to add to the object-oriented statistics.
  */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __MEMPOOL_STAT_ADD(mp, name, n) do {                    \
+#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {                  \
 		unsigned __lcore_id = rte_lcore_id();           \
 		if (__lcore_id < RTE_MAX_LCORE) {               \
 			mp->stats[__lcore_id].name += n;        \
 		}                                               \
-	} while(0)
+	} while (0)
 #else
-#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
+#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
 #endif
 
 /**
@@ -322,7 +322,8 @@ struct rte_mempool {
 	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
 
 /* return the header of a mempool object (internal) */
-static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
+static inline struct rte_mempool_objhdr *
+rte_mempool_get_header(void *obj)
 {
 	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
 		sizeof(struct rte_mempool_objhdr));
@@ -339,12 +340,12 @@ static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
  */
 static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
 {
-	struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
+	struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);
 	return hdr->mp;
 }
 
 /* return the trailer of a mempool object (internal) */
-static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
+static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
 {
 	struct rte_mempool *mp = rte_mempool_from_obj(obj);
 	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
@@ -368,10 +369,10 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
 	void * const *obj_table_const, unsigned n, int free);
 
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __mempool_check_cookies(mp, obj_table_const, n, free) \
+#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
 	rte_mempool_check_cookies(mp, obj_table_const, n, free)
 #else
-#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
+#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
 
 /**
@@ -393,13 +394,13 @@ void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
 	void * const *first_obj_table_const, unsigned int n, int free);
 
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
-					      free) \
+#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
+						free) \
 	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
 						free)
 #else
-#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
-					      free) \
+#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
+						free) \
 	do {} while (0)
 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
 
@@ -734,8 +735,8 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
 	ops = rte_mempool_get_ops(mp->ops_index);
 	ret = ops->dequeue(mp, obj_table, n);
 	if (ret == 0) {
-		__MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
-		__MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
+		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
+		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
 	}
 	return ret;
 }
@@ -784,8 +785,8 @@ rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
 {
 	struct rte_mempool_ops *ops;
 
-	__MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
-	__MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
+	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
+	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
 	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
 	ops = rte_mempool_get_ops(mp->ops_index);
 	return ops->enqueue(mp, obj_table, n);
@@ -1310,14 +1311,14 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
  *   A pointer to a mempool cache structure. May be NULL if not needed.
  */
 static __rte_always_inline void
-__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
-		      unsigned int n, struct rte_mempool_cache *cache)
+rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
+			   unsigned int n, struct rte_mempool_cache *cache)
 {
 	void **cache_objs;
 
 	/* increment stat now, adding in mempool always success */
-	__MEMPOOL_STAT_ADD(mp, put_bulk, 1);
-	__MEMPOOL_STAT_ADD(mp, put_objs, n);
+	RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
+	RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);
 
 	/* No cache provided or if put would overflow mem allocated for cache */
 	if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
@@ -1374,8 +1375,8 @@ rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
 			unsigned int n, struct rte_mempool_cache *cache)
 {
 	rte_mempool_trace_generic_put(mp, obj_table, n, cache);
-	__mempool_check_cookies(mp, obj_table, n, 0);
-	__mempool_generic_put(mp, obj_table, n, cache);
+	RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
+	rte_mempool_do_generic_put(mp, obj_table, n, cache);
 }
 
 /**
@@ -1435,8 +1436,8 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
  *   - <0: Error; code of ring dequeue function.
  */
 static __rte_always_inline int
-__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
-		      unsigned int n, struct rte_mempool_cache *cache)
+rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
+			   unsigned int n, struct rte_mempool_cache *cache)
 {
 	int ret;
 	uint32_t index, len;
@@ -1475,8 +1476,8 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 
 	cache->len -= n;
 
-	__MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
-	__MEMPOOL_STAT_ADD(mp, get_success_objs, n);
+	RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+	RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
 
 	return 0;
 
@@ -1486,11 +1487,11 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
 
 	if (ret < 0) {
-		__MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
-		__MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
+		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+		RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
 	} else {
-		__MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
-		__MEMPOOL_STAT_ADD(mp, get_success_objs, n);
+		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+		RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
 	}
 
 	return ret;
@@ -1521,9 +1522,9 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 			unsigned int n, struct rte_mempool_cache *cache)
 {
 	int ret;
-	ret = __mempool_generic_get(mp, obj_table, n, cache);
+	ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
 	if (ret == 0)
-		__mempool_check_cookies(mp, obj_table, n, 1);
+		RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
 	rte_mempool_trace_generic_get(mp, obj_table, n, cache);
 	return ret;
 }
@@ -1614,13 +1615,13 @@ rte_mempool_get_contig_blocks(struct rte_mempool *mp,
 
 	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
 	if (ret == 0) {
-		__MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
-		__MEMPOOL_STAT_ADD(mp, get_success_blks, n);
-		__mempool_contig_blocks_check_cookies(mp, first_obj_table, n,
-						      1);
+		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+		RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
+		RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
+							1);
 	} else {
-		__MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
-		__MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
+		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+		RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
 	}
 
 	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
-- 
2.30.2


Thread overview: 53+ messages
2021-10-18 14:49 [dpdk-dev] [PATCH 0/6] mempool: cleanup namespace Andrew Rybchenko
2021-10-18 14:49 ` [dpdk-dev] [PATCH 1/6] mempool: avoid flags documentation in the next line Andrew Rybchenko
2021-10-18 14:49 ` [dpdk-dev] [PATCH 2/6] mempool: add namespace prefix to flags Andrew Rybchenko
2021-10-19  8:52   ` David Marchand
2021-10-19  9:40     ` Thomas Monjalon
2021-10-18 14:49 ` [dpdk-dev] [PATCH 3/6] mempool: add namespace to internal but still visible API Andrew Rybchenko
2021-10-19  8:47   ` David Marchand
2021-10-19  9:10     ` Andrew Rybchenko
2021-10-18 14:49 ` [dpdk-dev] [PATCH 4/6] mempool: make header size calculation internal Andrew Rybchenko
2021-10-19  8:48   ` David Marchand
2021-10-19  8:59     ` Andrew Rybchenko
2021-10-18 14:49 ` [dpdk-dev] [PATCH 5/6] mempool: add namespace to driver register macro Andrew Rybchenko
2021-10-19  8:49   ` David Marchand
2021-10-19  9:04     ` Andrew Rybchenko
2021-10-19  9:23       ` Andrew Rybchenko
2021-10-19  9:27       ` David Marchand
2021-10-19  9:38         ` Andrew Rybchenko
2021-10-19  9:42         ` Thomas Monjalon
2021-10-18 14:49 ` [dpdk-dev] [PATCH 6/6] mempool: deprecate unused defines Andrew Rybchenko
2021-10-19 10:08 ` [dpdk-dev] [PATCH v2 0/6] mempool: cleanup namespace Andrew Rybchenko
2021-10-19 10:08   ` [dpdk-dev] [PATCH v2 1/6] mempool: avoid flags documentation in the next line Andrew Rybchenko
2021-10-19 16:13     ` Olivier Matz
2021-10-19 10:08   ` [dpdk-dev] [PATCH v2 2/6] mempool: add namespace prefix to flags Andrew Rybchenko
2021-10-19 16:13     ` Olivier Matz
2021-10-19 16:15       ` Olivier Matz
2021-10-19 17:45       ` Andrew Rybchenko
2021-10-19 10:08   ` Andrew Rybchenko [this message]
2021-10-19 16:14     ` [dpdk-dev] [PATCH v2 3/6] mempool: add namespace to internal but still visible API Olivier Matz
2021-10-19 10:08   ` [dpdk-dev] [PATCH v2 4/6] mempool: make header size calculation internal Andrew Rybchenko
2021-10-19 16:14     ` Olivier Matz
2021-10-19 17:23       ` Andrew Rybchenko
2021-10-19 10:08   ` [dpdk-dev] [PATCH v2 5/6] mempool: add namespace to driver register macro Andrew Rybchenko
2021-10-19 16:16     ` Olivier Matz
2021-10-19 10:08   ` [dpdk-dev] [PATCH v2 6/6] mempool: deprecate unused defines Andrew Rybchenko
2021-10-19 16:21     ` Olivier Matz
2021-10-19 17:23       ` Andrew Rybchenko
2021-10-19 17:40 ` [dpdk-dev] [PATCH v3 0/6] mempool: cleanup namespace Andrew Rybchenko
2021-10-19 17:40   ` [dpdk-dev] [PATCH v3 1/6] mempool: avoid flags documentation in the next line Andrew Rybchenko
2021-10-19 17:40   ` [dpdk-dev] [PATCH v3 2/6] mempool: add namespace prefix to flags Andrew Rybchenko
2021-10-19 20:03     ` David Marchand
2021-10-20  7:50       ` Andrew Rybchenko
2021-10-19 17:40   ` [dpdk-dev] [PATCH v3 3/6] mempool: add namespace to internal but still visible API Andrew Rybchenko
2021-10-19 17:40   ` [dpdk-dev] [PATCH v3 4/6] mempool: make header size calculation internal Andrew Rybchenko
2021-10-20  6:55     ` Olivier Matz
2021-10-19 17:40   ` [dpdk-dev] [PATCH v3 5/6] mempool: add namespace to driver register macro Andrew Rybchenko
2021-10-20  6:57     ` Olivier Matz
2021-10-19 17:40   ` [dpdk-dev] [PATCH v3 6/6] mempool: deprecate unused defines Andrew Rybchenko
2021-10-20  7:08     ` Olivier Matz
2021-10-19 20:09   ` [dpdk-dev] [PATCH v3 0/6] mempool: cleanup namespace David Marchand
2021-10-20  7:52     ` David Marchand
2021-10-20  7:54       ` Andrew Rybchenko
2021-10-20  7:52     ` Andrew Rybchenko
2021-10-20  8:07       ` David Marchand

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the message as an mbox file, import it into your mail client,
  and reply-to-all from there.

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20211019100845.1632332-4-andrew.rybchenko@oktetlabs.ru \
    --to=andrew.rybchenko@oktetlabs.ru \
    --cc=anoobj@marvell.com \
    --cc=david.marchand@redhat.com \
    --cc=dev@dpdk.org \
    --cc=hkalra@marvell.com \
    --cc=jerinj@marvell.com \
    --cc=kirankumark@marvell.com \
    --cc=ndabilpuram@marvell.com \
    --cc=olivier.matz@6wind.com \
    --cc=skori@marvell.com \
    --cc=skoteshwar@marvell.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html
