From: Konstantin Ananyev <konstantin.ananyev@intel.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, ferruh.yigit@intel.com,
	andrew.rybchenko@oktetlabs.ru, qiming.yang@intel.com,
	qi.z.zhang@intel.com, beilei.xing@intel.com, techboard@dpdk.org,
	Konstantin Ananyev <konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [RFC 3/7] eth: make drivers to use new API for Tx
Date: Fri, 20 Aug 2021 17:28:30 +0100	[thread overview]
Message-ID: <20210820162834.12544-4-konstantin.ananyev@intel.com> (raw)
In-Reply-To: <20210820162834.12544-1-konstantin.ananyev@intel.com>

ethdev:
 - make changes so drivers can start using the new API for tx_pkt_burst().
 - provide helper functions/macros.
 - remove tx_pkt_burst() from 'struct rte_eth_dev'.
drivers/net:
 - adjust the i40e and ice drivers to the new tx_burst API.
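
For reference, a minimal sketch of the conversion pattern applied to each
PMD in this patch; the mypmd_* names and the ethdev_driver.h include path
are illustrative placeholders, not part of this series:

#include <ethdev_driver.h>

/* Existing PMD datapath routine, unchanged except that it no longer
 * needs external linkage.
 */
static uint16_t
mypmd_xmit_pkts(__rte_unused void *tx_queue,
		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	/* existing transmit logic would live here */
	return nb_pkts;
}

/* Generates the (port_id, queue_id, tx_pkts, nb_pkts) wrapper named
 * _RTE_ETH_FUNC(mypmd_xmit_pkts), which calls _rte_eth_tx_prolog()
 * and then the routine above.
 */
static _RTE_ETH_TX_DEF(mypmd_xmit_pkts)

static int
mypmd_dev_init(struct rte_eth_dev *dev)
{
	/* replaces the former dev->tx_pkt_burst = mypmd_xmit_pkts; */
	return rte_eth_set_tx_burst(dev->data->port_id,
			_RTE_ETH_FUNC(mypmd_xmit_pkts));
}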

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 app/test/virtual_pmd.c                   | 12 ++-
 drivers/net/i40e/i40e_ethdev.c           |  2 +-
 drivers/net/i40e/i40e_ethdev_vf.c        |  3 +-
 drivers/net/i40e/i40e_rxtx.c             | 56 ++++++++------
 drivers/net/i40e/i40e_rxtx.h             | 16 ++--
 drivers/net/i40e/i40e_rxtx_vec_avx2.c    |  4 +-
 drivers/net/i40e/i40e_rxtx_vec_avx512.c  |  4 +-
 drivers/net/i40e/i40e_vf_representor.c   |  5 +-
 drivers/net/ice/ice_dcf_ethdev.c         |  5 +-
 drivers/net/ice/ice_dcf_vf_representor.c |  5 +-
 drivers/net/ice/ice_ethdev.c             |  2 +-
 drivers/net/ice/ice_rxtx.c               | 47 +++++++-----
 drivers/net/ice/ice_rxtx.h               | 20 ++---
 drivers/net/ice/ice_rxtx_vec_avx2.c      |  8 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c    |  8 +-
 drivers/net/ice/ice_rxtx_vec_common.h    |  7 +-
 drivers/net/ice/ice_rxtx_vec_sse.c       |  4 +-
 lib/ethdev/ethdev_driver.h               | 94 ++++++++++++++++++++++++
 lib/ethdev/rte_ethdev.c                  | 23 +++++-
 lib/ethdev/rte_ethdev.h                  | 37 +---------
 lib/ethdev/rte_ethdev_core.h             |  1 -
 lib/ethdev/version.map                   |  2 +
 22 files changed, 247 insertions(+), 118 deletions(-)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 734ef32c97..940b2af1ab 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -390,6 +390,8 @@ virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
 	return nb_pkts;
 }
 
+static _RTE_ETH_TX_DEF(virtual_ethdev_tx_burst_success)
+
 static uint16_t
 virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
 		uint16_t nb_pkts)
@@ -425,6 +427,7 @@ virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
 	return 0;
 }
 
+static _RTE_ETH_TX_DEF(virtual_ethdev_tx_burst_fail)
 
 void
 virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
@@ -447,9 +450,11 @@ virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
 	dev_private = vrtl_eth_dev->data->dev_private;
 
 	if (success)
-		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
+		rte_eth_set_tx_burst(port_id,
+			_RTE_ETH_FUNC(virtual_ethdev_tx_burst_success));
 	else
-		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;
+		rte_eth_set_tx_burst(port_id,
+			_RTE_ETH_FUNC(virtual_ethdev_tx_burst_fail));
 
 	dev_private->tx_burst_fail_count = 0;
 }
@@ -605,7 +610,8 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
 
 	rte_eth_set_rx_burst(eth_dev->data->port_id,
 			_RTE_ETH_FUNC(virtual_ethdev_rx_burst_success));
-	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
+	rte_eth_set_tx_burst(eth_dev->data->port_id,
+			_RTE_ETH_FUNC(virtual_ethdev_tx_burst_success));
 
 	rte_eth_dev_probing_finish(eth_dev);
 
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4753af126d..9eb9129ae9 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1438,7 +1438,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
 	dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
 	rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts));
-	dev->tx_pkt_burst = i40e_xmit_pkts;
+	rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_xmit_pkts));
 	dev->tx_pkt_prepare = i40e_prep_pkts;
 
 	/* for secondary processes, we don't initialise any further as primary
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index e08e97276a..3755bdb66a 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1578,7 +1578,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
 	rte_eth_set_rx_burst(eth_dev->data->port_id,
 		_RTE_ETH_FUNC(i40e_recv_pkts));
-	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
+	rte_eth_set_tx_burst(eth_dev->data->port_id,
+		_RTE_ETH_FUNC(i40e_xmit_pkts));
 
 	/*
 	 * For secondary processes, we don't initialise any further as primary
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index f2d0d35538..5a400435dd 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1067,7 +1067,7 @@ i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt)
 	return count;
 }
 
-uint16_t
+static inline uint16_t
 i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct i40e_tx_queue *txq;
@@ -1315,6 +1315,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	return nb_tx;
 }
 
+_RTE_ETH_TX_DEF(i40e_xmit_pkts)
+
 static __rte_always_inline int
 i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 {
@@ -1509,6 +1511,8 @@ i40e_xmit_pkts_simple(void *tx_queue,
 	return nb_tx;
 }
 
+static _RTE_ETH_TX_DEF(i40e_xmit_pkts_simple)
+
 static uint16_t
 i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		   uint16_t nb_pkts)
@@ -1531,6 +1535,8 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
+static _RTE_ETH_TX_DEF(i40e_xmit_pkts_vec)
+
 /*********************************************************************
  *
  *  TX simple prep functions
@@ -2608,7 +2614,7 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
 void
 i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 {
-	struct rte_eth_dev *dev;
+	rte_eth_tx_burst_t tx_pkt_burst;
 	uint16_t i;
 
 	if (!txq || !txq->sw_ring) {
@@ -2616,14 +2622,14 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 		return;
 	}
 
-	dev = &rte_eth_devices[txq->port_id];
+	tx_pkt_burst = rte_eth_get_tx_burst(txq->port_id);
 
 	/**
 	 *  vPMD tx will not set sw_ring's mbuf to NULL after free,
 	 *  so need to free remains more carefully.
 	 */
 #ifdef CC_AVX512_SUPPORT
-	if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx512) {
+	if (tx_pkt_burst == _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx512)) {
 		struct i40e_vec_tx_entry *swr = (void *)txq->sw_ring;
 
 		i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
@@ -2641,8 +2647,8 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 		return;
 	}
 #endif
-	if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 ||
-			dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
+	if (tx_pkt_burst == _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx2) ||
+			tx_pkt_burst == _RTE_ETH_FUNC(i40e_xmit_pkts_vec)) {
 		i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
 		if (txq->tx_tail < i) {
 			for (; i < txq->nb_tx_desc; i++) {
@@ -3564,49 +3570,55 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
 #ifdef CC_AVX512_SUPPORT
 				PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
-				dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx512;
+				rte_eth_set_tx_burst(dev->data->port_id,
+					_RTE_ETH_FUNC(
+						i40e_xmit_pkts_vec_avx512));
 #endif
 			} else {
 				PMD_INIT_LOG(DEBUG, "Using %sVector Tx (port %d).",
 					     ad->tx_use_avx2 ? "avx2 " : "",
 					     dev->data->port_id);
-				dev->tx_pkt_burst = ad->tx_use_avx2 ?
-						    i40e_xmit_pkts_vec_avx2 :
-						    i40e_xmit_pkts_vec;
+				rte_eth_set_tx_burst(dev->data->port_id,
+					ad->tx_use_avx2 ?
+					_RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx2) :
+					_RTE_ETH_FUNC(i40e_xmit_pkts_vec));
 			}
 #else /* RTE_ARCH_X86 */
 			PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).",
 				     dev->data->port_id);
-			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+			rte_eth_set_tx_burst(dev->data->port_id,
+				_RTE_ETH_FUNC(i40e_xmit_pkts_vec));
 #endif /* RTE_ARCH_X86 */
 		} else {
 			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
-			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+			rte_eth_set_tx_burst(dev->data->port_id,
+				_RTE_ETH_FUNC(i40e_xmit_pkts_simple));
 		}
 		dev->tx_pkt_prepare = i40e_simple_prep_pkts;
 	} else {
 		PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
-		dev->tx_pkt_burst = i40e_xmit_pkts;
+		rte_eth_set_tx_burst(dev->data->port_id,
+			_RTE_ETH_FUNC(i40e_xmit_pkts));
 		dev->tx_pkt_prepare = i40e_prep_pkts;
 	}
 }
 
 static const struct {
-	eth_tx_burst_t pkt_burst;
+	rte_eth_tx_burst_t pkt_burst;
 	const char *info;
 } i40e_tx_burst_infos[] = {
-	{ i40e_xmit_pkts_simple,   "Scalar Simple" },
-	{ i40e_xmit_pkts,          "Scalar" },
+	{ _RTE_ETH_FUNC(i40e_xmit_pkts_simple),   "Scalar Simple" },
+	{ _RTE_ETH_FUNC(i40e_xmit_pkts),          "Scalar" },
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
-	{ i40e_xmit_pkts_vec_avx512, "Vector AVX512" },
+	{ _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx512), "Vector AVX512" },
 #endif
-	{ i40e_xmit_pkts_vec_avx2, "Vector AVX2" },
-	{ i40e_xmit_pkts_vec,      "Vector SSE" },
+	{ _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx2), "Vector AVX2" },
+	{ _RTE_ETH_FUNC(i40e_xmit_pkts_vec),      "Vector SSE" },
 #elif defined(RTE_ARCH_ARM64)
-	{ i40e_xmit_pkts_vec,      "Vector Neon" },
+	{ _RTE_ETH_FUNC(i40e_xmit_pkts_vec),      "Vector Neon" },
 #elif defined(RTE_ARCH_PPC_64)
-	{ i40e_xmit_pkts_vec,      "Vector AltiVec" },
+	{ _RTE_ETH_FUNC(i40e_xmit_pkts_vec),      "Vector AltiVec" },
 #endif
 };
 
@@ -3614,7 +3626,7 @@ int
 i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 		       struct rte_eth_burst_mode *mode)
 {
-	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+	rte_eth_tx_burst_t pkt_burst = rte_eth_get_tx_burst(dev->data->port_id);
 	int ret = -EINVAL;
 	unsigned int i;
 
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index beeeaae78d..c51d5db2f7 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -203,9 +203,7 @@ void i40e_dev_tx_queue_release(void *txq);
 _RTE_ETH_RX_PROTO(i40e_recv_pkts);
 _RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts);
 
-uint16_t i40e_xmit_pkts(void *tx_queue,
-			struct rte_mbuf **tx_pkts,
-			uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(i40e_xmit_pkts);
 uint16_t i40e_simple_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			       uint16_t nb_pkts);
 uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -236,8 +234,10 @@ int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
 int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
 void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+
 uint16_t i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-				   uint16_t nb_pkts);
+					uint16_t nb_pkts);
+
 void i40e_set_rx_function(struct rte_eth_dev *dev);
 void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
 			       struct i40e_tx_queue *txq);
@@ -248,16 +248,14 @@ void i40e_set_default_pctype_table(struct rte_eth_dev *dev);
 _RTE_ETH_RX_PROTO(i40e_recv_pkts_vec_avx2);
 _RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec_avx2);
 
-uint16_t i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
-	uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(i40e_xmit_pkts_vec_avx2);
+
 int i40e_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
 
 _RTE_ETH_RX_PROTO(i40e_recv_pkts_vec_avx512);
 _RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec_avx512);
 
-uint16_t i40e_xmit_pkts_vec_avx512(void *tx_queue,
-				   struct rte_mbuf **tx_pkts,
-				   uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(i40e_xmit_pkts_vec_avx512);
 
 /* For each value it means, datasheet of hardware can tell more details
  *
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index 5c03d16644..f011088ad7 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -824,7 +824,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_pkts;
 }
 
-uint16_t
+static inline uint16_t
 i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 		   uint16_t nb_pkts)
 {
@@ -845,3 +845,5 @@ i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	return nb_tx;
 }
+
+_RTE_ETH_TX_DEF(i40e_xmit_pkts_vec_avx2)
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index 96ff3d60c3..e37dc5a401 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -1120,7 +1120,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_pkts;
 }
 
-uint16_t
+static inline uint16_t
 i40e_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 			  uint16_t nb_pkts)
 {
@@ -1141,3 +1141,5 @@ i40e_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	return nb_tx;
 }
+
+_RTE_ETH_TX_DEF(i40e_xmit_pkts_vec_avx512)
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
index 9d32a5c85d..f488ef51cd 100644
--- a/drivers/net/i40e/i40e_vf_representor.c
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -475,6 +475,8 @@ i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
 	return 0;
 }
 
+static _RTE_ETH_TX_DEF(i40e_vf_representor_tx_burst)
+
 int
 i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
 {
@@ -505,7 +507,8 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
 	 */
 	rte_eth_set_rx_burst(ethdev->data->port_id,
 			_RTE_ETH_FUNC(i40e_vf_representor_rx_burst));
-	ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
+	rte_eth_set_tx_burst(ethdev->data->port_id,
+			_RTE_ETH_FUNC(i40e_vf_representor_tx_burst));
 
 	vf = &pf->vfs[representor->vf_id];
 
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 58a4204621..f9a917a13f 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -50,6 +50,8 @@ ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
 	return 0;
 }
 
+static _RTE_ETH_TX_DEF(ice_dcf_xmit_pkts)
+
 static int
 ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
 {
@@ -1043,7 +1045,8 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
 	rte_eth_set_rx_burst(eth_dev->data->port_id,
 			_RTE_ETH_FUNC(ice_dcf_recv_pkts));
-	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
+	rte_eth_set_tx_burst(eth_dev->data->port_id,
+			_RTE_ETH_FUNC(ice_dcf_xmit_pkts));
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c
index 8136169ebd..8b46c9614a 100644
--- a/drivers/net/ice/ice_dcf_vf_representor.c
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -28,6 +28,8 @@ ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
 	return 0;
 }
 
+static _RTE_ETH_TX_DEF(ice_dcf_vf_repr_tx_burst)
+
 static int
 ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
 {
@@ -417,7 +419,8 @@ ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param)
 
 	rte_eth_set_rx_burst(vf_rep_eth_dev->data->port_id,
 			_RTE_ETH_FUNC(ice_dcf_vf_repr_rx_burst));
-	vf_rep_eth_dev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;
+	rte_eth_set_tx_burst(vf_rep_eth_dev->data->port_id,
+			_RTE_ETH_FUNC(ice_dcf_vf_repr_tx_burst));
 
 	vf_rep_eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
 	vf_rep_eth_dev->data->representor_id = repr->vf_id;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 4d67a2dddf..9558455f7f 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1997,7 +1997,7 @@ ice_dev_init(struct rte_eth_dev *dev)
 	dev->rx_descriptor_status = ice_rx_descriptor_status;
 	dev->tx_descriptor_status = ice_tx_descriptor_status;
 	rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_recv_pkts));
-	dev->tx_pkt_burst = ice_xmit_pkts;
+	rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts));
 	dev->tx_pkt_prepare = ice_prep_pkts;
 
 	/* for secondary processes, we don't initialise any further as primary
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 2cc411d315..e97564fdd6 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -2558,7 +2558,7 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
 	return count;
 }
 
-uint16_t
+static inline uint16_t
 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct ice_tx_queue *txq;
@@ -2775,6 +2775,8 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	return nb_tx;
 }
 
+_RTE_ETH_TX_DEF(ice_xmit_pkts)
+
 static __rte_always_inline int
 ice_tx_free_bufs(struct ice_tx_queue *txq)
 {
@@ -3064,6 +3066,8 @@ ice_xmit_pkts_simple(void *tx_queue,
 	return nb_tx;
 }
 
+static _RTE_ETH_TX_DEF(ice_xmit_pkts_simple)
+
 void __rte_cold
 ice_set_rx_function(struct rte_eth_dev *dev)
 {
@@ -3433,14 +3437,15 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 				PMD_DRV_LOG(NOTICE,
 					    "Using AVX512 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
-				dev->tx_pkt_burst =
-					ice_xmit_pkts_vec_avx512_offload;
+				rte_eth_set_tx_burst(dev->data->port_id,
+					_RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload));
 				dev->tx_pkt_prepare = ice_prep_pkts;
 			} else {
 				PMD_DRV_LOG(NOTICE,
 					    "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
-				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+				rte_eth_set_tx_burst(dev->data->port_id,
+					_RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512));
 			}
 #endif
 		} else {
@@ -3448,16 +3453,17 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 				PMD_DRV_LOG(NOTICE,
 					    "Using AVX2 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
-				dev->tx_pkt_burst =
-					ice_xmit_pkts_vec_avx2_offload;
+				rte_eth_set_tx_burst(dev->data->port_id,
+					_RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2_offload));
 				dev->tx_pkt_prepare = ice_prep_pkts;
 			} else {
 				PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
 					    ad->tx_use_avx2 ? "avx2 " : "",
 					    dev->data->port_id);
-				dev->tx_pkt_burst = ad->tx_use_avx2 ?
-						    ice_xmit_pkts_vec_avx2 :
-						    ice_xmit_pkts_vec;
+				rte_eth_set_tx_burst(dev->data->port_id,
+					ad->tx_use_avx2 ?
+					_RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2) :
+					_RTE_ETH_FUNC(ice_xmit_pkts_vec));
 			}
 		}
 
@@ -3467,28 +3473,31 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 
 	if (ad->tx_simple_allowed) {
 		PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
-		dev->tx_pkt_burst = ice_xmit_pkts_simple;
+		rte_eth_set_tx_burst(dev->data->port_id,
+			_RTE_ETH_FUNC(ice_xmit_pkts_simple));
 		dev->tx_pkt_prepare = NULL;
 	} else {
 		PMD_INIT_LOG(DEBUG, "Normal tx finally be used.");
-		dev->tx_pkt_burst = ice_xmit_pkts;
+		rte_eth_set_tx_burst(dev->data->port_id,
+			_RTE_ETH_FUNC(ice_xmit_pkts));
 		dev->tx_pkt_prepare = ice_prep_pkts;
 	}
 }
 
 static const struct {
-	eth_tx_burst_t pkt_burst;
+	rte_eth_tx_burst_t pkt_burst;
 	const char *info;
 } ice_tx_burst_infos[] = {
-	{ ice_xmit_pkts_simple,   "Scalar Simple" },
-	{ ice_xmit_pkts,          "Scalar" },
+	{ _RTE_ETH_FUNC(ice_xmit_pkts_simple),   "Scalar Simple" },
+	{ _RTE_ETH_FUNC(ice_xmit_pkts),          "Scalar" },
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
-	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
-	{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
+	{ _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512), "Vector AVX512" },
+	{ _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload),
+		"Offload Vector AVX512" },
 #endif
-	{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
-	{ ice_xmit_pkts_vec,      "Vector SSE" },
+	{ _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2), "Vector AVX2" },
+	{ _RTE_ETH_FUNC(ice_xmit_pkts_vec),      "Vector SSE" },
 #endif
 };
 
@@ -3496,7 +3505,7 @@ int
 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 		      struct rte_eth_burst_mode *mode)
 {
-	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+	rte_eth_tx_burst_t pkt_burst = rte_eth_get_tx_burst(dev->data->port_id);
 	int ret = -EINVAL;
 	unsigned int i;
 
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index be8d43a591..3c06406204 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -213,8 +213,7 @@ void ice_free_queues(struct rte_eth_dev *dev);
 int ice_fdir_setup_tx_resources(struct ice_pf *pf);
 int ice_fdir_setup_rx_resources(struct ice_pf *pf);
 _RTE_ETH_RX_PROTO(ice_recv_pkts);
-uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-		       uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts);
 void ice_set_rx_function(struct rte_eth_dev *dev);
 uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		       uint16_t nb_pkts);
@@ -245,29 +244,24 @@ int ice_txq_vec_setup(struct ice_tx_queue *txq);
 _RTE_ETH_RX_PROTO(ice_recv_pkts_vec);
 _RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec);
 
-uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-			   uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec);
 
 _RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx2);
 _RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx2_offload);
 _RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx2);
 _RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx2_offload);
 
-uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
-				uint16_t nb_pkts);
-uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
-					uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx2);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx2_offload);
 
 _RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx512);
 _RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx512_offload);
 _RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx512);
 _RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx512_offload);
 
-uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
-				  uint16_t nb_pkts);
-uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
-					  struct rte_mbuf **tx_pkts,
-					  uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx512);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx512_offload);
+
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 29b9b57f9f..a15a673767 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -985,16 +985,20 @@ ice_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-uint16_t
+static inline uint16_t
 ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 		       uint16_t nb_pkts)
 {
 	return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-uint16_t
+_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx2)
+
+static inline uint16_t
 ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
 			       uint16_t nb_pkts)
 {
 	return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, true);
 }
+
+_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx2_offload)
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 30c44c8918..d2fdd64cf8 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -1235,7 +1235,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_pkts;
 }
 
-uint16_t
+static inline uint16_t
 ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 uint16_t nb_pkts)
 {
@@ -1257,7 +1257,9 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-uint16_t
+_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx512)
+
+static inline uint16_t
 ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
 				 uint16_t nb_pkts)
 {
@@ -1279,3 +1281,5 @@ ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	return nb_tx;
 }
+
+_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx512_offload)
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 2d8ef7dc8a..f7604f960b 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -195,10 +195,11 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
 	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
 
 #ifdef CC_AVX512_SUPPORT
-	struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];
+	rte_eth_tx_burst_t tx_pkt_burst =
+		rte_eth_get_tx_burst(txq->vsi->adapter->pf.dev_data->port_id);
 
-	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
-	    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
+	if (tx_pkt_burst == _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512) ||
+	    tx_pkt_burst == _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload)) {
 		struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
 
 		if (txq->tx_tail < i) {
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 2caf1c6941..344bd11508 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -758,7 +758,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_pkts;
 }
 
-uint16_t
+static inline uint16_t
 ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		  uint16_t nb_pkts)
 {
@@ -779,6 +779,8 @@ ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
+_RTE_ETH_TX_DEF(ice_xmit_pkts_vec)
+
 int __rte_cold
 ice_rxq_vec_setup(struct ice_rx_queue *rxq)
 {
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 8b7d1e8840..45d1160465 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -1633,6 +1633,100 @@ rte_eth_rx_burst_t rte_eth_get_rx_burst(uint16_t port_id);
 __rte_experimental
 int rte_eth_set_rx_burst(uint16_t port_id, rte_eth_rx_burst_t rxf);
 
+/**
+ * @internal
+ * Helper routine for the ethdev driver tx_burst API.
+ * Should be called as the first thing on entrance to the PMD's tx_burst
+ * implementation.
+ * Does the necessary checks and pre-processing - invokes TX callbacks
+ * if any, tracing, etc.
+ *
+ * @param port_id
+ *  The port identifier of the Ethernet device.
+ * @param queue_id
+ *  The index of the transmit queue.
+ * @param tx_pkts
+ *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
+ *   which contain the output packets.
+ * @param nb_pkts
+ *   Pointer to the maximum number of packets to transmit, updated by TX callbacks.
+ *
+ * @return
+ *  Pointer to device TX queue structure on success or NULL otherwise.
+ */
+__rte_internal
+static inline void *
+_rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id,
+		struct rte_mbuf **tx_pkts, uint16_t *nb_pkts)
+{
+	uint16_t n;
+	struct rte_eth_dev *dev;
+
+	n = *nb_pkts;
+	dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_ETHDEV_DEBUG_TX
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
+
+	if (queue_id >= dev->data->nb_tx_queues) {
+		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+		return NULL;
+	}
+#endif
+
+#ifdef RTE_ETHDEV_RXTX_CALLBACKS
+	struct rte_eth_rxtx_callback *cb;
+
+	/* __ATOMIC_RELEASE memory order was used when the
+	 * call back was inserted into the list.
+	 * Since there is a clear dependency between loading
+	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+	 * not required.
+	 */
+	cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
+				__ATOMIC_RELAXED);
+
+	if (unlikely(cb != NULL)) {
+		do {
+			n = cb->fn.tx(port_id, queue_id, tx_pkts, n, cb->param);
+			cb = cb->next;
+		} while (cb != NULL);
+	}
+
+	*nb_pkts = n;
+#endif
+
+	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, n);
+	return dev->data->tx_queues[queue_id];
+}
+
+/**
+ * @internal
+ * Helper macro to declare the prototype of a new API tx_burst wrapper.
+ */
+#define _RTE_ETH_TX_PROTO(fn) \
+	uint16_t _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id, \
+			struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+
+/**
+ * @internal
+ * Helper macro to define a new API wrapper around a PMD tx_burst function.
+ */
+#define _RTE_ETH_TX_DEF(fn) \
+_RTE_ETH_TX_PROTO(fn) \
+{ \
+	void *txq = _rte_eth_tx_prolog(port_id, queue_id, tx_pkts, &nb_pkts); \
+	if (txq == NULL) \
+		return 0; \
+	return fn(txq, tx_pkts, nb_pkts); \
+}
+
+__rte_experimental
+rte_eth_tx_burst_t rte_eth_get_tx_burst(uint16_t port_id);
+
+__rte_experimental
+int rte_eth_set_tx_burst(uint16_t port_id, rte_eth_tx_burst_t txf);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index c126626281..1165e0bb32 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -588,7 +588,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
 	eth_dev->device = NULL;
 	eth_dev->process_private = NULL;
 	eth_dev->intr_handle = NULL;
-	eth_dev->tx_pkt_burst = NULL;
 	eth_dev->tx_pkt_prepare = NULL;
 	eth_dev->rx_queue_count = NULL;
 	eth_dev->rx_descriptor_done = NULL;
@@ -6358,3 +6357,25 @@ rte_eth_set_rx_burst(uint16_t port_id, rte_eth_rx_burst_t rxf)
 	rte_eth_burst_api[port_id].rx_pkt_burst = rxf;
 	return 0;
 }
+
+__rte_experimental
+rte_eth_tx_burst_t
+rte_eth_get_tx_burst(uint16_t port_id)
+{
+	if (port_id >= RTE_DIM(rte_eth_burst_api)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_eth_burst_api[port_id].tx_pkt_burst;
+}
+
+__rte_experimental
+int
+rte_eth_set_tx_burst(uint16_t port_id, rte_eth_tx_burst_t txf)
+{
+	if (port_id >= RTE_DIM(rte_eth_burst_api))
+		return -EINVAL;
+
+	rte_eth_burst_api[port_id].tx_pkt_burst = txf;
+	return 0;
+}
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index a155f255ad..3eac61a289 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -5226,42 +5226,11 @@ static inline uint16_t
 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
 		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-
-#ifdef RTE_ETHDEV_DEBUG_TX
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
-
-	if (queue_id >= dev->data->nb_tx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+	if (port_id >= RTE_MAX_ETHPORTS)
 		return 0;
-	}
-#endif
-
-#ifdef RTE_ETHDEV_RXTX_CALLBACKS
-	struct rte_eth_rxtx_callback *cb;
 
-	/* __ATOMIC_RELEASE memory order was used when the
-	 * call back was inserted into the list.
-	 * Since there is a clear dependency between loading
-	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
-	 * not required.
-	 */
-	cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
-				__ATOMIC_RELAXED);
-
-	if (unlikely(cb != NULL)) {
-		do {
-			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
-					cb->param);
-			cb = cb->next;
-		} while (cb != NULL);
-	}
-#endif
-
-	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts,
-		nb_pkts);
-	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
+	return rte_eth_burst_api[port_id].tx_pkt_burst(port_id, queue_id,
+			tx_pkts, nb_pkts);
 }
 
 /**
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index 94ffa071e3..ace77db1b6 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -115,7 +115,6 @@ struct rte_eth_rxtx_callback {
  * process, while the actual configuration data for the device is shared.
  */
 struct rte_eth_dev {
-	eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
 	eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
 
 	eth_rx_queue_count_t       rx_queue_count; /**< Get the number of used RX descriptors. */
diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
index 2698c75940..8f8a6b4a5a 100644
--- a/lib/ethdev/version.map
+++ b/lib/ethdev/version.map
@@ -253,7 +253,9 @@ EXPERIMENTAL {
 	# added in 21.11
 	rte_eth_burst_api;
 	rte_eth_get_rx_burst;
+	rte_eth_get_tx_burst;
 	rte_eth_set_rx_burst;
+	rte_eth_set_tx_burst;
 };
 
 INTERNAL {
-- 
2.26.3



Thread overview: 112+ messages
2021-08-20 16:28 [dpdk-dev] [RFC 0/7] hide eth dev related structures Konstantin Ananyev
2021-08-20 16:28 ` [dpdk-dev] [RFC 1/7] eth: move ethdev 'burst' API into separate structure Konstantin Ananyev
2021-08-20 16:28 ` [dpdk-dev] [RFC 2/7] eth: make drivers to use new API for Rx Konstantin Ananyev
2021-09-06 18:41   ` Ferruh Yigit
2021-09-14 14:28     ` Ananyev, Konstantin
2021-08-20 16:28 ` Konstantin Ananyev [this message]
2021-08-20 16:28 ` [dpdk-dev] [RFC 4/7] eth: make drivers to use new API for Tx prepare Konstantin Ananyev
2021-08-20 16:28 ` [dpdk-dev] [RFC 5/7] eth: make drivers to use new API to obtain descriptor status Konstantin Ananyev
2021-08-20 16:28 ` [dpdk-dev] [RFC 6/7] eth: make drivers to use new API for Rx queue count Konstantin Ananyev
2021-08-20 16:28 ` [dpdk-dev] [RFC 7/7] eth: hide eth dev related structures Konstantin Ananyev
2021-08-26 12:37 ` [dpdk-dev] [RFC 0/7] " Jerin Jacob
2021-09-06 18:09   ` Ferruh Yigit
2021-09-14 13:33   ` Ananyev, Konstantin
2021-09-15  9:45     ` Jerin Jacob
2021-09-22 15:08       ` Ananyev, Konstantin
2021-09-27 16:14         ` Jerin Jacob
2021-09-28  9:37           ` Ananyev, Konstantin
2021-09-22 14:09 ` [dpdk-dev] [RFC v2 0/5] " Konstantin Ananyev
2021-09-22 14:09   ` [dpdk-dev] [RFC v2 1/5] ethdev: allocate max space for internal queue array Konstantin Ananyev
2021-09-22 14:09   ` [dpdk-dev] [RFC v2 2/5] ethdev: change input parameters for rx_queue_count Konstantin Ananyev
2021-09-23  5:51     ` Wang, Haiyue
2021-09-22 14:09   ` [dpdk-dev] [RFC v2 3/5] ethdev: copy ethdev 'burst' API into separate structure Konstantin Ananyev
2021-09-23  5:58     ` Wang, Haiyue
2021-09-27 18:01       ` Jerin Jacob
2021-09-28  9:42         ` Ananyev, Konstantin
2021-09-22 14:09   ` [dpdk-dev] [RFC v2 4/5] ethdev: make burst functions to use new flat array Konstantin Ananyev
2021-09-22 14:09   ` [dpdk-dev] [RFC v2 5/5] ethdev: hide eth dev related structures Konstantin Ananyev
2021-10-01 14:02   ` [dpdk-dev] [PATCH v3 0/7] " Konstantin Ananyev
2021-10-01 14:02     ` [dpdk-dev] [PATCH v3 1/7] ethdev: allocate max space for internal queue array Konstantin Ananyev
2021-10-01 16:48       ` Ferruh Yigit
2021-10-01 14:02     ` [dpdk-dev] [PATCH v3 2/7] ethdev: change input parameters for rx_queue_count Konstantin Ananyev
2021-10-01 14:02     ` [dpdk-dev] [PATCH v3 3/7] ethdev: copy ethdev 'fast' API into separate structure Konstantin Ananyev
2021-10-01 14:02     ` [dpdk-dev] [PATCH v3 4/7] ethdev: make burst functions to use new flat array Konstantin Ananyev
2021-10-01 16:46       ` Ferruh Yigit
2021-10-01 17:40         ` Ananyev, Konstantin
2021-10-04  8:46           ` Ferruh Yigit
2021-10-04  9:20             ` Ananyev, Konstantin
2021-10-04 10:13               ` Ferruh Yigit
2021-10-04 11:17                 ` Ananyev, Konstantin
2021-10-01 14:02     ` [dpdk-dev] [PATCH v3 5/7] ethdev: add API to retrieve multiple ethernet addresses Konstantin Ananyev
2021-10-01 14:02     ` [dpdk-dev] [PATCH v3 6/7] ethdev: remove legacy Rx descriptor done API Konstantin Ananyev
2021-10-01 14:02     ` [dpdk-dev] [PATCH v3 7/7] ethdev: hide eth dev related structures Konstantin Ananyev
2021-10-01 16:53       ` Ferruh Yigit
2021-10-01 17:04         ` Ferruh Yigit
2021-10-01 17:02     ` [dpdk-dev] [PATCH v3 0/7] " Ferruh Yigit
2021-10-04 13:55     ` [dpdk-dev] [PATCH v4 " Konstantin Ananyev
2021-10-04 13:55       ` [dpdk-dev] [PATCH v4 1/7] ethdev: allocate max space for internal queue array Konstantin Ananyev
2021-10-05 12:09         ` Thomas Monjalon
2021-10-05 16:45           ` Ananyev, Konstantin
2021-10-05 16:49             ` Thomas Monjalon
2021-10-05 12:21         ` Thomas Monjalon
2021-10-04 13:55       ` [dpdk-dev] [PATCH v4 2/7] ethdev: change input parameters for rx_queue_count Konstantin Ananyev
2021-10-04 13:55       ` [dpdk-dev] [PATCH v4 3/7] ethdev: copy ethdev 'fast' API into separate structure Konstantin Ananyev
2021-10-05 13:09         ` Thomas Monjalon
2021-10-05 16:41           ` Ananyev, Konstantin
2021-10-05 16:48             ` Thomas Monjalon
2021-10-05 17:04               ` Ananyev, Konstantin
2021-10-04 13:56       ` [dpdk-dev] [PATCH v4 4/7] ethdev: make burst functions to use new flat array Konstantin Ananyev
2021-10-05  9:54         ` David Marchand
2021-10-05 10:13           ` Ananyev, Konstantin
2021-10-04 13:56       ` [dpdk-dev] [PATCH v4 5/7] ethdev: add API to retrieve multiple ethernet addresses Konstantin Ananyev
2021-10-05 13:13         ` Thomas Monjalon
2021-10-05 16:35           ` Ananyev, Konstantin
2021-10-05 16:45             ` Thomas Monjalon
2021-10-05 17:12               ` Ananyev, Konstantin
2021-10-05 17:41                 ` Thomas Monjalon
2021-10-04 13:56       ` [dpdk-dev] [PATCH v4 6/7] ethdev: remove legacy Rx descriptor done API Konstantin Ananyev
2021-10-05 13:14         ` Thomas Monjalon
2021-10-05 16:21           ` Ananyev, Konstantin
2021-10-04 13:56       ` [dpdk-dev] [PATCH v4 7/7] ethdev: hide eth dev related structures Konstantin Ananyev
2021-10-05 10:04         ` David Marchand
2021-10-05 10:43           ` Ferruh Yigit
2021-10-05 11:37             ` David Marchand
2021-10-05 15:57               ` Ananyev, Konstantin
2021-10-05 13:24         ` Thomas Monjalon
2021-10-05 16:19           ` Ananyev, Konstantin
2021-10-05 16:25             ` Thomas Monjalon
2021-10-06 16:42       ` [dpdk-dev] [PATCH v4 0/7] " Ali Alnubani
2021-10-06 17:26         ` Ali Alnubani
2021-10-07 11:27       ` [dpdk-dev] [PATCH v5 " Konstantin Ananyev
2021-10-07 11:27         ` [dpdk-dev] [PATCH v5 1/7] ethdev: remove legacy Rx descriptor done API Konstantin Ananyev
2021-10-07 11:27         ` [dpdk-dev] [PATCH v5 2/7] ethdev: allocate max space for internal queue array Konstantin Ananyev
2021-10-11  9:20           ` Andrew Rybchenko
2021-10-11 16:25             ` Ananyev, Konstantin
2021-10-11 17:15               ` Andrew Rybchenko
2021-10-11 23:06                 ` Ananyev, Konstantin
2021-10-12  5:47                   ` Andrew Rybchenko
2021-10-07 11:27         ` [dpdk-dev] [PATCH v5 3/7] ethdev: change input parameters for rx_queue_count Konstantin Ananyev
2021-10-11  8:06           ` Andrew Rybchenko
2021-10-12 17:59           ` Hyong Youb Kim (hyonkim)
2021-10-07 11:27         ` [dpdk-dev] [PATCH v5 4/7] ethdev: copy fast-path API into separate structure Konstantin Ananyev
2021-10-09 12:05           ` fengchengwen
2021-10-11  1:18             ` fengchengwen
2021-10-11  8:39               ` Andrew Rybchenko
2021-10-11 15:24               ` Ananyev, Konstantin
2021-10-11  8:35             ` Andrew Rybchenko
2021-10-11 15:15             ` Ananyev, Konstantin
2021-10-11  8:25           ` Andrew Rybchenko
2021-10-11 16:52             ` Ananyev, Konstantin
2021-10-11 17:22               ` Andrew Rybchenko
2021-10-07 11:27         ` [dpdk-dev] [PATCH v5 5/7] ethdev: make fast-path functions to use new flat array Konstantin Ananyev
2021-10-11  9:02           ` Andrew Rybchenko
2021-10-11 15:47             ` Ananyev, Konstantin
2021-10-11 17:03               ` Andrew Rybchenko
2021-10-07 11:27         ` [dpdk-dev] [PATCH v5 6/7] ethdev: add API to retrieve multiple ethernet addresses Konstantin Ananyev
2021-10-11  9:09           ` Andrew Rybchenko
2021-10-07 11:27         ` [dpdk-dev] [PATCH v5 7/7] ethdev: hide eth dev related structures Konstantin Ananyev
2021-10-11  9:20           ` Andrew Rybchenko
2021-10-11 15:54             ` Ananyev, Konstantin
2021-10-11 17:04               ` Andrew Rybchenko
2021-10-08 18:13         ` [dpdk-dev] [PATCH v5 0/7] " Slava Ovsiienko
2021-10-11  9:22         ` Andrew Rybchenko
