From: Igor Russkikh <igor.russkikh@aquantia.com>
To: dev@dpdk.org
Cc: pavel.belous@aquantia.com, igor.russkikh@aquantia.com,
	Pavel Belous <Pavel.Belous@aquantia.com>
Subject: [PATCH v3 10/22] net/atlantic: TX side structures and implementation
Date: Sat, 29 Sep 2018 13:30:24 +0300
Message-ID: <88d51a926b48f55695fab80c120913ee88bd6122.1538215990.git.igor.russkikh@aquantia.com>
In-Reply-To: <cover.1538215990.git.igor.russkikh@aquantia.com>

From: Pavel Belous <Pavel.Belous@aquantia.com>

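Add Tx-side queue structures and the transmit datapath: queue
setup/release and start/stop, descriptor ring cleanup, and the
atl_prep_pkts/atl_xmit_pkts burst functions with checksum, VLAN
insertion and TSO offloads.
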
Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: Pavel Belous <Pavel.Belous@aquantia.com>
---
 drivers/net/atlantic/atl_ethdev.c |  28 ++
 drivers/net/atlantic/atl_ethdev.h |   7 +
 drivers/net/atlantic/atl_rxtx.c   | 530 +++++++++++++++++++++++++++++++++++++-
 3 files changed, 556 insertions(+), 9 deletions(-)

diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index cdcfc5ec02c2..6a00277c3c8e 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -79,12 +79,27 @@ static struct rte_pci_driver rte_atl_pmd = {
 			| DEV_RX_OFFLOAD_TCP_CKSUM \
 			| DEV_RX_OFFLOAD_JUMBO_FRAME)
 
+#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
+			| DEV_TX_OFFLOAD_IPV4_CKSUM \
+			| DEV_TX_OFFLOAD_UDP_CKSUM \
+			| DEV_TX_OFFLOAD_TCP_CKSUM \
+			| DEV_TX_OFFLOAD_TCP_TSO \
+			| DEV_TX_OFFLOAD_MULTI_SEGS)
+
 static const struct rte_eth_desc_lim rx_desc_lim = {
 	.nb_max = ATL_MAX_RING_DESC,
 	.nb_min = ATL_MIN_RING_DESC,
 	.nb_align = ATL_RXD_ALIGN,
 };
 
+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = ATL_MAX_RING_DESC,
+	.nb_min = ATL_MIN_RING_DESC,
+	.nb_align = ATL_TXD_ALIGN,
+	.nb_seg_max = ATL_TX_MAX_SEG,
+	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
+};
+
 static const struct eth_dev_ops atl_eth_dev_ops = {
 	.dev_configure	      = atl_dev_configure,
 	.dev_start	      = atl_dev_start,
@@ -101,6 +116,11 @@ static const struct eth_dev_ops atl_eth_dev_ops = {
 	.rx_queue_stop	      = atl_rx_queue_stop,
 	.rx_queue_setup       = atl_rx_queue_setup,
 	.rx_queue_release     = atl_rx_queue_release,
+
+	.tx_queue_start	      = atl_tx_queue_start,
+	.tx_queue_stop	      = atl_tx_queue_stop,
+	.tx_queue_setup       = atl_tx_queue_setup,
+	.tx_queue_release     = atl_tx_queue_release,
 };
 
 static inline int32_t
@@ -369,11 +389,19 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
 
+	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
 	};
 
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
+	};
+
 	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 static const uint32_t *
diff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h
index acb3066c189f..cafe37cdf963 100644
--- a/drivers/net/atlantic/atl_ethdev.h
+++ b/drivers/net/atlantic/atl_ethdev.h
@@ -28,12 +28,17 @@ struct atl_adapter {
  * RX/TX function prototypes
  */
 void atl_rx_queue_release(void *rxq);
+void atl_tx_queue_release(void *txq);
 
 int atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
 		const struct rte_eth_rxconf *rx_conf,
 		struct rte_mempool *mb_pool);
 
+int atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+		uint16_t nb_tx_desc, unsigned int socket_id,
+		const struct rte_eth_txconf *tx_conf);
+
 int atl_rx_init(struct rte_eth_dev *dev);
 int atl_tx_init(struct rte_eth_dev *dev);
 
@@ -44,6 +49,8 @@ void atl_free_queues(struct rte_eth_dev *dev);
 int atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 
+int atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
 uint16_t atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index 0ce41aaf1780..172d5fb232f2 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -4,6 +4,7 @@
 
 #include <rte_malloc.h>
 #include <rte_ethdev_driver.h>
+#include <rte_net.h>
 
 #include "atl_ethdev.h"
 #include "atl_hw_regs.h"
@@ -13,6 +14,20 @@
 #include "hw_atl/hw_atl_b0.h"
 #include "hw_atl/hw_atl_b0_internal.h"
 
+#define ATL_TX_CKSUM_OFFLOAD_MASK (		\
+	PKT_TX_IP_CKSUM |			\
+	PKT_TX_L4_MASK |			\
+	PKT_TX_TCP_SEG)
+
+#define ATL_TX_OFFLOAD_MASK (			\
+	PKT_TX_VLAN_PKT |			\
+	PKT_TX_IP_CKSUM |			\
+	PKT_TX_L4_MASK |			\
+	PKT_TX_TCP_SEG)
+
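+/* Tx offload flags not supported by this PMD; rejected in atl_prep_pkts() */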
+#define ATL_TX_OFFLOAD_NOTSUP_MASK \
+	(PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
+
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -21,6 +36,15 @@ struct atl_rx_entry {
 };
 
 /**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct atl_tx_entry {
+	struct rte_mbuf *mbuf;
+	uint16_t next_id;
+	uint16_t last_id;
+};
+
+/**
  * Structure associated with each RX queue.
  */
 struct atl_rx_queue {
@@ -39,6 +63,22 @@ struct atl_rx_queue {
 	bool			l4_csum_enabled;
 };
 
+/**
+ * Structure associated with each TX queue.
+ */
+struct atl_tx_queue {
+	struct hw_atl_txd_s	*hw_ring;
+	uint64_t		hw_ring_phys_addr;
+	struct atl_tx_entry	*sw_ring;
+	uint16_t		nb_tx_desc;
+	uint16_t		tx_tail;
+	uint16_t		tx_head;
+	uint16_t		queue_id;
+	uint16_t		port_id;
+	uint16_t		tx_free_thresh;
+	uint16_t		tx_free;
+};
+
 static inline void
 atl_reset_rx_queue(struct atl_rx_queue *rxq)
 {
@@ -143,13 +183,141 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	return 0;
 }
 
+static inline void
+atl_reset_tx_queue(struct atl_tx_queue *txq)
+{
+	struct atl_tx_entry *tx_entry;
+	union hw_atl_txc_s *txc;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (!txq) {
+		PMD_DRV_LOG(ERR, "Pointer to txq is NULL");
+		return;
+	}
+
+	tx_entry = txq->sw_ring;
+
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txc = (union hw_atl_txc_s *)&txq->hw_ring[i];
+		txc->flags1 = 0;
+		txc->flags2 = 2;
+	}
+
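+	/* Mark every descriptor done so the whole ring reads as free */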
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txq->hw_ring[i].dd = 1;
+		tx_entry[i].mbuf = NULL;
+	}
+
+	txq->tx_tail = 0;
+	txq->tx_head = 0;
+	txq->tx_free = txq->nb_tx_desc - 1;
+}
+
 int
-atl_tx_init(struct rte_eth_dev *eth_dev __rte_unused)
+atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+		   uint16_t nb_tx_desc, unsigned int socket_id,
+		   const struct rte_eth_txconf *tx_conf)
 {
+	struct atl_tx_queue *txq;
+	const struct rte_memzone *mz;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* make sure a valid number of descriptors have been requested */
+	if (nb_tx_desc < AQ_HW_MIN_TX_RING_SIZE ||
+		nb_tx_desc > AQ_HW_MAX_TX_RING_SIZE) {
+		PMD_INIT_LOG(ERR, "Number of Tx descriptors must be "
+			"less than or equal to %d "
+			"and greater than or equal to %d",
+			AQ_HW_MAX_TX_RING_SIZE, AQ_HW_MIN_TX_RING_SIZE);
+		return -EINVAL;
+	}
+
+	/*
+	 * If this queue existed already, free the associated memory. The
+	 * queue cannot be reused in case we need to allocate memory on a
+	 * different socket than was previously used.
+	 */
+	if (dev->data->tx_queues[tx_queue_id] != NULL) {
+		atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+		dev->data->tx_queues[tx_queue_id] = NULL;
+	}
+
+	/* allocate memory for the queue structure */
+	txq = rte_zmalloc_socket("atlantic Tx queue", sizeof(*txq),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL) {
+		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
+		return -ENOMEM;
+	}
+
+	/* setup queue */
+	txq->nb_tx_desc = nb_tx_desc;
+	txq->port_id = dev->data->port_id;
+	txq->queue_id = tx_queue_id;
+	txq->tx_free_thresh = tx_conf->tx_free_thresh;
+
+	/* allocate memory for the software ring */
+	txq->sw_ring = rte_zmalloc_socket("atlantic sw tx ring",
+				nb_tx_desc * sizeof(struct atl_tx_entry),
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->sw_ring == NULL) {
+		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
+		rte_free(txq);
+		return -ENOMEM;
+	}
+
+	/*
+	 * allocate memory for the hardware descriptor ring. A memzone large
+	 * enough to hold the maximum ring size is requested to allow for
+	 * resizing in later calls to the queue setup function.
+	 */
+	mz = rte_eth_dma_zone_reserve(dev, "tx hw_ring", tx_queue_id,
+				HW_ATL_B0_MAX_TXD * sizeof(struct hw_atl_txd_s),
+				128, socket_id);
+	if (mz == NULL) {
+		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
+		rte_free(txq->sw_ring);
+		rte_free(txq);
+		return -ENOMEM;
+	}
+	txq->hw_ring = mz->addr;
+	txq->hw_ring_phys_addr = mz->iova;
+
+	atl_reset_tx_queue(txq);
+
+	dev->data->tx_queues[tx_queue_id] = txq;
 	return 0;
 }
 
 int
+atl_tx_init(struct rte_eth_dev *eth_dev)
+{
+	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct atl_tx_queue *txq;
+	uint64_t base_addr = 0;
+	int i = 0;
+	int err = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		txq = eth_dev->data->tx_queues[i];
+		base_addr = txq->hw_ring_phys_addr;
+
+		err = hw_atl_b0_hw_ring_tx_init(hw, base_addr,
+						txq->queue_id,
+						txq->nb_tx_desc, 0,
+						txq->port_id);
+		if (err) {
+			PMD_INIT_LOG(ERR,
+				     "Initialization of Tx queue %d failed", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+int
 atl_rx_init(struct rte_eth_dev *eth_dev)
 {
 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -313,15 +481,78 @@ atl_rx_queue_release(void *rx_queue)
 	}
 }
 
-uint16_t
-atl_prep_pkts(void *tx_queue __rte_unused,
-	      struct rte_mbuf **tx_pkts __rte_unused,
-	      uint16_t nb_pkts __rte_unused)
+static void
+atl_tx_queue_release_mbufs(struct atl_tx_queue *txq)
 {
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (txq->sw_ring != NULL) {
+		for (i = 0; i < txq->nb_tx_desc; i++) {
+			if (txq->sw_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+				txq->sw_ring[i].mbuf = NULL;
+			}
+		}
+	}
+}
+
+int
+atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (tx_queue_id < dev->data->nb_tx_queues) {
+		hw_atl_b0_hw_ring_tx_start(hw, tx_queue_id);
+
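+		/* Make ring updates visible to HW before moving the tail */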
+		rte_wmb();
+		hw_atl_b0_hw_tx_ring_tail_update(hw, 0, tx_queue_id);
+		dev->data->tx_queue_state[tx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+	} else {
+		return -1;
+	}
+
+	return 0;
+}
+
+int
+atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct atl_tx_queue *txq;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	hw_atl_b0_hw_ring_tx_stop(hw, tx_queue_id);
+
+	atl_tx_queue_release_mbufs(txq);
+	atl_reset_tx_queue(txq);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
 	return 0;
 }
 
 void
+atl_tx_queue_release(void *tx_queue)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	if (tx_queue != NULL) {
+		struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
+
+		atl_tx_queue_release_mbufs(txq);
+		rte_free(txq->sw_ring);
+		rte_free(txq);
+	}
+}
+
+void
 atl_free_queues(struct rte_eth_dev *dev)
 {
 	unsigned int i;
@@ -333,6 +564,12 @@ atl_free_queues(struct rte_eth_dev *dev)
 		dev->data->rx_queues[i] = 0;
 	}
 	dev->data->nb_rx_queues = 0;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		atl_tx_queue_release(dev->data->tx_queues[i]);
+		dev->data->tx_queues[i] = 0;
+	}
+	dev->data->nb_tx_queues = 0;
 }
 
 int
@@ -342,6 +579,13 @@ atl_start_queues(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		if (atl_tx_queue_start(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Start Tx queue %d failed", i);
+			return -1;
+		}
+	}
+
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (atl_rx_queue_start(dev, i) != 0) {
 			PMD_DRV_LOG(ERR, "Start Rx queue %d failed", i);
@@ -359,6 +603,13 @@ atl_stop_queues(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		if (atl_tx_queue_stop(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Stop Tx queue %d failed", i);
+			return -1;
+		}
+	}
+
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (atl_rx_queue_stop(dev, i) != 0) {
 			PMD_DRV_LOG(ERR, "Stop Rx queue %d failed", i);
@@ -369,6 +620,47 @@ atl_stop_queues(struct rte_eth_dev *dev)
 	return 0;
 }
 
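+/*
+ * Validate a burst of mbufs before transmission: check segment count and
+ * offload flags, then let the generic helper fix up checksums for the HW.
+ */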
+uint16_t
+atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	int i, ret;
+	uint64_t ol_flags;
+	struct rte_mbuf *m;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < nb_pkts; i++) {
+		m = tx_pkts[i];
+		ol_flags = m->ol_flags;
+
+		if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
+			rte_errno = EINVAL;
+			return i;
+		}
+
+		if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = ENOTSUP;
+			return i;
+		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+	}
+
+	return i;
+}
+
 static uint64_t
 atl_desc_to_offload_flags(struct atl_rx_queue *rxq,
 			  struct hw_atl_rxd_wb_s *rxd_wb)
@@ -637,12 +929,232 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	return nb_rx;
 }
 
+static void
+atl_xmit_cleanup(struct atl_tx_queue *txq)
+{
+	struct atl_tx_entry *sw_ring;
+	struct hw_atl_txd_s *txd;
+	int to_clean = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (txq != NULL) {
+		sw_ring = txq->sw_ring;
+		int head = txq->tx_head;
+		int cnt;
+
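+		/* Pass 1: count descriptors completed by HW (dd bit set) */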
+		cnt = head;
+		do {
+			txd = &txq->hw_ring[cnt];
+
+			if (txd->dd)
+				to_clean++;
+
+			cnt = (cnt + 1) % txq->nb_tx_desc;
+		} while (cnt != txq->tx_tail);
+
+		if (to_clean == 0)
+			return;
+
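+		/* Pass 2: free mbufs and recycle descriptors up to that count */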
+		while (to_clean) {
+			txd = &txq->hw_ring[head];
+
+			struct atl_tx_entry *tx_entry = &sw_ring[head];
+
+			if (tx_entry->mbuf) {
+				rte_pktmbuf_free_seg(tx_entry->mbuf);
+				tx_entry->mbuf = NULL;
+			}
+
+			if (txd->dd)
+				to_clean--;
+
+			txd->buf_addr = 0;
+			txd->flags = 0;
+
+			head = (head + 1) % txq->nb_tx_desc;
+			txq->tx_free++;
+		}
+
+		txq->tx_head = head;
+	}
+}
+
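+/*
+ * Fill the Tx context descriptor for TSO and/or VLAN insertion.
+ * Returns the command bits to be set in the data descriptors.
+ */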
+static int
+atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
+{
+	uint32_t tx_cmd = 0;
+	uint64_t ol_flags = tx_pkt->ol_flags;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (ol_flags & PKT_TX_TCP_SEG) {
+		PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
+
+		tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
+
+		txc->cmd = 0x4;
+
+		if (ol_flags & PKT_TX_IPV6)
+			txc->cmd |= 0x2;
+
+		txc->l2_len = tx_pkt->l2_len;
+		txc->l3_len = tx_pkt->l3_len;
+		txc->l4_len = tx_pkt->l4_len;
+
+		txc->mss_len = tx_pkt->tso_segsz;
+	}
+
+	if (ol_flags & PKT_TX_VLAN_PKT) {
+		tx_cmd |= tx_desc_cmd_vlan;
+		txc->vlan_tag = tx_pkt->vlan_tci;
+	}
+
+	if (tx_cmd) {
+		txc->type = tx_desc_type_ctx;
+		txc->idx = 0;
+	}
+
+	return tx_cmd;
+}
+
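+/* Request FCS insertion plus any L3/L4 checksum offloads the mbuf asks for */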
+static inline void
+atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
+		       uint32_t tx_cmd)
+{
+	txd->cmd |= tx_desc_cmd_fcs;
+	txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
+	/* L4 csum requested */
+	txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
+	txd->cmd |= tx_cmd;
+}
+
+static inline void
+atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
+	     struct rte_mbuf *tx_pkt)
+{
+	uint32_t pay_len = 0;
+	int tail = 0;
+	struct atl_tx_entry *tx_entry;
+	uint64_t buf_dma_addr;
+	struct rte_mbuf *m_seg;
+	union hw_atl_txc_s *txc = NULL;
+	struct hw_atl_txd_s *txd = NULL;
+	u32 tx_cmd = 0U;
+	int desc_count = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	tail = txq->tx_tail;
+
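+	/*
+	 * The slot at tail is first treated as a context descriptor;
+	 * if no context is needed it is reused as the first data descriptor.
+	 */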
+	txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
+
+	txc->flags1 = 0U;
+	txc->flags2 = 0U;
+
+	tx_cmd = atl_tso_setup(tx_pkt, txc);
+
+	if (tx_cmd) {
+		/* We've consumed the first desc, adjust counters */
+		tail = (tail + 1) % txq->nb_tx_desc;
+		txq->tx_tail = tail;
+		txq->tx_free -= 1;
+
+		txd = &txq->hw_ring[tail];
+		txd->flags = 0U;
+	} else {
+		txd = (struct hw_atl_txd_s *)txc;
+	}
+
+	txd->ct_en = !!tx_cmd;
+
+	txd->type = tx_desc_type_desc;
+
+	atl_setup_csum_offload(tx_pkt, txd, tx_cmd);
+
+	if (tx_cmd)
+		txd->ct_idx = 0;
+
+	pay_len = tx_pkt->pkt_len;
+
+	txd->pay_len = pay_len;
+
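+	/* Emit one data descriptor per mbuf segment */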
+	for (m_seg = tx_pkt; m_seg; m_seg = m_seg->next) {
+		if (desc_count > 0) {
+			txd = &txq->hw_ring[tail];
+			txd->flags = 0U;
+		}
+
+		buf_dma_addr = rte_mbuf_data_iova(m_seg);
+		txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+
+		txd->type = tx_desc_type_desc;
+		txd->len = m_seg->data_len;
+		txd->pay_len = pay_len;
+
+		/* Store mbuf for freeing later */
+		tx_entry = &txq->sw_ring[tail];
+
+		if (tx_entry->mbuf)
+			rte_pktmbuf_free_seg(tx_entry->mbuf);
+		tx_entry->mbuf = m_seg;
+
+		tail = (tail + 1) % txq->nb_tx_desc;
+
+		desc_count++;
+	}
+
+	/* Last descriptor requires EOP and WB */
+	txd->eop = 1U;
+	txd->cmd |= tx_desc_cmd_wb;
+
+	hw_atl_b0_hw_tx_ring_tail_update(hw, tail, txq->queue_id);
+
+	txq->tx_tail = tail;
+
+	txq->tx_free -= desc_count;
+}
 
 uint16_t
-atl_xmit_pkts(void *tx_queue __rte_unused,
-	      struct rte_mbuf **tx_pkts __rte_unused,
-	      uint16_t nb_pkts __rte_unused)
+atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	return 0;
+	struct rte_eth_dev *dev = NULL;
+	struct aq_hw_s *hw = NULL;
+	struct atl_tx_queue *txq = tx_queue;
+	struct rte_mbuf *tx_pkt;
+	uint16_t nb_tx;
+
+	dev = &rte_eth_devices[txq->port_id];
+	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_TX_LOG(DEBUG, "txq%d pkts: %d tx_free=%d tx_tail=%d tx_head=%d",
+		txq->queue_id, nb_pkts, txq->tx_free,
+		txq->tx_tail, txq->tx_head);
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		tx_pkt = *tx_pkts++;
+
+		/* Clean Tx queue if needed */
+		if (txq->tx_free < txq->tx_free_thresh)
+			atl_xmit_cleanup(txq);
+
+		/* Check if we have enough free descriptors */
+		if (txq->tx_free < tx_pkt->nb_segs)
+			break;
+
+		/* check mbuf is valid */
+		if ((tx_pkt->nb_segs == 0) ||
+			((tx_pkt->nb_segs > 1) && (tx_pkt->next == NULL)))
+			break;
+
+		/* Send the packet */
+		atl_xmit_pkt(hw, txq, tx_pkt);
+	}
+
+	PMD_TX_LOG(DEBUG, "atl_xmit_pkts %d transmitted", nb_tx);
+
+	return nb_tx;
 }
 
-- 
2.7.4

Thread overview: 28+ messages
2018-09-29 10:30 [PATCH v3 00/22] net/atlantic: Aquantia aQtion 10G NIC Family DPDK PMD driver Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 01/22] net/atlantic: atlantic PMD driver skeleton Igor Russkikh
2018-10-03 18:48   ` Ferruh Yigit
2018-09-29 10:30 ` [PATCH v3 02/22] net/atlantic: logging macroes and some typedefs Igor Russkikh
2018-10-03 18:49   ` Ferruh Yigit
2018-09-29 10:30 ` [PATCH v3 03/22] net/atlantic: hardware register access routines Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 04/22] net/atlantic: hw_atl register declarations Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 05/22] net/atlantic: firmware operations layer Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 06/22] net/atlantic: b0 hardware layer main logic Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 07/22] net/atlantic: rte device start, stop, initial configuration Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 08/22] net/atlantic: TX/RX function prototypes Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 09/22] net/atlantic: RX side structures and implementation Igor Russkikh
2018-09-29 10:30 ` Igor Russkikh [this message]
2018-09-29 10:30 ` [PATCH v3 11/22] net/atlantic: link status and interrupt management Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 12/22] net/atlantic: device statistics, xstats Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 13/22] net/atlantic: support for RX/TX descriptors information Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 14/22] net/atlantic: promisc and allmulti configuration Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 15/22] net/atlantic: RSS and RETA manipulation API Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 16/22] net/atlantic: flow control configuration Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 17/22] net/atlantic: MAC address manipulations Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 18/22] net/atlantic: VLAN filters and offloads Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 19/22] net/atlantic: eeprom and register manipulation routines Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 20/22] net/atlantic: LED control DPDK and private APIs Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 21/22] net/atlantic: support for read MAC registers for debug purposes Igor Russkikh
2018-09-29 10:30 ` [PATCH v3 22/22] net/atlantic: documentation and rel notes Igor Russkikh
2018-10-03 18:47 ` [PATCH v3 00/22] net/atlantic: Aquantia aQtion 10G NIC Family DPDK PMD driver Ferruh Yigit
2018-10-04  9:42   ` Igor Russkikh
2018-10-04 10:29     ` Ferruh Yigit
