From: Alfredo Cardigliano <cardigliano@ntop.org>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 14/17] net/ionic: add RX and TX handling
Date: Sat, 12 Oct 2019 02:27:37 +0200	[thread overview]
Message-ID: <157084005745.11524.16250957328673127517.stgit@devele> (raw)
In-Reply-To: <157083994018.11524.11276616720287263690.stgit@devele>

Add RX and TX queue setup and handling.

Signed-off-by: Alfredo Cardigliano <cardigliano@ntop.org>
Reviewed-by: Shannon Nelson <snelson@pensando.io>
---
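Notes: the new ops plug into the standard ethdev API. As a minimal
sketch of how an application would exercise them (assuming an
initialized port and an existing mbuf pool named "mbuf_pool"):

    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    /* Configure one RX and one TX queue, then start the port. */
    static int
    setup_port(uint16_t port_id, struct rte_mempool *mbuf_pool)
    {
        struct rte_eth_conf conf = {0};
        int ret;

        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret != 0)
            return ret;

        /* Reaches ionic_dev_rx_queue_setup() */
        ret = rte_eth_rx_queue_setup(port_id, 0, 4096,
                rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
        if (ret != 0)
            return ret;

        /* Reaches ionic_dev_tx_queue_setup() */
        ret = rte_eth_tx_queue_setup(port_id, 0, 4096,
                rte_eth_dev_socket_id(port_id), NULL);
        if (ret != 0)
            return ret;

        /* Starts the queues via ionic_lif_start() */
        return rte_eth_dev_start(port_id);
    }
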
 doc/guides/nics/features/ionic.ini |   10 
 drivers/net/ionic/Makefile         |    1 
 drivers/net/ionic/ionic_dev.h      |    1 
 drivers/net/ionic/ionic_ethdev.c   |  114 ++++
 drivers/net/ionic/ionic_lif.c      |  218 ++++++++
 drivers/net/ionic/ionic_lif.h      |   42 ++
 drivers/net/ionic/ionic_rxtx.c     |  993 ++++++++++++++++++++++++++++++++++++
 drivers/net/ionic/ionic_rxtx.h     |   44 ++
 drivers/net/ionic/meson.build      |    1 
 9 files changed, 1423 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ionic/ionic_rxtx.c
 create mode 100644 drivers/net/ionic/ionic_rxtx.h

diff --git a/doc/guides/nics/features/ionic.ini b/doc/guides/nics/features/ionic.ini
index b3f6c3497..59f0753db 100644
--- a/doc/guides/nics/features/ionic.ini
+++ b/doc/guides/nics/features/ionic.ini
@@ -7,7 +7,12 @@
 Speed capabilities   = Y
 Link status          = Y
 Link status event    = Y
+Queue start/stop     = Y
 MTU update           = Y
+Jumbo frame          = Y
+Scattered Rx         = Y
+LRO                  = Y
+TSO                  = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 Unicast MAC filter   = Y
@@ -16,6 +21,11 @@ RSS key update       = Y
 RSS reta update      = Y
 VLAN filter          = Y
 Flow control         = Y
+CRC offload          = Y
+VLAN offload         = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
+Packet type parsing  = Y
 Linux UIO            = Y
 Linux VFIO           = Y
 x86-64               = Y
diff --git a/drivers/net/ionic/Makefile b/drivers/net/ionic/Makefile
index 7827e2b69..7649d674d 100644
--- a/drivers/net/ionic/Makefile
+++ b/drivers/net/ionic/Makefile
@@ -55,6 +55,7 @@ LDLIBS += -lrte_bus_pci
 #
 SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_mac_api.c
 SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_rx_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_dev.c
 SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_lif.c
diff --git a/drivers/net/ionic/ionic_dev.h b/drivers/net/ionic/ionic_dev.h
index 02fcfdee8..7bbb944b3 100644
--- a/drivers/net/ionic/ionic_dev.h
+++ b/drivers/net/ionic/ionic_dev.h
@@ -25,6 +25,7 @@
 
 #define IONIC_MAX_RING_DESC		32768
 #define IONIC_MIN_RING_DESC		16
+#define IONIC_DEF_TXRX_DESC		4096
 
 #define IONIC_LIFS_MAX			1024
 
diff --git a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c
index 196113089..d91513bc8 100644
--- a/drivers/net/ionic/ionic_ethdev.c
+++ b/drivers/net/ionic/ionic_ethdev.c
@@ -15,6 +15,7 @@
 #include "ionic_mac_api.h"
 #include "ionic_lif.h"
 #include "ionic_ethdev.h"
+#include "ionic_rxtx.h"
 
 static int  eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
 static int  eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev);
@@ -41,6 +42,7 @@ static int  ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 		struct rte_eth_rss_conf *rss_conf);
 static int  ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 		struct rte_eth_rss_conf *rss_conf);
+static int  ionic_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask);
 
 int ionic_logtype_init;
 int ionic_logtype_driver;
@@ -52,6 +54,20 @@ static const struct rte_pci_id pci_id_ionic_map[] = {
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
+static const struct rte_eth_desc_lim rx_desc_lim = {
+	.nb_max = IONIC_MAX_RING_DESC,
+	.nb_min = IONIC_MIN_RING_DESC,
+	.nb_align = 1,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = IONIC_MAX_RING_DESC,
+	.nb_min = IONIC_MIN_RING_DESC,
+	.nb_align = 1,
+	.nb_seg_max = IONIC_TX_MAX_SG_ELEMS,
+	.nb_mtu_seg_max = IONIC_TX_MAX_SG_ELEMS,
+};
+
 static const struct eth_dev_ops ionic_eth_dev_ops = {
 	.dev_infos_get          = ionic_dev_info_get,
 	.dev_configure          = ionic_dev_configure,
@@ -76,6 +92,17 @@ static const struct eth_dev_ops ionic_eth_dev_ops = {
 	.reta_query             = ionic_dev_rss_reta_query,
 	.rss_hash_conf_get      = ionic_dev_rss_hash_conf_get,
 	.rss_hash_update        = ionic_dev_rss_hash_update,
+	.rxq_info_get           = ionic_rxq_info_get,
+	.txq_info_get           = ionic_txq_info_get,
+	.rx_queue_setup         = ionic_dev_rx_queue_setup,
+	.rx_queue_release       = ionic_dev_rx_queue_release,
+	.rx_queue_start         = ionic_dev_rx_queue_start,
+	.rx_queue_stop          = ionic_dev_rx_queue_stop,
+	.tx_queue_setup         = ionic_dev_tx_queue_setup,
+	.tx_queue_release       = ionic_dev_tx_queue_release,
+	.tx_queue_start         = ionic_dev_tx_queue_start,
+	.tx_queue_stop          = ionic_dev_tx_queue_stop,
+	.vlan_offload_set       = ionic_vlan_offload_set,
 };
 
 /*
@@ -266,6 +293,50 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
 		ETH_LINK_SPEED_50G |
 		ETH_LINK_SPEED_100G;
 
+	/*
+	 * Per-queue capabilities. Most of the offloads are enabled by
+	 * default on the port and can be applied to selected queues by
+	 * setting the corresponding packet flags at runtime.
+	 */
+
+	dev_info->rx_queue_offload_capa =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		0;
+
+	dev_info->tx_queue_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		0;
+
+	/*
+	 * Per-port capabilities
+	 * See ionic_lif_set_features() to request and check supported features
+	 */
+
+	dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_SCATTER |
+		0;
+
+	dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa |
+		DEV_TX_OFFLOAD_MULTI_SEGS |
+		DEV_TX_OFFLOAD_TCP_TSO |
+		0;
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
+
+	/* Driver-preferred Rx/Tx parameters */
+	dev_info->default_rxportconf.burst_size = 32;
+	dev_info->default_txportconf.burst_size = 32;
+	dev_info->default_rxportconf.nb_queues = 1;
+	dev_info->default_txportconf.nb_queues = 1;
+	dev_info->default_rxportconf.ring_size = IONIC_DEF_TXRX_DESC;
+	dev_info->default_txportconf.ring_size = IONIC_DEF_TXRX_DESC;
+
 	return 0;
 }
 
@@ -478,6 +549,44 @@ ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
 	return 0;
 }
 
+static int
+ionic_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+	int i;
+
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+				struct ionic_qcq *rxq =
+						eth_dev->data->rx_queues[i];
+				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+			}
+			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
+		} else {
+			for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+				struct ionic_qcq *rxq =
+						eth_dev->data->rx_queues[i];
+				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+			}
+			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
+		}
+	}
+
+	if (mask & ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+			lif->features |= IONIC_ETH_HW_VLAN_RX_FILTER;
+		else
+			lif->features &= ~IONIC_ETH_HW_VLAN_RX_FILTER;
+	}
+
+	ionic_lif_set_features(lif);
+
+	return 0;
+}
+
 static int
 ionic_dev_configure(struct rte_eth_dev *eth_dev)
 {
@@ -609,6 +718,9 @@ eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params)
 	ionic_init_print_call();
 
 	eth_dev->dev_ops = &ionic_eth_dev_ops;
+	eth_dev->rx_pkt_burst = &ionic_recv_pkts;
+	eth_dev->tx_pkt_burst = &ionic_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &ionic_prep_pkts;
 
 	/* Multi-process not supported, primary does initialization anyway */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -676,6 +788,8 @@ eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev)
 	ionic_lif_free(lif);
 
 	eth_dev->dev_ops = NULL;
+	eth_dev->rx_pkt_burst = NULL;
+	eth_dev->tx_pkt_burst = NULL;
 
 	return 0;
 }
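The vlan_offload_set op above is reached through
rte_eth_dev_set_vlan_offload(). A minimal sketch of the application
side (port_id assumed valid):

    /* Enable VLAN stripping at runtime; lands in ionic_vlan_offload_set() */
    static int
    enable_vlan_strip(uint16_t port_id)
    {
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
            return mask;

        return rte_eth_dev_set_vlan_offload(port_id,
                mask | ETH_VLAN_STRIP_OFFLOAD);
    }

The ethdev layer updates dev_conf.rxmode.offloads and passes only the
changed bits (e.g. ETH_VLAN_STRIP_MASK) down to the driver.
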
diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index 045994787..a8eed76bd 100644
--- a/drivers/net/ionic/ionic_lif.c
+++ b/drivers/net/ionic/ionic_lif.c
@@ -10,6 +10,7 @@
 #include "ionic_lif.h"
 #include "ionic_ethdev.h"
 #include "ionic_rx_filter.h"
+#include "ionic_rxtx.h"
 
 static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr);
 static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr);
@@ -583,6 +584,52 @@ ionic_qcq_free(struct ionic_qcq *qcq)
 	rte_free(qcq);
 }
 
+int
+ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t nrxq_descs,
+		struct ionic_qcq **qcq)
+{
+	uint32_t flags;
+	int err = -ENOMEM;
+
+	flags = IONIC_QCQ_F_SG;
+	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, index, "rx", flags,
+			nrxq_descs,
+			sizeof(struct ionic_rxq_desc),
+			sizeof(struct ionic_rxq_comp),
+			sizeof(struct ionic_rxq_sg_desc),
+			lif->kern_pid, &lif->rxqcqs[index]);
+
+	if (err)
+		return err;
+
+	*qcq = lif->rxqcqs[index];
+
+	return 0;
+}
+
+int
+ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t ntxq_descs,
+		struct ionic_qcq **qcq)
+{
+	uint32_t flags;
+	int err = -ENOMEM;
+
+	flags = IONIC_QCQ_F_SG;
+	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, index, "tx", flags,
+			ntxq_descs,
+			sizeof(struct ionic_txq_desc),
+			sizeof(struct ionic_txq_comp),
+			sizeof(struct ionic_txq_sg_desc),
+			lif->kern_pid, &lif->txqcqs[index]);
+
+	if (err)
+		return err;
+
+	*qcq = lif->txqcqs[index];
+
+	return 0;
+}
+
 static int
 ionic_admin_qcq_alloc(struct ionic_lif *lif)
 {
@@ -660,6 +707,22 @@ ionic_lif_alloc(struct ionic_lif *lif)
 		return -ENOMEM;
 	}
 
+	lif->txqcqs = rte_zmalloc("ionic", sizeof(*lif->txqcqs) *
+			adapter->max_ntxqs_per_lif, 0);
+
+	if (!lif->txqcqs) {
+		ionic_init_print(ERR, "Cannot allocate tx queues array");
+		return -ENOMEM;
+	}
+
+	lif->rxqcqs = rte_zmalloc("ionic", sizeof(*lif->rxqcqs) *
+			adapter->max_nrxqs_per_lif, 0);
+
+	if (!lif->rxqcqs) {
+		ionic_init_print(ERR, "Cannot allocate rx queues array");
+		return -ENOMEM;
+	}
+
 	ionic_init_print(DEBUG, "Allocating Notify Queue");
 
 	err = ionic_notify_qcq_alloc(lif);
@@ -710,6 +773,16 @@ ionic_lif_free(struct ionic_lif *lif)
 		lif->adminqcq = NULL;
 	}
 
+	if (lif->txqcqs) {
+		rte_free(lif->txqcqs);
+		lif->txqcqs = NULL;
+	}
+
+	if (lif->rxqcqs) {
+		rte_free(lif->rxqcqs);
+		lif->rxqcqs = NULL;
+	}
+
 	if (lif->info)
 		rte_memzone_free(lif->info_z);
 }
@@ -820,6 +893,18 @@ ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
 	qcq->flags &= ~IONIC_QCQ_F_INITED;
 }
 
+void
+ionic_lif_txq_deinit(struct ionic_qcq *qcq)
+{
+	ionic_lif_qcq_deinit(qcq->lif, qcq);
+}
+
+void
+ionic_lif_rxq_deinit(struct ionic_qcq *qcq)
+{
+	ionic_lif_qcq_deinit(qcq->lif, qcq);
+}
+
 bool
 ionic_adminq_service(struct ionic_cq *cq, uint32_t cq_desc_index,
 		void *cb_arg __rte_unused)
@@ -1085,6 +1170,102 @@ ionic_lif_set_features(struct ionic_lif *lif)
 	return 0;
 }
 
+int
+ionic_lif_txq_init(struct ionic_qcq *qcq)
+{
+	struct ionic_queue *q = &qcq->q;
+	struct ionic_lif *lif = qcq->lif;
+	struct ionic_cq *cq = &qcq->cq;
+	struct ionic_admin_ctx ctx = {
+		.pending_work = true,
+		.cmd.q_init = {
+			.opcode = IONIC_CMD_Q_INIT,
+			.lif_index = lif->index,
+			.type = q->type,
+			.index = q->index,
+			.flags = IONIC_QINIT_F_SG,
+			.intr_index = cq->bound_intr->index,
+			.pid = q->pid,
+			.ring_size = ilog2(q->num_descs),
+			.ring_base = q->base_pa,
+			.cq_ring_base = cq->base_pa,
+			.sg_ring_base = q->sg_base_pa,
+		},
+	};
+	int err;
+
+	ionic_init_print(DEBUG, "txq_init.pid %d", ctx.cmd.q_init.pid);
+	ionic_init_print(DEBUG, "txq_init.index %d", ctx.cmd.q_init.index);
+	ionic_init_print(DEBUG, "txq_init.ring_base 0x%" PRIx64,
+			ctx.cmd.q_init.ring_base);
+	ionic_init_print(DEBUG, "txq_init.ring_size %d",
+			ctx.cmd.q_init.ring_size);
+
+	err = ionic_adminq_post_wait(qcq->lif, &ctx);
+	if (err)
+		return err;
+
+	q->hw_type = ctx.comp.q_init.hw_type;
+	q->hw_index = ctx.comp.q_init.hw_index;
+	q->db = ionic_db_map(lif, q);
+
+	ionic_init_print(DEBUG, "txq->hw_type %d", q->hw_type);
+	ionic_init_print(DEBUG, "txq->hw_index %d", q->hw_index);
+	ionic_init_print(DEBUG, "txq->db %p", q->db);
+
+	qcq->flags |= IONIC_QCQ_F_INITED;
+
+	return 0;
+}
+
+int
+ionic_lif_rxq_init(struct ionic_qcq *qcq)
+{
+	struct ionic_queue *q = &qcq->q;
+	struct ionic_lif *lif = qcq->lif;
+	struct ionic_cq *cq = &qcq->cq;
+	struct ionic_admin_ctx ctx = {
+		.pending_work = true,
+		.cmd.q_init = {
+			.opcode = IONIC_CMD_Q_INIT,
+			.lif_index = lif->index,
+			.type = q->type,
+			.index = q->index,
+			.flags = IONIC_QINIT_F_SG,
+			.intr_index = cq->bound_intr->index,
+			.pid = q->pid,
+			.ring_size = ilog2(q->num_descs),
+			.ring_base = q->base_pa,
+			.cq_ring_base = cq->base_pa,
+			.sg_ring_base = q->sg_base_pa,
+		},
+	};
+	int err;
+
+	ionic_init_print(DEBUG, "rxq_init.pid %d", ctx.cmd.q_init.pid);
+	ionic_init_print(DEBUG, "rxq_init.index %d", ctx.cmd.q_init.index);
+	ionic_init_print(DEBUG, "rxq_init.ring_base 0x%" PRIx64,
+			ctx.cmd.q_init.ring_base);
+	ionic_init_print(DEBUG, "rxq_init.ring_size %d",
+			ctx.cmd.q_init.ring_size);
+
+	err = ionic_adminq_post_wait(qcq->lif, &ctx);
+	if (err)
+		return err;
+
+	q->hw_type = ctx.comp.q_init.hw_type;
+	q->hw_index = ctx.comp.q_init.hw_index;
+	q->db = ionic_db_map(lif, q);
+
+	qcq->flags |= IONIC_QCQ_F_INITED;
+
+	ionic_init_print(DEBUG, "rxq->hw_type %d", q->hw_type);
+	ionic_init_print(DEBUG, "rxq->hw_index %d", q->hw_index);
+	ionic_init_print(DEBUG, "rxq->db %p", q->db);
+
+	return 0;
+}
+
 static int
 ionic_station_set(struct ionic_lif *lif)
 {
@@ -1169,7 +1350,17 @@ ionic_lif_init(struct ionic_lif *lif)
 	if (err)
 		goto err_out_adminq_deinit;
 
-	lif->features = 0;
+	lif->features =
+		  IONIC_ETH_HW_VLAN_TX_TAG
+		| IONIC_ETH_HW_VLAN_RX_STRIP
+		| IONIC_ETH_HW_VLAN_RX_FILTER
+		| IONIC_ETH_HW_RX_HASH
+		| IONIC_ETH_HW_TX_SG
+		| IONIC_ETH_HW_RX_SG
+		| IONIC_ETH_HW_RX_CSUM
+		| IONIC_ETH_HW_TSO
+		| IONIC_ETH_HW_TSO_IPV6
+		| IONIC_ETH_HW_TSO_ECN;
 
 	err = ionic_lif_set_features(lif);
 
@@ -1250,6 +1441,7 @@ int
 ionic_lif_start(struct ionic_lif *lif)
 {
 	uint32_t rx_mode = 0;
+	uint32_t i;
 	int err;
 
 	ionic_init_print(DEBUG, "Setting RSS configuration on port %u",
@@ -1270,6 +1462,30 @@ ionic_lif_start(struct ionic_lif *lif)
 
 	ionic_set_rx_mode(lif, rx_mode);
 
+	ionic_init_print(DEBUG, "Starting %u RX queues and %u TX queues "
+			"on port %u",
+			lif->nrxqcqs, lif->ntxqcqs, lif->port_id);
+
+	for (i = 0; i < lif->nrxqcqs; i++) {
+		struct ionic_qcq *rxq = lif->rxqcqs[i];
+		if (!rxq->deferred_start) {
+			err = ionic_dev_rx_queue_start(lif->eth_dev, i);
+
+			if (err)
+				return err;
+		}
+	}
+
+	for (i = 0; i < lif->ntxqcqs; i++) {
+		struct ionic_qcq *txq = lif->txqcqs[i];
+		if (!txq->deferred_start) {
+			err = ionic_dev_tx_queue_start(lif->eth_dev, i);
+
+			if (err)
+				return err;
+		}
+	}
+
 	ionic_link_status_check(lif);
 
 	/* Carrier ON here */
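Note that the q_init commands above encode the ring size as a log2
exponent, which is why queue setup insists on power-of-two descriptor
counts. A hypothetical stand-alone equivalent of the ilog2() used
here:

    /* log2 of a power-of-two ring size: 4096 descriptors -> 12 */
    static inline uint8_t
    ring_size_log2(uint32_t num_descs)
    {
        uint8_t n = 0;

        while (num_descs >>= 1)
            n++;
        return n;
    }
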
diff --git a/drivers/net/ionic/ionic_lif.h b/drivers/net/ionic/ionic_lif.h
index faf2f6294..a72196de8 100644
--- a/drivers/net/ionic/ionic_lif.h
+++ b/drivers/net/ionic/ionic_lif.h
@@ -25,6 +25,26 @@
 	IONIC_RSS_TYPE_IPV6_TCP | \
 	IONIC_RSS_TYPE_IPV6_UDP)
 
+#define IONIC_GET_SG_CNTR_IDX(num_sg_elems)	(num_sg_elems)
+
+struct ionic_tx_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t drop;
+	uint64_t stop;
+	uint64_t tso;
+	uint64_t frags;
+};
+
+struct ionic_rx_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t no_cb_arg;
+	uint64_t bad_cq_status;
+	uint64_t no_room;
+	uint64_t bad_len;
+};
+
 #define IONIC_QCQ_F_INITED	BIT(0)
 #define IONIC_QCQ_F_SG		BIT(1)
 #define IONIC_QCQ_F_INTR	BIT(2)
@@ -32,18 +52,28 @@
 
 /* Queue / Completion Queue */
 struct ionic_qcq {
+	uint64_t offloads;
 	struct ionic_queue q;        /**< Queue */
 	struct ionic_cq cq;          /**< Completion Queue */
 	struct ionic_lif *lif;       /**< LIF */
 	struct rte_mempool *mb_pool; /**< mbuf pool to populate the RX ring */
+	union {
+		struct ionic_tx_stats tx;
+		struct ionic_rx_stats rx;
+	} stats;
 	const struct rte_memzone *base_z;
 	void *base;
 	rte_iova_t base_pa;
 	uint32_t total_size;
 	uint32_t flags;
 	struct ionic_intr_info intr;
+	bool deferred_start;
 };
 
+#define IONIC_Q_TO_QCQ(q)	container_of(q, struct ionic_qcq, q)
+#define IONIC_Q_TO_TX_STATS(q)	(&IONIC_Q_TO_QCQ(q)->stats.tx)
+#define IONIC_Q_TO_RX_STATS(q)	(&IONIC_Q_TO_QCQ(q)->stats.rx)
+
 #define IONIC_LIF_F_INITED		BIT(0)
 #define IONIC_LIF_F_LINK_CHECK_NEEDED	BIT(1)
 
@@ -64,6 +94,8 @@ struct ionic_lif {
 	rte_spinlock_t adminq_service_lock;
 	struct ionic_qcq *adminqcq;
 	struct ionic_qcq *notifyqcq;
+	struct ionic_qcq **txqcqs;
+	struct ionic_qcq **rxqcqs;
 	struct ionic_rx_filters rx_filters;
 	struct ionic_doorbell __iomem *kern_dbpage;
 	uint64_t last_eid;
@@ -122,11 +154,21 @@ int ionic_dev_promiscuous_disable(struct rte_eth_dev *dev);
 int ionic_dev_allmulticast_enable(struct rte_eth_dev *dev);
 int ionic_dev_allmulticast_disable(struct rte_eth_dev *dev);
 
+int ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t index,
+		uint16_t nrxq_descs, struct ionic_qcq **qcq);
+int ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t index,
+		uint16_t ntxq_descs, struct ionic_qcq **qcq);
 void ionic_qcq_free(struct ionic_qcq *qcq);
 
 int ionic_qcq_enable(struct ionic_qcq *qcq);
 int ionic_qcq_disable(struct ionic_qcq *qcq);
 
+int ionic_lif_rxq_init(struct ionic_qcq *qcq);
+void ionic_lif_rxq_deinit(struct ionic_qcq *qcq);
+
+int ionic_lif_txq_init(struct ionic_qcq *qcq);
+void ionic_lif_txq_deinit(struct ionic_qcq *qcq);
+
 int ionic_lif_rss_config(struct ionic_lif *lif, const uint16_t types,
 		const uint8_t *key, const uint32_t *indir);
 
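IONIC_Q_TO_QCQ() works because struct ionic_queue is embedded in
struct ionic_qcq: container_of() subtracts the member offset to
recover the parent. A reduced illustration with simplified types:

    #include <stddef.h>

    struct queue { int head; };
    struct qcq { long stats; struct queue q; };

    #define Q_TO_QCQ(qp) \
        ((struct qcq *)((char *)(qp) - offsetof(struct qcq, q)))

    /* Given only &some_qcq.q, Q_TO_QCQ() yields &some_qcq, so the
     * per-queue stats unions stay reachable from the datapath. */
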
diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
new file mode 100644
index 000000000..1a7313cc3
--- /dev/null
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -0,0 +1,993 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_ip.h>
+#include <rte_net.h>
+
+#include "ionic_logs.h"
+#include "ionic_mac_api.h"
+#include "ionic_ethdev.h"
+#include "ionic_lif.h"
+#include "ionic_rxtx.h"
+
+#define IONIC_RX_RING_DOORBELL_STRIDE		(32 - 1)
+
+/*********************************************************************
+ *
+ *  TX functions
+ *
+ **********************************************************************/
+
+void
+ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+		struct rte_eth_txq_info *qinfo)
+{
+	struct ionic_qcq *txq = dev->data->tx_queues[queue_id];
+	struct ionic_queue *q = &txq->q;
+
+	qinfo->nb_desc = q->num_descs;
+	qinfo->conf.offloads = txq->offloads;
+	qinfo->conf.tx_deferred_start = txq->deferred_start;
+}
+
+static inline void __attribute__((cold))
+ionic_tx_flush(struct ionic_cq *cq)
+{
+	struct ionic_queue *q = cq->bound_q;
+	struct ionic_desc_info *q_desc_info;
+	struct rte_mbuf *txm, *next;
+	struct ionic_txq_comp *cq_desc_base = cq->base;
+	struct ionic_txq_comp *cq_desc;
+	uint32_t comp_index = (uint32_t)-1;
+
+	cq_desc = &cq_desc_base[cq->tail_idx];
+	while (color_match(cq_desc->color, cq->done_color)) {
+		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+
+		/* Prefetch the next 4 descriptors (not really useful here) */
+		if ((cq->tail_idx & 0x3) == 0)
+			rte_prefetch0(&cq_desc_base[cq->tail_idx]); /* cq desc */
+
+		if (cq->tail_idx == 0)
+			cq->done_color = !cq->done_color;
+
+		comp_index = cq_desc->comp_index;
+
+		cq_desc = &cq_desc_base[cq->tail_idx];
+	}
+
+	if (comp_index != (uint32_t)-1) {
+		while (q->tail_idx != comp_index) {
+			q_desc_info = &q->info[q->tail_idx];
+
+			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+
+			/* Prefetch the next 4 descriptors */
+			if ((q->tail_idx & 0x3) == 0)
+				/* q desc info */
+				rte_prefetch0(&q->info[q->tail_idx]);
+
+			/*
+			 * Note: rte_pktmbuf_free() could be used here, but
+			 * freeing the segments directly is faster
+			 */
+			txm = q_desc_info->cb_arg;
+			while (txm != NULL) {
+				next = txm->next;
+				rte_pktmbuf_free_seg(txm);
+				txm = next;
+			}
+		}
+	}
+}
+
+void __attribute__((cold))
+ionic_dev_tx_queue_release(void *tx_queue)
+{
+	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
+
+	ionic_init_print_call();
+
+	ionic_qcq_free(txq);
+}
+
+int __attribute__((cold))
+ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	struct ionic_qcq *txq;
+
+	ionic_init_print_call();
+
+	txq = eth_dev->data->tx_queues[tx_queue_id];
+
+	/*
+	 * Note: ideally we would post a NOP Tx descriptor and wait for
+	 * its completion before disabling the Tx queue
+	 */
+
+	ionic_qcq_disable(txq);
+
+	ionic_tx_flush(&txq->cq);
+
+	ionic_lif_txq_deinit(txq);
+
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+int __attribute__((cold))
+ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		uint16_t nb_desc, uint32_t socket_id,
+		const struct rte_eth_txconf *tx_conf)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct ionic_qcq *txq;
+	uint64_t offloads;
+	int err;
+
+	ionic_init_print_call();
+
+	ionic_init_print(DEBUG, "Configuring TX queue %u with %u descriptors",
+		tx_queue_id, nb_desc);
+
+	if (tx_queue_id >= lif->ntxqcqs) {
+		ionic_init_print(ERR, "Queue index %u not available "
+				"(max %u queues)",
+				tx_queue_id, lif->ntxqcqs);
+		return -EINVAL;
+	}
+
+	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
+
+	/* Validate number of transmit descriptors */
+	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC ||
+			nb_desc > IONIC_MAX_RING_DESC)
+		return -EINVAL; /* or fall back to IONIC_DEF_TXRX_DESC */
+
+	/* Free memory prior to re-allocation if needed... */
+	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
+		ionic_dev_tx_queue_release(eth_dev->data->tx_queues[tx_queue_id]);
+		eth_dev->data->tx_queues[tx_queue_id] = NULL;
+	}
+
+	err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
+
+	if (err) {
+		ionic_init_print(ERR, "Queue allocation failure");
+		return -EINVAL;
+	}
+
+	/* Do not start queue with rte_eth_dev_start() */
+	txq->deferred_start = tx_conf->tx_deferred_start;
+
+	txq->offloads = offloads;
+
+	eth_dev->data->tx_queues[tx_queue_id] = txq;
+
+	return 0;
+}
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int __attribute__((cold))
+ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	struct ionic_qcq *txq;
+	int err;
+
+	ionic_init_print_call();
+
+	txq = eth_dev->data->tx_queues[tx_queue_id];
+
+	err = ionic_lif_txq_init(txq);
+
+	if (err)
+		return err;
+
+	ionic_qcq_enable(txq);
+
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+static void
+ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
+		struct rte_mbuf *txm,
+		rte_iova_t addr, uint8_t nsge, uint16_t len,
+		uint32_t hdrlen, uint32_t mss,
+		uint16_t vlan_tci, bool has_vlan,
+		bool start, bool done)
+{
+	uint8_t flags = 0;
+	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
+	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
+	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
+
+	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
+					flags, nsge, addr);
+	desc->len = len;
+	desc->vlan_tci = vlan_tci;
+	desc->hdr_len = hdrlen;
+	desc->mss = mss;
+
+	ionic_q_post(q, done, NULL, done ? txm : NULL);
+}
+
+static struct ionic_txq_desc *
+ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem)
+{
+	struct ionic_txq_desc *desc_base = q->base;
+	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
+	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
+	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
+
+	*elem = sg_desc->elems;
+	return desc;
+}
+
+static int
+ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm, uint64_t offloads,
+		bool not_xmit_more)
+{
+	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
+	struct ionic_txq_desc *desc;
+	struct ionic_txq_sg_elem *elem;
+	struct rte_mbuf *txm_seg;
+	uint64_t desc_addr;
+	uint16_t desc_len;
+	uint8_t desc_nsge;
+	uint32_t hdrlen;
+	uint32_t mss = txm->tso_segsz;
+	uint32_t frag_left = 0;
+	uint32_t left;
+	uint32_t seglen;
+	uint32_t len;
+	uint32_t offset = 0;
+	bool start, done;
+	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
+	uint16_t vlan_tci = txm->vlan_tci;
+
+	hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
+
+	seglen = hdrlen + mss;
+	left = txm->data_len;
+
+	desc = ionic_tx_tso_next(q, &elem);
+	start = true;
+
+	/* Chop data up into desc segments */
+
+	while (left > 0) {
+		len = RTE_MIN(seglen, left);
+		frag_left = seglen - len;
+		desc_addr = rte_cpu_to_le_64(
+				rte_mbuf_data_iova_default(txm) + offset);
+		desc_len = len;
+		desc_nsge = 0;
+		left -= len;
+		offset += len;
+		if (txm->nb_segs > 1 && frag_left > 0)
+			continue;
+		done = (txm->nb_segs == 1 && left == 0);
+		ionic_tx_tso_post(q, desc, txm,
+				desc_addr, desc_nsge, desc_len,
+				hdrlen, mss,
+				vlan_tci, has_vlan,
+				start, done && not_xmit_more);
+		desc = ionic_tx_tso_next(q, &elem);
+		start = false;
+		seglen = mss;
+	}
+
+	/* Chop frags into desc segments */
+
+	txm_seg = txm->next;
+	while (txm_seg != NULL) {
+		offset = 0;
+		left = txm_seg->data_len;
+		stats->frags++;
+
+		while (left > 0) {
+			elem->addr = rte_cpu_to_le_64(
+					rte_mbuf_data_iova(txm_seg) + offset);
+			if (frag_left > 0) {
+				len = RTE_MIN(frag_left, left);
+				frag_left -= len;
+				elem->len = len;
+				elem++;
+				desc_nsge++;
+			} else {
+				len = RTE_MIN(mss, left);
+				frag_left = mss - len;
+				desc_addr = rte_cpu_to_le_64(
+					rte_mbuf_data_iova(txm_seg) + offset);
+				desc_len = len;
+				desc_nsge = 0;
+			}
+			left -= len;
+			offset += len;
+			if (txm_seg->next != NULL && frag_left > 0)
+				continue;
+			done = (txm_seg->next == NULL && left == 0);
+			ionic_tx_tso_post(q, desc, txm_seg,
+					desc_addr, desc_nsge, desc_len,
+					hdrlen, mss,
+					vlan_tci, has_vlan,
+					start, done && not_xmit_more);
+			desc = ionic_tx_tso_next(q, &elem);
+			start = false;
+		}
+
+		txm_seg = txm_seg->next;
+	}
+
+	stats->tso++;
+
+	return 0;
+}
+
+static int
+ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm, uint64_t offloads,
+		bool not_xmit_more)
+{
+	struct ionic_txq_desc *desc_base = q->base;
+	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
+	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
+	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
+	struct ionic_txq_sg_elem *elem = sg_desc->elems;
+	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
+	struct rte_mbuf *txm_seg;
+	bool has_vlan;
+	uint64_t ol_flags = txm->ol_flags;
+	uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
+	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
+	uint8_t flags = 0;
+
+	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
+
+	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
+
+	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
+	desc->len = txm->data_len;
+	desc->vlan_tci = txm->vlan_tci;
+
+	txm_seg = txm->next;
+	while (txm_seg != NULL) {
+		elem->len = txm_seg->data_len;
+		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
+		stats->frags++;
+		elem++;
+		txm_seg = txm_seg->next;
+	}
+
+	ionic_q_post(q, not_xmit_more, NULL, txm);
+
+	return 0;
+}
+
+uint16_t
+ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
+	struct ionic_queue *q = &txq->q;
+	struct ionic_cq *cq = &txq->cq;
+	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
+	uint32_t next_q_head_idx;
+	uint32_t bytes_tx = 0;
+	uint16_t nb_tx = 0;
+	int err;
+	bool last;
+
+	/* Cleaning old buffers */
+	ionic_tx_flush(cq);
+
+	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
+		stats->stop += nb_pkts;
+		return 0;
+	}
+
+	while (nb_tx < nb_pkts) {
+		last = (nb_tx == (nb_pkts - 1));
+
+		next_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1);
+		if ((next_q_head_idx & 0x3) == 0) {
+			struct ionic_txq_desc *desc_base = q->base;
+			rte_prefetch0(&desc_base[next_q_head_idx]); /* q desc */
+			rte_prefetch0(&q->info[next_q_head_idx]);   /* q desc info */
+		}
+
+		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
+			err = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads,
+					last);
+		else
+			err = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last);
+
+		if (err) {
+			stats->drop += nb_pkts - nb_tx;
+			if (nb_tx > 0)
+				ionic_q_flush(q);
+			break;
+		}
+
+		bytes_tx += tx_pkts[nb_tx]->pkt_len;
+		nb_tx++;
+	}
+
+	stats->packets += nb_tx;
+	stats->bytes += bytes_tx;
+
+	return nb_tx;
+}
+
+/*********************************************************************
+ *
+ *  TX prep functions
+ *
+ **********************************************************************/
+
+#define IONIC_TX_OFFLOAD_MASK (	\
+	PKT_TX_IPV4 |		\
+	PKT_TX_IPV6 |		\
+	PKT_TX_VLAN |		\
+	PKT_TX_TCP_SEG |	\
+	PKT_TX_L4_MASK)
+
+#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
+	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
+
+uint16_t
+ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct rte_mbuf *txm;
+	uint64_t offloads;
+	int i = 0;
+
+	for (i = 0; i < nb_pkts; i++) {
+		txm = tx_pkts[i];
+
+		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) {
+			rte_errno = EINVAL;
+			break;
+		}
+
+		offloads = txm->ol_flags;
+
+		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = ENOTSUP;
+			break;
+		}
+	}
+
+	return i;
+}
+
+/*********************************************************************
+ *
+ *  RX functions
+ *
+ **********************************************************************/
+
+static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
+		struct rte_mbuf *mbuf);
+
+void
+ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+		struct rte_eth_rxq_info *qinfo)
+{
+	struct ionic_qcq *rxq = dev->data->rx_queues[queue_id];
+	struct ionic_queue *q = &rxq->q;
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = q->num_descs;
+	qinfo->conf.rx_deferred_start = rxq->deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
+}
+
+static void __attribute__((cold))
+ionic_rx_empty(struct ionic_queue *q)
+{
+	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
+	struct ionic_desc_info *cur;
+	struct rte_mbuf *mbuf;
+
+	while (q->tail_idx != q->head_idx) {
+		cur = &q->info[q->tail_idx];
+		mbuf = cur->cb_arg;
+		rte_mempool_put(rxq->mb_pool, mbuf);
+
+		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+	}
+}
+
+void __attribute__((cold))
+ionic_dev_rx_queue_release(void *rx_queue)
+{
+	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
+
+	ionic_init_print_call();
+
+	ionic_rx_empty(&rxq->q);
+
+	ionic_qcq_free(rxq);
+}
+
+int __attribute__((cold))
+ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+			 uint16_t rx_queue_id,
+			 uint16_t nb_desc,
+			 uint32_t socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct ionic_qcq *rxq;
+	uint64_t offloads;
+	int err;
+
+	ionic_init_print_call();
+
+	ionic_init_print(DEBUG, "Configuring RX queue %u with %u descriptors",
+			rx_queue_id, nb_desc);
+
+	if (rx_queue_id >= lif->nrxqcqs) {
+		ionic_init_print(ERR,
+				"Queue index %u not available (max %u queues)",
+				rx_queue_id, lif->nrxqcqs);
+		return -EINVAL;
+	}
+
+	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
+
+	/* Validate number of receive descriptors */
+	if (!rte_is_power_of_2(nb_desc) ||
+			nb_desc < IONIC_MIN_RING_DESC ||
+			nb_desc > IONIC_MAX_RING_DESC) {
+		ionic_init_print(ERR,
+				"Bad number of descriptors (%u) for queue %u "
+				"(min: %u, max: %u)",
+				nb_desc, rx_queue_id,
+				IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC);
+		return -EINVAL; /* or fall back to IONIC_DEF_TXRX_DESC */
+	}
+
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+		eth_dev->data->scattered_rx = 1;
+
+	/* Free memory prior to re-allocation if needed... */
+	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
+		ionic_dev_rx_queue_release(eth_dev->data->rx_queues[rx_queue_id]);
+		eth_dev->data->rx_queues[rx_queue_id] = NULL;
+	}
+
+	err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq);
+
+	if (err) {
+		ionic_init_print(ERR, "Queue allocation failure");
+		return -EINVAL;
+	}
+
+	rxq->mb_pool = mp;
+
+	/*
+	 * Note: the interface does not currently support
+	 * DEV_RX_OFFLOAD_KEEP_CRC. Once the adapter is able to keep the
+	 * CRC, also account for RTE_ETHER_CRC_LEN and subtract it from
+	 * the length of all received packets:
+	 * if (eth_dev->data->dev_conf.rxmode.offloads &
+	 *     DEV_RX_OFFLOAD_KEEP_CRC)
+	 *   rxq->crc_len = RTE_ETHER_CRC_LEN;
+	 */
+
+	/* Do not start queue with rte_eth_dev_start() */
+	rxq->deferred_start = rx_conf->rx_deferred_start;
+
+	rxq->offloads = offloads;
+
+	eth_dev->data->rx_queues[rx_queue_id] = rxq;
+
+	return 0;
+}
+
+static void
+ionic_rx_clean(struct ionic_queue *q,
+		uint32_t q_desc_index, uint32_t cq_desc_index,
+		void *cb_arg, void *service_cb_arg)
+{
+	struct ionic_rxq_comp *cq_desc_base = q->bound_cq->base;
+	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
+	struct rte_mbuf *rxm = cb_arg;
+	struct rte_mbuf *rxm_seg;
+	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
+	uint32_t max_frame_size =
+			rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	uint64_t pkt_flags;
+	uint32_t pkt_type;
+	struct ionic_rx_stats *stats = IONIC_Q_TO_RX_STATS(q);
+	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
+			service_cb_arg;
+	uint32_t buf_size = (rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM);
+	uint32_t left;
+
+	if (!recv_args) {
+		stats->no_cb_arg++;
+		/* Flush */
+		rte_pktmbuf_free(rxm);
+		/*
+		 * Note: rte_mempool_put is faster with no segs
+		 * rte_mempool_put(rxq->mb_pool, rxm);
+		 */
+		return;
+	}
+
+	if (cq_desc->status) {
+		stats->bad_cq_status++;
+		ionic_rx_recycle(q, q_desc_index, rxm);
+		return;
+	}
+
+	if (recv_args->nb_rx >= recv_args->nb_pkts) {
+		stats->no_room++;
+		ionic_rx_recycle(q, q_desc_index, rxm);
+		return;
+	}
+
+	if (cq_desc->len > max_frame_size ||
+			cq_desc->len == 0) {
+		stats->bad_len++;
+		ionic_rx_recycle(q, q_desc_index, rxm);
+		return;
+	}
+
+	rxm->data_off = RTE_PKTMBUF_HEADROOM;
+	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
+	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
+	rxm->pkt_len = cq_desc->len;
+	rxm->port = rxq->lif->port_id;
+
+	left = cq_desc->len;
+
+	rxm->data_len = RTE_MIN(buf_size, left);
+	left -= rxm->data_len;
+
+	rxm_seg = rxm->next;
+	while (rxm_seg && left) {
+		rxm_seg->data_len = RTE_MIN(buf_size, left);
+		left -= rxm_seg->data_len;
+
+		rxm_seg = rxm_seg->next;
+		rxm->nb_segs++;
+	}
+
+	/* RSS */
+	pkt_flags = PKT_RX_RSS_HASH;
+	rxm->hash.rss = cq_desc->rss_hash;
+
+	/* Vlan Strip */
+	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
+		pkt_flags |= PKT_RX_VLAN_STRIPPED;
+		rxm->vlan_tci = cq_desc->vlan_tci;
+	}
+
+	/* Checksum */
+	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
+		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
+			pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
+			pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+
+		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
+			(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
+			pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
+		else if ((cq_desc->csum_flags &
+				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
+				(cq_desc->csum_flags &
+				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
+			pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+	}
+
+	rxm->ol_flags = pkt_flags;
+
+	/* Packet Type */
+	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
+	case IONIC_PKT_TYPE_IPV4:
+		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
+		break;
+	case IONIC_PKT_TYPE_IPV6:
+		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
+		break;
+	case IONIC_PKT_TYPE_IPV4_TCP:
+		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
+				RTE_PTYPE_L4_TCP;
+		break;
+	case IONIC_PKT_TYPE_IPV6_TCP:
+		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
+				RTE_PTYPE_L4_TCP;
+		break;
+	case IONIC_PKT_TYPE_IPV4_UDP:
+		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
+				RTE_PTYPE_L4_UDP;
+		break;
+	case IONIC_PKT_TYPE_IPV6_UDP:
+		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
+				RTE_PTYPE_L4_UDP;
+		break;
+	default:
+		{
+			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
+					struct rte_ether_hdr *);
+			uint16_t ether_type = eth_h->ether_type;
+			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
+				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
+			else
+				pkt_type = RTE_PTYPE_UNKNOWN;
+			break;
+		}
+	}
+
+	rxm->packet_type = pkt_type;
+
+	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
+	recv_args->nb_rx++;
+
+	stats->packets++;
+	stats->bytes += rxm->pkt_len;
+}
+
+static void
+ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
+		 struct rte_mbuf *mbuf)
+{
+	struct ionic_rxq_desc *desc_base = q->base;
+	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
+	struct ionic_rxq_desc *new = &desc_base[q->head_idx];
+
+	new->addr = old->addr;
+	new->len = old->len;
+
+	ionic_q_post(q, true, ionic_rx_clean, mbuf);
+}
+
+static int __attribute__((cold))
+ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
+{
+	struct ionic_queue *q = &rxq->q;
+	struct ionic_rxq_desc *desc_base = q->base;
+	struct ionic_rxq_sg_desc *sg_desc_base = q->sg_base;
+	struct ionic_rxq_desc *desc;
+	struct ionic_rxq_sg_desc *sg_desc;
+	struct ionic_rxq_sg_elem *elem;
+	rte_iova_t dma_addr;
+	uint32_t i, j, nsegs, buf_size, size;
+	bool ring_doorbell;
+
+	buf_size = (rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM);
+
+	/* Initialize software ring entries */
+	for (i = ionic_q_space_avail(q); i; i--) {
+		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
+		struct rte_mbuf *prev_rxm_seg;
+
+		if (rxm == NULL) {
+			ionic_init_print(ERR, "RX mbuf alloc failed");
+			return -ENOMEM;
+		}
+
+		nsegs = (len + buf_size - 1) / buf_size;
+
+		desc = &desc_base[q->head_idx];
+		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
+		desc->addr = dma_addr;
+		desc->len = buf_size;
+		size = buf_size;
+		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
+				IONIC_RXQ_DESC_OPCODE_SIMPLE;
+		rxm->next = NULL;
+
+		prev_rxm_seg = rxm;
+		sg_desc = &sg_desc_base[q->head_idx];
+		elem = sg_desc->elems;
+		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
+			struct rte_mbuf *rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+			if (rxm_seg == NULL) {
+				ionic_init_print(ERR, "RX mbuf alloc failed");
+				return -ENOMEM;
+			}
+
+			dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(rxm_seg));
+			elem->addr = dma_addr;
+			elem->len = buf_size;
+			size += buf_size;
+			elem++;
+			rxm_seg->next = NULL;
+			prev_rxm_seg->next = rxm_seg;
+			prev_rxm_seg = rxm_seg;
+		}
+
+		if (size < len)
+			ionic_init_print(ERR, "Rx SG size is not sufficient (%d < %d)",
+					size, len);
+
+		ring_doorbell = ((q->head_idx + 1) &
+				IONIC_RX_RING_DOORBELL_STRIDE) == 0;
+
+		ionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm);
+	}
+
+	return 0;
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int __attribute__((cold))
+ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	struct ionic_qcq *rxq;
+	int err;
+
+	ionic_init_print_call();
+
+	ionic_init_print(DEBUG, "Allocating RX queue buffers (size: %u)",
+			frame_size);
+
+	rxq = eth_dev->data->rx_queues[rx_queue_id];
+
+	err = ionic_lif_rxq_init(rxq);
+
+	if (err)
+		return err;
+
+	/* Allocate buffers for descriptor rings */
+	if (ionic_rx_fill(rxq, frame_size) != 0) {
+		ionic_init_print(ERR, "Could not alloc mbuf for queue %d",
+				rx_queue_id);
+		return -ENOMEM;
+	}
+
+	ionic_qcq_enable(rxq);
+
+	eth_dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+static inline void __attribute__((cold))
+ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
+		void *service_cb_arg)
+{
+	struct ionic_queue *q = cq->bound_q;
+	struct ionic_desc_info *q_desc_info;
+	struct ionic_rxq_comp *cq_desc_base = cq->base;
+	struct ionic_rxq_comp *cq_desc;
+	bool more;
+	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
+	uint32_t work_done = 0;
+
+	if (work_to_do == 0)
+		return;
+
+	cq_desc = &cq_desc_base[cq->tail_idx];
+	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
+		curr_cq_tail_idx = cq->tail_idx;
+		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+
+		if (cq->tail_idx == 0)
+			cq->done_color = !cq->done_color;
+
+		/* Prefetch the next 4 descriptors */
+		if ((cq->tail_idx & 0x3) == 0)
+			rte_prefetch0(&cq_desc_base[cq->tail_idx]); /* cq desc */
+
+		do {
+			more = (q->tail_idx != cq_desc->comp_index);
+
+			q_desc_info = &q->info[q->tail_idx];
+
+			curr_q_tail_idx = q->tail_idx;
+			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+
+			/* Prefetch the next 4 descriptors */
+			if ((q->tail_idx & 0x3) == 0)
+				/* q desc info */
+				rte_prefetch0(&q->info[q->tail_idx]);
+
+			ionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx,
+					q_desc_info->cb_arg, service_cb_arg);
+
+		} while (more);
+
+		if (++work_done == work_to_do)
+			break;
+
+		cq_desc = &cq_desc_base[cq->tail_idx];
+	}
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int __attribute__((cold))
+ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+	struct ionic_qcq *rxq;
+
+	ionic_init_print_call();
+
+	rxq = eth_dev->data->rx_queues[rx_queue_id];
+
+	ionic_qcq_disable(rxq);
+
+	/* Flush */
+	ionic_rxq_service(&rxq->cq, -1, NULL);
+
+	ionic_lif_rxq_deinit(rxq);
+
+	eth_dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+uint16_t
+ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
+	uint32_t frame_size =
+			rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	struct ionic_cq *cq = &rxq->cq;
+	struct ionic_rx_service service_cb_arg;
+
+	service_cb_arg.rx_pkts = rx_pkts;
+	service_cb_arg.nb_pkts = nb_pkts;
+	service_cb_arg.nb_rx = 0;
+
+	ionic_rxq_service(cq, nb_pkts, &service_cb_arg);
+
+	ionic_rx_fill(rxq, frame_size);
+
+	return service_cb_arg.nb_rx;
+}
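The deferred_start flags stored at queue setup follow the standard
ethdev deferred-start contract. A minimal sketch of the application
side (port configured but not yet started; "mbuf_pool" assumed to
exist):

    struct rte_eth_dev_info dev_info;
    struct rte_eth_rxconf rx_conf;

    rte_eth_dev_info_get(port_id, &dev_info);
    rx_conf = dev_info.default_rxconf;
    rx_conf.rx_deferred_start = 1;

    /* Set up, but do not start with rte_eth_dev_start() */
    rte_eth_rx_queue_setup(port_id, 0, 4096,
            rte_eth_dev_socket_id(port_id), &rx_conf, mbuf_pool);
    rte_eth_dev_start(port_id);

    /* Start it explicitly later; lands in ionic_dev_rx_queue_start() */
    rte_eth_dev_rx_queue_start(port_id, 0);
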
diff --git a/drivers/net/ionic/ionic_rxtx.h b/drivers/net/ionic/ionic_rxtx.h
new file mode 100644
index 000000000..8f1bbad39
--- /dev/null
+++ b/drivers/net/ionic/ionic_rxtx.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
+ */
+
+#ifndef _IONIC_RXTX_H_
+#define _IONIC_RXTX_H_
+
+#include <rte_mbuf.h>
+
+struct ionic_rx_service {
+	/* cb in */
+	struct rte_mbuf **rx_pkts;
+	uint16_t nb_pkts;
+	/* cb out */
+	uint16_t nb_rx;
+};
+
+uint16_t ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
+int ionic_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+		uint16_t nb_desc, uint32_t socket_id,
+		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);
+void ionic_dev_rx_queue_release(void *rxq);
+int ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);
+
+int ionic_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+		uint16_t nb_desc,  uint32_t socket_id,
+		const struct rte_eth_txconf *tx_conf);
+void ionic_dev_tx_queue_release(void *tx_queue);
+int ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
+int ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+void ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+		struct rte_eth_rxq_info *qinfo);
+void ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+		struct rte_eth_txq_info *qinfo);
+
+#endif /* _IONIC_RXTX_H_ */
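These prototypes are wired to the burst API in eth_ionic_dev_init().
A minimal forwarding sketch of the application side (single queue,
port_id assumed started):

    struct rte_mbuf *pkts[32];
    uint16_t nb_rx, nb_prep, nb_tx;

    /* Reaches ionic_recv_pkts() */
    nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);

    /* Reaches ionic_prep_pkts(); checks segment count and offloads */
    nb_prep = rte_eth_tx_prepare(port_id, 0, pkts, nb_rx);

    /* Reaches ionic_xmit_pkts() */
    nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_prep);

    /* Free whatever was rejected or not transmitted */
    while (nb_tx < nb_rx)
        rte_pktmbuf_free(pkts[nb_tx++]);
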
diff --git a/drivers/net/ionic/meson.build b/drivers/net/ionic/meson.build
index ef202af7f..cc54b56be 100644
--- a/drivers/net/ionic/meson.build
+++ b/drivers/net/ionic/meson.build
@@ -6,6 +6,7 @@ version = 1
 sources = files(
 	'ionic_mac_api.c',
 	'ionic_rx_filter.c',
+	'ionic_rxtx.c',
 	'ionic_dev.c',
 	'ionic_ethdev.c',
 	'ionic_lif.c',

