From: Wenzhuo Lu <wenzhuo.lu@intel.com>
To: dev@dpdk.org
Cc: Wenzhuo Lu <wenzhuo.lu@intel.com>,
	Qiming Yang <qiming.yang@intel.com>,
	Xiaoyun Li <xiaoyun.li@intel.com>,
	Jingjing Wu <jingjing.wu@intel.com>
Subject: [PATCH v5 16/31] net/ice: support device and queue ops
Date: Mon, 17 Dec 2018 15:37:24 +0800	[thread overview]
Message-ID: <1545032259-77179-17-git-send-email-wenzhuo.lu@intel.com> (raw)
In-Reply-To: <1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>

Normally, when the device is started or stopped, its queues should be
started or stopped as well. This patch supports both the device-level
and the queue-level operations.

The following ops are added (a usage sketch follows the list):
dev_configure
dev_start
dev_stop
dev_close
dev_reset
rx_queue_start
rx_queue_stop
tx_queue_start
tx_queue_stop
rx_queue_setup
rx_queue_release
tx_queue_setup
tx_queue_release
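
For context, a minimal application-side sketch of how these ops are
driven through the public ethdev API (a sketch only; the port id,
descriptor counts and mempool parameter below are illustrative
assumptions, not taken from this patch):

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

static int
port_bringup(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = { 0 }; /* defaults; exercises dev_configure */
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	/* rx_queue_setup/tx_queue_setup; 1024 descriptors is an example */
	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL);
	if (ret < 0)
		return ret;

	/* dev_start starts all non-deferred queues via the queue-start ops */
	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	/* a single queue can also be stopped and restarted at runtime */
	rte_eth_dev_rx_queue_stop(port_id, 0);
	rte_eth_dev_rx_queue_start(port_id, 0);

	/* dev_stop/dev_close stop the queues and release their resources */
	rte_eth_dev_stop(port_id);
	rte_eth_dev_close(port_id);

	return 0;
}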

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: Qiming Yang <qiming.yang@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
---
 config/common_base               |   2 +
 doc/guides/nics/features/ice.ini |   1 +
 doc/guides/nics/ice.rst          |   8 +
 drivers/net/ice/Makefile         |   3 +-
 drivers/net/ice/ice_ethdev.c     | 198 ++++++++-
 drivers/net/ice/ice_lan_rxtx.c   | 927 +++++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.h       |  20 +
 drivers/net/ice/meson.build      |   3 +-
 8 files changed, 1159 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/ice/ice_lan_rxtx.c

diff --git a/config/common_base b/config/common_base
index 872f440..a342760 100644
--- a/config/common_base
+++ b/config/common_base
@@ -303,6 +303,8 @@ CONFIG_RTE_LIBRTE_ICE_PMD=y
 CONFIG_RTE_LIBRTE_ICE_DEBUG_RX=n
 CONFIG_RTE_LIBRTE_ICE_DEBUG_TX=n
 CONFIG_RTE_LIBRTE_ICE_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC=y
+CONFIG_RTE_LIBRTE_ICE_16BYTE_RX_DESC=n
 
 # Compile burst-oriented AVF PMD driver
 #
diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini
index 085e848..a43a9cd 100644
--- a/doc/guides/nics/features/ice.ini
+++ b/doc/guides/nics/features/ice.ini
@@ -4,6 +4,7 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Queue start/stop     = Y
 BSD nic_uio          = Y
 Linux UIO            = Y
 Linux VFIO           = Y
diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst
index 946ed04..96a594f 100644
--- a/doc/guides/nics/ice.rst
+++ b/doc/guides/nics/ice.rst
@@ -38,6 +38,14 @@ Please note that enabling debugging options may affect system performance.
 
   Toggle display of generic debugging messages.
 
+- ``CONFIG_RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC`` (default ``y``)
+
+  Toggle bulk allocation for RX.
+
+- ``CONFIG_RTE_LIBRTE_ICE_16BYTE_RX_DESC`` (default ``n``)
+
+  Toggle the use of 16-byte RX descriptors; by default the RX descriptor is 32 bytes.
+
 Runtime Config Options
 ~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 70f23e3..ff93800 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -11,7 +11,7 @@ LIB = librte_pmd_ice.a
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 
-LDLIBS += -lrte_eal -lrte_ethdev -lrte_kvargs -lrte_bus_pci
+LDLIBS += -lrte_eal -lrte_ethdev -lrte_kvargs -lrte_bus_pci -lrte_mempool
 
 EXPORT_MAP := rte_pmd_ice_version.map
 
@@ -50,5 +50,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch.c
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_nvm.c
 
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_lan_rxtx.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 4f0c819..2c86b3d 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -14,6 +14,12 @@
 int ice_logtype_init;
 int ice_logtype_driver;
 
+static int ice_dev_configure(struct rte_eth_dev *dev);
+static int ice_dev_start(struct rte_eth_dev *dev);
+static void ice_dev_stop(struct rte_eth_dev *dev);
+static void ice_dev_close(struct rte_eth_dev *dev);
+static int ice_dev_reset(struct rte_eth_dev *dev);
+
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
@@ -22,7 +28,19 @@
 };
 
 static const struct eth_dev_ops ice_eth_dev_ops = {
-	.dev_configure                = NULL,
+	.dev_configure                = ice_dev_configure,
+	.dev_start                    = ice_dev_start,
+	.dev_stop                     = ice_dev_stop,
+	.dev_close                    = ice_dev_close,
+	.dev_reset                    = ice_dev_reset,
+	.rx_queue_start               = ice_rx_queue_start,
+	.rx_queue_stop                = ice_rx_queue_stop,
+	.tx_queue_start               = ice_tx_queue_start,
+	.tx_queue_stop                = ice_tx_queue_stop,
+	.rx_queue_setup               = ice_rx_queue_setup,
+	.rx_queue_release             = ice_rx_queue_release,
+	.tx_queue_setup               = ice_tx_queue_setup,
+	.tx_queue_release             = ice_tx_queue_release,
 };
 
 static void
@@ -560,11 +578,41 @@
 }
 
 static void
+ice_dev_stop(struct rte_eth_dev *dev)
+{
+	struct rte_eth_dev_data *data = dev->data;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint16_t i;
+
+	/* avoid stopping again */
+	if (pf->adapter_stopped)
+		return;
+
+	/* stop and clear all Rx queues */
+	for (i = 0; i < data->nb_rx_queues; i++)
+		ice_rx_queue_stop(dev, i);
+
+	/* stop and clear all Tx queues */
+	for (i = 0; i < data->nb_tx_queues; i++)
+		ice_tx_queue_stop(dev, i);
+
+	/* Clear all queues and release mbufs */
+	ice_clear_queues(dev);
+
+	pf->adapter_stopped = true;
+}
+
+static void
 ice_dev_close(struct rte_eth_dev *dev)
 {
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	ice_dev_stop(dev);
+
+	/* release all queue resources */
+	ice_free_queues(dev);
+
 	ice_res_pool_destroy(&pf->msix_pool);
 	ice_release_vsi(pf->main_vsi);
 
@@ -595,6 +643,154 @@
 }
 
 static int
+ice_dev_configure(struct rte_eth_dev *dev)
+{
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	/* Initialize to TRUE. If any Rx queue doesn't meet the
+	 * bulk allocation or vector Rx preconditions, it will be reset.
+	 */
+	ad->rx_bulk_alloc_allowed = true;
+	ad->tx_simple_allowed = true;
+
+	return 0;
+}
+
+static int ice_init_rss(struct ice_pf *pf)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_vsi *vsi = pf->main_vsi;
+	struct rte_eth_dev *dev = pf->adapter->eth_dev;
+	struct rte_eth_rss_conf *rss_conf;
+	struct ice_aqc_get_set_rss_keys key;
+	uint16_t i, nb_q;
+	int ret = 0;
+
+	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+	nb_q = dev->data->nb_rx_queues;
+	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
+	vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
+
+	if (!vsi->rss_key)
+		vsi->rss_key = rte_zmalloc(NULL,
+					   vsi->rss_key_size, 0);
+	if (!vsi->rss_lut)
+		vsi->rss_lut = rte_zmalloc(NULL,
+					   vsi->rss_lut_size, 0);
+
+	/* configure RSS key */
+	if (!rss_conf->rss_key) {
+		/* Calculate the default hash key */
+		for (i = 0; i < vsi->rss_key_size; i++)
+			vsi->rss_key[i] = (uint8_t)rte_rand();
+	} else {
+		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
+			   RTE_MIN(rss_conf->rss_key_len,
+				   vsi->rss_key_size));
+	}
+	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
+	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
+	if (ret)
+		return -EINVAL;
+
+	/* init RSS LUT table */
+	for (i = 0; i < vsi->rss_lut_size; i++)
+		vsi->rss_lut[i] = i % nb_q;
+
+	ret = ice_aq_set_rss_lut(hw, vsi->idx,
+				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
+				 vsi->rss_lut, vsi->rss_lut_size);
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+ice_dev_start(struct rte_eth_dev *dev)
+{
+	struct rte_eth_dev_data *data = dev->data;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint16_t nb_rxq = 0;
+	uint16_t nb_txq, i;
+	int ret;
+
+	/* program Tx queues' context in hardware */
+	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
+		ret = ice_tx_queue_start(dev, nb_txq);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
+			goto tx_err;
+		}
+	}
+
+	/* program Rx queues' context in hardware */
+	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
+		ret = ice_rx_queue_start(dev, nb_rxq);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
+			goto rx_err;
+		}
+	}
+
+	ret = ice_init_rss(pf);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
+		goto rx_err;
+	}
+
+	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
+				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
+				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
+				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
+				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
+				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
+				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
+				     NULL);
+	if (ret != ICE_SUCCESS)
+		PMD_DRV_LOG(WARNING, "Failed to set PHY mask");
+
+	pf->adapter_stopped = false;
+
+	return 0;
+
+	/* stop the started queues if failed to start all queues */
+rx_err:
+	for (i = 0; i < nb_rxq; i++)
+		ice_rx_queue_stop(dev, i);
+tx_err:
+	for (i = 0; i < nb_txq; i++)
+		ice_tx_queue_stop(dev, i);
+
+	return -EIO;
+}
+
+static int
+ice_dev_reset(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	if (dev->data->sriov.active)
+		return -ENOTSUP;
+
+	ret = ice_dev_uninit(dev);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
+		return -ENXIO;
+	}
+
+	ret = ice_dev_init(dev);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_lan_rxtx.c b/drivers/net/ice/ice_lan_rxtx.c
new file mode 100644
index 0000000..5c2301a
--- /dev/null
+++ b/drivers/net/ice/ice_lan_rxtx.c
@@ -0,0 +1,927 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_net.h>
+
+#include "ice_rxtx.h"
+
+#define ICE_TD_CMD ICE_TX_DESC_CMD_EOP
+
+#define ICE_TX_CKSUM_OFFLOAD_MASK (		 \
+		PKT_TX_IP_CKSUM |		 \
+		PKT_TX_L4_MASK |		 \
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_OUTER_IP_CKSUM)
+
+#define ICE_RX_ERR_BITS 0x3f
+
+static enum ice_status
+ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
+{
+	struct ice_vsi *vsi = rxq->vsi;
+	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+	struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
+	struct ice_rlan_ctx rx_ctx;
+	enum ice_status err;
+	uint16_t buf_size, len;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	uint32_t regval;
+
+	/**
+	 * The kernel driver uses the flex descriptor format and sets
+	 * the register to flex descriptor mode.
+	 * DPDK uses the legacy descriptor format, so the register must
+	 * be set back to its default value before legacy descriptor
+	 * mode is used.
+	 */
+	regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+		 QRXFLXP_CNTXT_RXDID_PRIO_M;
+	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
+
+	/* Set buffer size as the head split is disabled. */
+	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+			      RTE_PKTMBUF_HEADROOM);
+	rxq->rx_hdr_len = 0;
+	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
+	len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
+	rxq->max_pkt_len = RTE_MIN(len,
+				   dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+		    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "maximum packet length must "
+				    "be larger than %u and smaller than %u,"
+				    "as jumbo frame is enabled",
+				    (uint32_t)ETHER_MAX_LEN,
+				    (uint32_t)ICE_FRAME_SIZE_MAX);
+			return -EINVAL;
+		}
+	} else {
+		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
+		    rxq->max_pkt_len > ETHER_MAX_LEN) {
+			PMD_DRV_LOG(ERR, "maximum packet length must be "
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is disabled",
+				    (uint32_t)ETHER_MIN_LEN,
+				    (uint32_t)ETHER_MAX_LEN);
+			return -EINVAL;
+		}
+	}
+
+	memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+	rx_ctx.base = rxq->rx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
+	rx_ctx.qlen = rxq->nb_rx_desc;
+	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
+	rx_ctx.dtype = 0; /* No Header Split mode */
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	rx_ctx.dsize = 1; /* 32B descriptors */
+#endif
+	rx_ctx.rxmax = rxq->max_pkt_len;
+	/* TPH: Transaction Layer Packet (TLP) processing hints */
+	rx_ctx.tphrdesc_ena = 1;
+	rx_ctx.tphwdesc_ena = 1;
+	rx_ctx.tphdata_ena = 1;
+	rx_ctx.tphhead_ena = 1;
+	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
+	 * When the number of free descriptors drops below lrxqthresh,
+	 * an immediate interrupt is triggered.
+	 */
+	rx_ctx.lrxqthresh = 2;
+	/* Default to 32-byte descriptors; the VLAN tag is extracted to L2TAG2 (1st) */
+	rx_ctx.l2tsel = 1;
+	rx_ctx.showiv = 0;
+
+	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
+			    rxq->queue_id);
+		return -EINVAL;
+	}
+	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
+			    rxq->queue_id);
+		return -EINVAL;
+	}
+
+	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+			      RTE_PKTMBUF_HEADROOM);
+
+	/* Check if scattered RX needs to be used. */
+	if ((rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size)
+		dev->data->scattered_rx = 1;
+
+	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
+
+	/* Init the Rx tail register */
+	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+	return 0;
+}
+
+/* Allocate mbufs for all descriptors in rx queue */
+static int
+ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
+{
+	struct ice_rx_entry *rxe = rxq->sw_ring;
+	uint64_t dma_addr;
+	uint16_t i;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		volatile union ice_rx_desc *rxd;
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
+
+		if (unlikely(!mbuf)) {
+			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+			return -ENOMEM;
+		}
+
+		rte_mbuf_refcnt_set(mbuf, 1);
+		mbuf->next = NULL;
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+		rxd = &rxq->rx_ring[i];
+		rxd->read.pkt_addr = dma_addr;
+		rxd->read.hdr_addr = 0;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+		rxd->read.rsvd1 = 0;
+		rxd->read.rsvd2 = 0;
+#endif
+		rxe[i].mbuf = mbuf;
+	}
+
+	return 0;
+}
+
+/* Free all mbufs for descriptors in rx queue */
+static void
+ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
+{
+	uint16_t i;
+
+	if (!rxq || !rxq->sw_ring) {
+		PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
+		return;
+	}
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (rxq->sw_ring[i].mbuf) {
+			rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+			rxq->sw_ring[i].mbuf = NULL;
+		}
+	}
+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+	if (rxq->rx_nb_avail == 0)
+		return;
+	for (i = 0; i < rxq->rx_nb_avail; i++) {
+		struct rte_mbuf *mbuf;
+
+		mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
+		rte_pktmbuf_free_seg(mbuf);
+	}
+	rxq->rx_nb_avail = 0;
+#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
+}
+
+/* turn on or off rx queue
+ * @q_idx: queue index in pf scope
+ * @on: turn on or off the queue
+ */
+static int
+ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
+{
+	uint32_t reg;
+	uint16_t j;
+
+	/* QRX_CTRL = QRX_ENA */
+	reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
+
+	if (on) {
+		if (reg & QRX_CTRL_QENA_STAT_M)
+			return 0; /* Already on, skip */
+		reg |= QRX_CTRL_QENA_REQ_M;
+	} else {
+		if (!(reg & QRX_CTRL_QENA_STAT_M))
+			return 0; /* Already off, skip */
+		reg &= ~QRX_CTRL_QENA_REQ_M;
+	}
+
+	/* Write the register */
+	ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
+	/* Check the result. QENA_STAT is expected to follow
+	 * QENA_REQ within no more than 10 us.
+	 * TODO: need to change the wait counter later
+	 */
+	for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
+		rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
+		reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
+		if (on) {
+			if ((reg & QRX_CTRL_QENA_REQ_M) &&
+			    (reg & QRX_CTRL_QENA_STAT_M))
+				break;
+		} else {
+			if (!(reg & QRX_CTRL_QENA_REQ_M) &&
+			    !(reg & QRX_CTRL_QENA_STAT_M))
+				break;
+		}
+	}
+
+	/* Check whether it timed out */
+	if (j >= ICE_CHK_Q_ENA_COUNT) {
+		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
+			    (on ? "enable" : "disable"), q_idx);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static inline int
+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
+#else
+ice_check_rx_burst_bulk_alloc_preconditions
+	(__rte_unused struct ice_rx_queue *rxq)
+#endif
+{
+	int ret = 0;
+
+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+	if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "ICE_RX_MAX_BURST=%d",
+			     rxq->rx_free_thresh, ICE_RX_MAX_BURST);
+		ret = -EINVAL;
+	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "rxq->nb_rx_desc=%d",
+			     rxq->rx_free_thresh, rxq->nb_rx_desc);
+		ret = -EINVAL;
+	} else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->nb_rx_desc=%d, "
+			     "rxq->rx_free_thresh=%d",
+			     rxq->nb_rx_desc, rxq->rx_free_thresh);
+		ret = -EINVAL;
+	}
+#else
+	ret = -EINVAL;
+#endif
+
+	return ret;
+}
+
+/* reset fields in ice_rx_queue back to default */
+static void
+ice_reset_rx_queue(struct ice_rx_queue *rxq)
+{
+	unsigned i;
+	uint16_t len;
+
+	if (!rxq) {
+		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
+		return;
+	}
+
+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+	if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+		len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
+	else
+#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
+		len = rxq->nb_rx_desc;
+
+	for (i = 0; i < len * sizeof(union ice_rx_desc); i++)
+		((volatile char *)rxq->rx_ring)[i] = 0;
+
+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+	for (i = 0; i < ICE_RX_MAX_BURST; ++i)
+		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+
+	rxq->rx_nb_avail = 0;
+	rxq->rx_next_avail = 0;
+	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
+
+	rxq->rx_tail = 0;
+	rxq->nb_rx_hold = 0;
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+}
+
+int
+ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ice_rx_queue *rxq;
+	int err;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (rx_queue_id >= dev->data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
+			    rx_queue_id, dev->data->nb_rx_queues);
+		return -EINVAL;
+	}
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	if (!rxq || !rxq->q_set) {
+		PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
+			    rx_queue_id);
+		return -EINVAL;
+	}
+
+	err = ice_program_hw_rx_queue(rxq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to program RX queue %u",
+			    rx_queue_id);
+		return -EIO;
+	}
+
+	err = ice_alloc_rx_queue_mbufs(rxq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+		return -ENOMEM;
+	}
+
+	rte_wmb();
+
+	/* Init the RX tail register. */
+	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+			    rx_queue_id);
+
+		ice_rx_queue_release_mbufs(rxq);
+		ice_reset_rx_queue(rxq);
+		return -EINVAL;
+	}
+
+	dev->data->rx_queue_state[rx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+int
+ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ice_rx_queue *rxq;
+	int err;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (rx_queue_id < dev->data->nb_rx_queues) {
+		rxq = dev->data->rx_queues[rx_queue_id];
+
+		err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+				    rx_queue_id);
+			return -EINVAL;
+		}
+		ice_rx_queue_release_mbufs(rxq);
+		ice_reset_rx_queue(rxq);
+		dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STOPPED;
+	}
+
+	return 0;
+}
+
+int
+ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ice_tx_queue *txq;
+	int err;
+	struct ice_vsi *vsi;
+	struct ice_hw *hw;
+	struct ice_aqc_add_tx_qgrp txq_elem;
+	struct ice_tlan_ctx tx_ctx;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (tx_queue_id >= dev->data->nb_tx_queues) {
+		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
+			    tx_queue_id, dev->data->nb_tx_queues);
+		return -EINVAL;
+	}
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	if (!txq || !txq->q_set) {
+		PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
+			    tx_queue_id);
+		return -EINVAL;
+	}
+
+	vsi = txq->vsi;
+	hw = ICE_VSI_TO_HW(vsi);
+
+	memset(&txq_elem, 0, sizeof(txq_elem));
+	memset(&tx_ctx, 0, sizeof(tx_ctx));
+	txq_elem.num_txqs = 1;
+	txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+
+	tx_ctx.base = txq->tx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
+	tx_ctx.qlen = txq->nb_tx_desc;
+	tx_ctx.pf_num = hw->pf_id;
+	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+	tx_ctx.src_vsi = vsi->vsi_id;
+	tx_ctx.port_num = hw->port_info->lport;
+	tx_ctx.tso_ena = 1; /* tso enable */
+	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
+	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+
+	ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+		    ice_tlan_ctx_info);
+
+	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
+
+	/* Init the Tx tail register */
+	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
+
+	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1, &txq_elem,
+			      sizeof(txq_elem), NULL);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to add lan txq");
+		return -EIO;
+	}
+	/* store the schedule node id */
+	txq->q_teid = txq_elem.txqs[0].q_teid;
+
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+}
+
+/* Free all mbufs for descriptors in tx queue */
+static void
+ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
+{
+	uint16_t i;
+
+	if (!txq || !txq->sw_ring) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+		return;
+	}
+
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		if (txq->sw_ring[i].mbuf) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+			txq->sw_ring[i].mbuf = NULL;
+		}
+	}
+}
+
+static void
+ice_reset_tx_queue(struct ice_tx_queue *txq)
+{
+	struct ice_tx_entry *txe;
+	uint16_t i, prev, size;
+
+	if (!txq) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+		return;
+	}
+
+	txe = txq->sw_ring;
+	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
+	for (i = 0; i < size; i++)
+		((volatile char *)txq->tx_ring)[i] = 0;
+
+	prev = (uint16_t)(txq->nb_tx_desc - 1);
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
+
+		txd->cmd_type_offset_bsz =
+			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
+		txe[i].mbuf = NULL;
+		txe[i].last_id = i;
+		txe[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	txq->tx_tail = 0;
+	txq->nb_tx_used = 0;
+
+	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+}
+
+int
+ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ice_tx_queue *txq;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	enum ice_status status;
+	uint16_t q_ids[1];
+	uint32_t q_teids[1];
+
+	if (tx_queue_id >= dev->data->nb_tx_queues) {
+		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
+			    tx_queue_id, dev->data->nb_tx_queues);
+		return -EINVAL;
+	}
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	if (!txq) {
+		PMD_DRV_LOG(ERR, "TX queue %u is not available",
+			    tx_queue_id);
+		return -EINVAL;
+	}
+
+	q_ids[0] = txq->reg_idx;
+	q_teids[0] = txq->q_teid;
+
+	status = ice_dis_vsi_txq(hw->port_info, 1, q_ids, q_teids,
+				 ICE_NO_RESET, 0, NULL);
+	if (status != ICE_SUCCESS) {
+		PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
+		return -EINVAL;
+	}
+
+	ice_tx_queue_release_mbufs(txq);
+	ice_reset_tx_queue(txq);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+int
+ice_rx_queue_setup(struct rte_eth_dev *dev,
+		   uint16_t queue_idx,
+		   uint16_t nb_desc,
+		   unsigned int socket_id,
+		   const struct rte_eth_rxconf *rx_conf,
+		   struct rte_mempool *mp)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct ice_vsi *vsi = pf->main_vsi;
+	struct ice_rx_queue *rxq;
+	const struct rte_memzone *rz;
+	uint32_t ring_size;
+	uint16_t len;
+	int use_def_burst_func = 1;
+
+	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
+	    nb_desc > ICE_MAX_RING_DESC ||
+	    nb_desc < ICE_MIN_RING_DESC) {
+		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
+			     "invalid", nb_desc);
+		return -EINVAL;
+	}
+
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_idx]) {
+		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	/* Allocate the rx queue data structure */
+	rxq = rte_zmalloc_socket(NULL,
+				 sizeof(struct ice_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!rxq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
+			     "rx queue data structure");
+		return -ENOMEM;
+	}
+	rxq->mp = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+	rxq->queue_id = queue_idx;
+
+	rxq->reg_idx = vsi->base_queue + queue_idx;
+	rxq->port_id = dev->data->port_id;
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		rxq->crc_len = ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
+
+	rxq->drop_en = rx_conf->rx_drop_en;
+	rxq->vsi = vsi;
+	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+
+	/* Allocate the maximum number of RX ring hardware descriptors. */
+	len = ICE_MAX_RING_DESC;
+
+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+	/**
+	 * Allocating a little more memory because vectorized/bulk_alloc Rx
+	 * functions don't check boundaries each time.
+	 */
+	len += ICE_RX_MAX_BURST;
+#endif
+
+	/* Allocate the RX ring hardware descriptors. */
+	ring_size = sizeof(union ice_rx_desc) * len;
+	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
+	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+				      ring_size, ICE_RING_BASE_ALIGN,
+				      socket_id);
+	if (!rz) {
+		ice_rx_queue_release(rxq);
+		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
+		return -ENOMEM;
+	}
+
+	/* Zero all the descriptors in the ring. */
+	memset(rz->addr, 0, ring_size);
+
+	rxq->rx_ring_phys_addr = rz->phys_addr;
+	rxq->rx_ring = (union ice_rx_desc *)rz->addr;
+
+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
+#else
+	len = nb_desc;
+#endif
+
+	/* Allocate the software ring. */
+	rxq->sw_ring = rte_zmalloc_socket(NULL,
+					  sizeof(struct ice_rx_entry) * len,
+					  RTE_CACHE_LINE_SIZE,
+					  socket_id);
+	if (!rxq->sw_ring) {
+		ice_rx_queue_release(rxq);
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+		return -ENOMEM;
+	}
+
+	ice_reset_rx_queue(rxq);
+	rxq->q_set = TRUE;
+	dev->data->rx_queues[queue_idx] = rxq;
+
+	use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
+
+	if (!use_def_burst_func) {
+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+			     "satisfied. Rx Burst Bulk Alloc function will be "
+			     "used on port=%d, queue=%d.",
+			     rxq->port_id, rxq->queue_id);
+#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
+	} else {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+			     "not satisfied, Scattered Rx is requested, "
+			     "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
+			     "not enabled on port=%d, queue=%d.",
+			     rxq->port_id, rxq->queue_id);
+		ad->rx_bulk_alloc_allowed = false;
+	}
+
+	return 0;
+}
+
+void
+ice_rx_queue_release(void *rxq)
+{
+	struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
+
+	if (!q) {
+		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
+		return;
+	}
+
+	ice_rx_queue_release_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_free(q);
+}
+
+int
+ice_tx_queue_setup(struct rte_eth_dev *dev,
+		   uint16_t queue_idx,
+		   uint16_t nb_desc,
+		   unsigned int socket_id,
+		   const struct rte_eth_txconf *tx_conf)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_vsi *vsi = pf->main_vsi;
+	struct ice_tx_queue *txq;
+	const struct rte_memzone *tz;
+	uint32_t ring_size;
+	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
+	    nb_desc > ICE_MAX_RING_DESC ||
+	    nb_desc < ICE_MIN_RING_DESC) {
+		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
+			     "invalid", nb_desc);
+		return -EINVAL;
+	}
+
+	/**
+	 * The following two parameters control the setting of the RS bit on
+	 * transmit descriptors. TX descriptors will have their RS bit set
+	 * after txq->tx_rs_thresh descriptors have been used. The TX
+	 * descriptor ring will be cleaned after txq->tx_free_thresh
+	 * descriptors are used or if the number of descriptors required to
+	 * transmit a packet is greater than the number of free TX descriptors.
+	 *
+	 * The following constraints must be satisfied:
+	 *  - tx_rs_thresh must be greater than 0.
+	 *  - tx_rs_thresh must be less than the size of the ring minus 2.
+	 *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
+	 *  - tx_rs_thresh must be a divisor of the ring size.
+	 *  - tx_free_thresh must be greater than 0.
+	 *  - tx_free_thresh must be less than the size of the ring minus 3.
+	 *
+	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+	 * race condition, hence the maximum threshold constraints. When set
+	 * to zero use default values.
+	 */
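+	/* As an illustrative example (numbers assumed here, not mandated
+	 * by this patch): nb_desc = 1024 with tx_rs_thresh = 32 and
+	 * tx_free_thresh = 32 satisfies every constraint above:
+	 * 0 < 32 <= 32, 32 < 1022, 32 < 1021 and 1024 % 32 == 0.
+	 */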
+	tx_rs_thresh = (uint16_t)(tx_conf->tx_rs_thresh ?
+				  tx_conf->tx_rs_thresh :
+				  ICE_DEFAULT_TX_RSBIT_THRESH);
+	tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
+				    tx_conf->tx_free_thresh :
+				    ICE_DEFAULT_TX_FREE_THRESH);
+	if (tx_rs_thresh >= (nb_desc - 2)) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+			     "number of TX descriptors minus 2. "
+			     "(tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
+		return -EINVAL;
+	}
+	if (tx_free_thresh >= (nb_desc - 3)) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+			     "tx_free_thresh must be less than the "
+			     "number of TX descriptors minus 3. "
+			     "(tx_free_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
+		return -EINVAL;
+	}
+	if (tx_rs_thresh > tx_free_thresh) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
+			     "equal to tx_free_thresh. (tx_free_thresh=%u"
+			     " tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
+		return -EINVAL;
+	}
+	if ((nb_desc % tx_rs_thresh) != 0) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+			     "number of TX descriptors. (tx_rs_thresh=%u"
+			     " port=%d queue=%d)",
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
+		return -EINVAL;
+	}
+	if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
+		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+			     "tx_rs_thresh is greater than 1. "
+			     "(tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
+		return -EINVAL;
+	}
+
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_idx]) {
+		ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	/* Allocate the TX queue data structure. */
+	txq = rte_zmalloc_socket(NULL,
+				 sizeof(struct ice_tx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!txq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
+			     "tx queue structure");
+		return -ENOMEM;
+	}
+
+	/* Allocate TX hardware ring descriptors. */
+	ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
+	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
+	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+				      ring_size, ICE_RING_BASE_ALIGN,
+				      socket_id);
+	if (!tz) {
+		ice_tx_queue_release(txq);
+		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+		return -ENOMEM;
+	}
+
+	txq->nb_tx_desc = nb_desc;
+	txq->tx_rs_thresh = tx_rs_thresh;
+	txq->tx_free_thresh = tx_free_thresh;
+	txq->pthresh = tx_conf->tx_thresh.pthresh;
+	txq->hthresh = tx_conf->tx_thresh.hthresh;
+	txq->wthresh = tx_conf->tx_thresh.wthresh;
+	txq->queue_id = queue_idx;
+
+	txq->reg_idx = vsi->base_queue + queue_idx;
+	txq->port_id = dev->data->port_id;
+	txq->offloads = offloads;
+	txq->vsi = vsi;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+	txq->tx_ring_phys_addr = tz->phys_addr;
+	txq->tx_ring = (struct ice_tx_desc *)tz->addr;
+
+	/* Allocate software ring */
+	txq->sw_ring =
+		rte_zmalloc_socket(NULL,
+				   sizeof(struct ice_tx_entry) * nb_desc,
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (!txq->sw_ring) {
+		ice_tx_queue_release(txq);
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+		return -ENOMEM;
+	}
+
+	ice_reset_tx_queue(txq);
+	txq->q_set = TRUE;
+	dev->data->tx_queues[queue_idx] = txq;
+
+	return 0;
+}
+
+void
+ice_tx_queue_release(void *txq)
+{
+	struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
+
+	if (!q) {
+		PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
+		return;
+	}
+
+	ice_tx_queue_release_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_free(q);
+}
+
+void
+ice_clear_queues(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
+		ice_reset_tx_queue(dev->data->tx_queues[i]);
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
+		ice_reset_rx_queue(dev->data->rx_queues[i]);
+	}
+}
+
+void
+ice_free_queues(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (!dev->data->rx_queues[i])
+			continue;
+		ice_rx_queue_release(dev->data->rx_queues[i]);
+		dev->data->rx_queues[i] = NULL;
+	}
+	dev->data->nb_rx_queues = 0;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		if (!dev->data->tx_queues[i])
+			continue;
+		ice_tx_queue_release(dev->data->tx_queues[i]);
+		dev->data->tx_queues[i] = NULL;
+	}
+	dev->data->nb_tx_queues = 0;
+}
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index c37dc23..088a206 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -114,4 +114,24 @@ struct ice_tx_queue {
 		uint64_t outer_l3_len:16; /* outer L3 Header Length */
 	};
 };
+
+int ice_rx_queue_setup(struct rte_eth_dev *dev,
+		       uint16_t queue_idx,
+		       uint16_t nb_desc,
+		       unsigned int socket_id,
+		       const struct rte_eth_rxconf *rx_conf,
+		       struct rte_mempool *mp);
+int ice_tx_queue_setup(struct rte_eth_dev *dev,
+		       uint16_t queue_idx,
+		       uint16_t nb_desc,
+		       unsigned int socket_id,
+		       const struct rte_eth_txconf *tx_conf);
+int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void ice_rx_queue_release(void *rxq);
+void ice_tx_queue_release(void *txq);
+void ice_clear_queues(struct rte_eth_dev *dev);
+void ice_free_queues(struct rte_eth_dev *dev);
 #endif /* _ICE_RXTX_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 9ed7b27..beb0d39 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -5,7 +5,8 @@ subdir('base')
 objs = [base_objs]
 
 sources = files(
-	'ice_ethdev.c'
+	'ice_ethdev.c',
+	'ice_lan_rxtx.c'
 	)
 
 deps += ['hash']
-- 
1.9.3
