From: Rasesh Mody <rasesh.mody@qlogic.com>
To: <thomas.monjalon@6wind.com>, <bruce.richardson@intel.com>
Cc: <dev@dpdk.org>, <ameen.rahman@qlogic.com>,
	<harish.patil@qlogic.com>, <sony.chacko@qlogic.com>,
	Rasesh Mody <rasesh.mody@qlogic.com>
Subject: [PATCH v3 05/10] qede: Add core driver
Date: Fri, 18 Mar 2016 17:53:20 -0700
Message-ID: <1458348805-32648-6-git-send-email-rasesh.mody@qlogic.com>
In-Reply-To: <1458348805-32648-1-git-send-email-rasesh.mody@qlogic.com>

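This patch adds the core qede poll mode driver: the build Makefile, the
ethdev ops and device init/uninit code (qede_ethdev.c), the slowpath and
base-driver glue (qede_main.c), the Rx/Tx fastpath (qede_rxtx.c) and the
internal interface headers.

A minimal build sketch, assuming the usual DPDK config handling for the
CONFIG_RTE_LIBRTE_QEDE_PMD option referenced by the Makefile below:

    # enable the qede PMD in the DPDK build-time configuration
    CONFIG_RTE_LIBRTE_QEDE_PMD=y

The firmware file defaults to
/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin unless the
RTE_LIBRTE_QEDE_FW macro is defined (see qede_main.c).
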
Signed-off-by: Harish Patil <harish.patil@qlogic.com>
Signed-off-by: Rasesh Mody <rasesh.mody@qlogic.com>
Signed-off-by: Sony Chacko <sony.chacko@qlogic.com>
---
 drivers/net/qede/Makefile                 |   90 +++
 drivers/net/qede/qede_eth_if.h            |  176 +++++
 drivers/net/qede/qede_ethdev.c            |  957 +++++++++++++++++++++++
 drivers/net/qede/qede_ethdev.h            |  156 ++++
 drivers/net/qede/qede_if.h                |  155 ++++
 drivers/net/qede/qede_logs.h              |   93 +++
 drivers/net/qede/qede_main.c              |  548 ++++++++++++++
 drivers/net/qede/qede_rxtx.c              | 1172 +++++++++++++++++++++++++++++
 drivers/net/qede/qede_rxtx.h              |  187 +++++
 drivers/net/qede/rte_pmd_qede_version.map |    4 +
 10 files changed, 3538 insertions(+)
 create mode 100644 drivers/net/qede/Makefile
 create mode 100644 drivers/net/qede/qede_eth_if.h
 create mode 100644 drivers/net/qede/qede_ethdev.c
 create mode 100644 drivers/net/qede/qede_ethdev.h
 create mode 100644 drivers/net/qede/qede_if.h
 create mode 100644 drivers/net/qede/qede_logs.h
 create mode 100644 drivers/net/qede/qede_main.c
 create mode 100644 drivers/net/qede/qede_rxtx.c
 create mode 100644 drivers/net/qede/qede_rxtx.h
 create mode 100644 drivers/net/qede/rte_pmd_qede_version.map

diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
new file mode 100644
index 0000000..efaefb2
--- /dev/null
+++ b/drivers/net/qede/Makefile
@@ -0,0 +1,90 @@
+#    Copyright (c) 2016 QLogic Corporation.
+#    All rights reserved.
+#    www.qlogic.com
+#
+#    See LICENSE.qede_pmd for copyright and licensing details.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_qede.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_qede_version.map
+
+LIBABIVER := 1
+
+#
+#OS
+#
+OS_TYPE := $(shell uname -s)
+
+#
+# CFLAGS
+#
+CFLAGS_ECORE_DRIVER = -Wno-unused-parameter
+CFLAGS_ECORE_DRIVER += -Wno-unused-value
+CFLAGS_ECORE_DRIVER += -Wno-sign-compare
+CFLAGS_ECORE_DRIVER += -Wno-missing-prototypes
+CFLAGS_ECORE_DRIVER += -Wno-cast-qual
+CFLAGS_ECORE_DRIVER += -Wno-unused-function
+CFLAGS_ECORE_DRIVER += -Wno-unused-variable
+CFLAGS_ECORE_DRIVER += -Wno-strict-aliasing
+CFLAGS_ECORE_DRIVER += -Wno-missing-prototypes
+CFLAGS_ECORE_DRIVER += -Wno-format-nonliteral
+ifeq ($(OS_TYPE),Linux)
+CFLAGS_ECORE_DRIVER += -Wno-shift-negative-value
+endif
+
+ifneq (,$(filter gcc gcc48,$(CC)))
+CFLAGS_ECORE_DRIVER += -Wno-unused-but-set-variable
+CFLAGS_ECORE_DRIVER += -Wno-missing-declarations
+CFLAGS_ECORE_DRIVER += -Wno-maybe-uninitialized
+CFLAGS_ECORE_DRIVER += -Wno-strict-prototypes
+else ifeq ($(CC), clang)
+CFLAGS_ECORE_DRIVER += -Wno-format-extra-args
+CFLAGS_ECORE_DRIVER += -Wno-visibility
+CFLAGS_ECORE_DRIVER += -Wno-empty-body
+CFLAGS_ECORE_DRIVER += -Wno-invalid-source-encoding
+CFLAGS_ECORE_DRIVER += -Wno-sometimes-uninitialized
+CFLAGS_ECORE_DRIVER += -Wno-pointer-bool-conversion
+else
+#icc flags
+endif
+
+#
+# Add extra flags for base ecore driver files
+# to disable warnings in them
+#
+#
+ECORE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(ECORE_DRIVER_OBJS), $(eval CFLAGS+=$(CFLAGS_ECORE_DRIVER)))
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_cxt.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sp_commands.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_fw_funcs.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_spq.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
+
+# dependent libs:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_net lib/librte_malloc
+
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
new file mode 100644
index 0000000..47b169d
--- /dev/null
+++ b/drivers/net/qede/qede_eth_if.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_ETH_IF_H
+#define _QEDE_ETH_IF_H
+
+#include "qede_if.h"
+
+/* forward declaration */
+struct eth_slow_path_rx_cqe;
+
+#define INIT_STRUCT_FIELD(field, value) .field = value
+
+#define QED_ETH_INTERFACE_VERSION       609
+
+enum qed_filter_rx_mode_type {
+	QED_FILTER_RX_MODE_TYPE_REGULAR,
+	QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+	QED_FILTER_RX_MODE_TYPE_PROMISC,
+};
+
+enum qed_filter_xcast_params_type {
+	QED_FILTER_XCAST_TYPE_ADD,
+	QED_FILTER_XCAST_TYPE_DEL,
+	QED_FILTER_XCAST_TYPE_REPLACE,
+};
+
+enum qed_filter_type {
+	QED_FILTER_TYPE_UCAST,
+	QED_FILTER_TYPE_MCAST,
+	QED_FILTER_TYPE_RX_MODE,
+	QED_MAX_FILTER_TYPES,
+};
+
+struct qed_dev_eth_info {
+	struct qed_dev_info common;
+
+	uint8_t num_queues;
+	uint8_t num_tc;
+
+	struct ether_addr port_mac;
+	uint8_t num_vlan_filters;
+};
+
+struct qed_update_vport_rss_params {
+	uint16_t rss_ind_table[128];
+	uint32_t rss_key[10];
+};
+
+struct qed_stop_rxq_params {
+	uint8_t rss_id;
+	uint8_t rx_queue_id;
+	uint8_t vport_id;
+	bool eq_completion_only;
+};
+
+struct qed_update_vport_params {
+	uint8_t vport_id;
+	uint8_t update_vport_active_flg;
+	uint8_t vport_active_flg;
+	uint8_t update_inner_vlan_removal_flg;
+	uint8_t inner_vlan_removal_flg;
+	uint8_t update_tx_switching_flg;
+	uint8_t tx_switching_flg;
+	uint8_t update_accept_any_vlan_flg;
+	uint8_t accept_any_vlan;
+	uint8_t update_rss_flg;
+	struct qed_update_vport_rss_params rss_params;
+};
+
+struct qed_start_vport_params {
+	bool remove_inner_vlan;
+	bool handle_ptp_pkts;
+	bool gro_enable;
+	bool drop_ttl0;
+	uint8_t vport_id;
+	uint16_t mtu;
+	bool clear_stats;
+};
+
+struct qed_stop_txq_params {
+	uint8_t rss_id;
+	uint8_t tx_queue_id;
+};
+
+struct qed_filter_ucast_params {
+	enum qed_filter_xcast_params_type type;
+	uint8_t vlan_valid;
+	uint16_t vlan;
+	uint8_t mac_valid;
+	unsigned char mac[ETHER_ADDR_LEN];
+} __attribute__ ((__packed__));
+
+struct qed_filter_mcast_params {
+	enum qed_filter_xcast_params_type type;
+	uint8_t num;
+	unsigned char mac[64][ETHER_ADDR_LEN];
+};
+
+union qed_filter_type_params {
+	enum qed_filter_rx_mode_type accept_flags;
+	struct qed_filter_ucast_params ucast;
+	struct qed_filter_mcast_params mcast;
+};
+
+struct qed_filter_params {
+	enum qed_filter_type type;
+	union qed_filter_type_params filter;
+};
+
+struct qed_eth_ops {
+	const struct qed_common_ops *common;
+
+	int (*fill_dev_info)(struct ecore_dev *edev,
+			     struct qed_dev_eth_info *info);
+
+	int (*vport_start)(struct ecore_dev *edev,
+			   struct qed_start_vport_params *params);
+
+	int (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id);
+
+	int (*vport_update)(struct ecore_dev *edev,
+			    struct qed_update_vport_params *params);
+
+	int (*q_rx_start)(struct ecore_dev *cdev,
+			  uint8_t rss_id, uint8_t rx_queue_id,
+			  uint8_t vport_id, uint16_t sb,
+			  uint8_t sb_index, uint16_t bd_max_bytes,
+			  dma_addr_t bd_chain_phys_addr,
+			  dma_addr_t cqe_pbl_addr,
+			  uint16_t cqe_pbl_size, void OSAL_IOMEM**pp_prod);
+
+	int (*q_rx_stop)(struct ecore_dev *edev,
+			 struct qed_stop_rxq_params *params);
+
+	int (*q_tx_start)(struct ecore_dev *edev,
+			  uint8_t rss_id, uint16_t tx_queue_id,
+			  uint8_t vport_id, uint16_t sb,
+			  uint8_t sb_index,
+			  dma_addr_t pbl_addr,
+			  uint16_t pbl_size, void OSAL_IOMEM**pp_doorbell);
+
+	int (*q_tx_stop)(struct ecore_dev *edev,
+			 struct qed_stop_txq_params *params);
+
+	int (*eth_cqe_completion)(struct ecore_dev *edev,
+				  uint8_t rss_id,
+				  struct eth_slow_path_rx_cqe *cqe);
+
+	int (*fastpath_stop)(struct ecore_dev *edev);
+
+	void (*get_vport_stats)(struct ecore_dev *edev,
+				struct ecore_eth_stats *stats);
+
+	int (*filter_config)(struct ecore_dev *edev,
+			     struct qed_filter_params *params);
+};
+
+/* externs */
+
+extern const struct qed_common_ops qed_common_ops_pass;
+
+extern int qed_fill_eth_dev_info(struct ecore_dev *edev,
+				 struct qed_dev_eth_info *info);
+
+void qed_put_eth_ops(void);
+
+int qed_configure_filter_rx_mode(struct ecore_dev *edev,
+				 enum qed_filter_rx_mode_type type);
+
+#endif /* _QEDE_ETH_IF_H */
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
new file mode 100644
index 0000000..2915156
--- /dev/null
+++ b/drivers/net/qede/qede_ethdev.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "qede_ethdev.h"
+
+/* Globals */
+static const struct qed_eth_ops *qed_ops;
+static const char *drivername = "qede pmd";
+
+static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
+{
+	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+}
+
+static void
+qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+	if (rte_intr_enable(&(eth_dev->pci_dev->intr_handle)))
+		DP_ERR(edev, "rte_intr_enable failed\n");
+}
+
+static void
+qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
+{
+	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
+	qdev->num_tc = qdev->dev_info.num_tc;
+	qdev->ops = qed_ops;
+}
+
+static void qede_print_adapter_info(struct qede_dev *qdev)
+{
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_dev_info *info = &qdev->dev_info.common;
+	char ver_str[QED_DRV_VER_STR_SIZE] = { 0 };
+
+	RTE_LOG(INFO, PMD,
+		  " Chip details : %s%d\n",
+		  ECORE_IS_BB(edev) ? "BB" : "AH",
+		  CHIP_REV_IS_A0(edev) ? 0 : 1);
+
+	sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
+		edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
+		QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
+	strcpy(qdev->drv_ver, ver_str);
+	RTE_LOG(INFO, PMD, " Driver version : %s\n", ver_str);
+
+	ver_str[0] = '\0';
+	sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
+		info->fw_rev, info->fw_eng);
+	RTE_LOG(INFO, PMD, " Firmware version : %s\n", ver_str);
+
+	ver_str[0] = '\0';
+	sprintf(ver_str, "%d.%d.%d.%d",
+		(info->mfw_rev >> 24) & 0xff,
+		(info->mfw_rev >> 16) & 0xff,
+		(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
+	RTE_LOG(INFO, PMD, " Management firmware version : %s\n", ver_str);
+
+	RTE_LOG(INFO, PMD, " Firmware file : %s\n", QEDE_FW_FILE_NAME);
+}
+
+static int
+qede_set_ucast_rx_mac(struct qede_dev *qdev,
+		      enum qed_filter_xcast_params_type opcode,
+		      uint8_t mac[ETHER_ADDR_LEN])
+{
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_filter_params filter_cmd;
+
+	memset(&filter_cmd, 0, sizeof(filter_cmd));
+	filter_cmd.type = QED_FILTER_TYPE_UCAST;
+	filter_cmd.filter.ucast.type = opcode;
+	filter_cmd.filter.ucast.mac_valid = 1;
+	rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
+	return qdev->ops->filter_config(edev, &filter_cmd);
+}
+
+static void
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	int rc;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	DP_NOTICE(edev, false, "%s\n", __func__);
+
+	/* Skip adding macaddr if promiscuous mode is set */
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
+		DP_NOTICE(edev, false, "Port is in promiscuous mode\n");
+		return;
+	}
+
+	/* Add MAC filters according to the unicast secondary macs */
+	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
+				   mac_addr->addr_bytes);
+	if (rc)
+		DP_ERR(edev, "Unable to add filter\n");
+}
+
+static void
+qede_mac_addr_remove(__rte_unused struct rte_eth_dev *eth_dev,
+		     __rte_unused uint32_t index)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	/* TBD: Not implemented currently because DPDK does not provide
+	 * the MAC address and instead just passes the index. So the PMD
+	 * needs to maintain an index-to-MAC-address mapping.
+	 */
+	DP_NOTICE(edev, false, "%s: Unsupported operation\n", __func__);
+}
+
+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
+{
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_update_vport_params params;
+	int rc;
+
+	/* Proceed only if action actually needs to be performed */
+	if (qdev->accept_any_vlan == action)
+		return;
+
+	memset(&params, 0, sizeof(params));
+
+	params.vport_id = 0;
+	params.accept_any_vlan = action;
+	params.update_accept_any_vlan_flg = 1;
+
+	rc = qdev->ops->vport_update(edev, &params);
+	if (rc) {
+		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+		       action ? "enable" : "disable");
+	} else {
+		DP_INFO(edev, "%s accept-any-vlan\n",
+			action ? "enabled" : "disabled");
+		qdev->accept_any_vlan = action;
+	}
+}
+
+void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	/* TODO: - QED_FILTER_TYPE_UCAST */
+	enum qed_filter_rx_mode_type accept_flags =
+			QED_FILTER_RX_MODE_TYPE_REGULAR;
+	struct qed_filter_params rx_mode;
+	int rc;
+
+	/* Configure the struct for the Rx mode */
+	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
+				   eth_dev->data->mac_addrs[0].addr_bytes);
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
+		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+	} else {
+		rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
+					   eth_dev->data->
+					   mac_addrs[0].addr_bytes);
+		if (rc) {
+			DP_ERR(edev, "Unable to add filter\n");
+			return;
+		}
+	}
+
+	/* take care of VLAN mode */
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
+		qede_config_accept_any_vlan(qdev, true);
+	} else if (!qdev->non_configured_vlans) {
+		/* If we don't have any non-configured VLANs and promisc
+		 * mode is not set, check whether we need to disable
+		 * accept_any_vlan mode.
+		 * It may have been set earlier as part of the
+		 * IFF_PROMISC flag handling.
+		 */
+		qede_config_accept_any_vlan(qdev, false);
+	}
+	rx_mode.filter.accept_flags = accept_flags;
+	(void)qdev->ops->filter_config(edev, &rx_mode);
+}
+
+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
+{
+	struct qed_update_vport_params vport_update_params;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	int rc;
+
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
+	vport_update_params.vport_id = 0;
+	vport_update_params.update_inner_vlan_removal_flg = 1;
+	vport_update_params.inner_vlan_removal_flg = set_stripping;
+	rc = qdev->ops->vport_update(edev, &vport_update_params);
+	if (rc) {
+		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+			(void)qede_vlan_stripping(eth_dev, 1);
+		else
+			(void)qede_vlan_stripping(eth_dev, 0);
+	}
+
+	DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
+		mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
+}
+
+static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
+				  enum qed_filter_xcast_params_type opcode,
+				  uint16_t vid)
+{
+	struct qed_filter_params filter_cmd;
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+	memset(&filter_cmd, 0, sizeof(filter_cmd));
+	filter_cmd.type = QED_FILTER_TYPE_UCAST;
+	filter_cmd.filter.ucast.type = opcode;
+	filter_cmd.filter.ucast.vlan_valid = 1;
+	filter_cmd.filter.ucast.vlan = vid;
+
+	return qdev->ops->filter_config(edev, &filter_cmd);
+}
+
+static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
+				uint16_t vlan_id, int on)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
+	int rc;
+
+	if (vlan_id != 0 &&
+	    qdev->configured_vlans == dev_info->num_vlan_filters) {
+		DP_NOTICE(edev, false, "Reached max VLAN filter limit;"
+				     " enabling accept_any_vlan\n");
+		qede_config_accept_any_vlan(qdev, true);
+		return 0;
+	}
+
+	if (on) {
+		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
+					    vlan_id);
+		if (rc)
+			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
+			       rc);
+		else
+			if (vlan_id != 0)
+				qdev->configured_vlans++;
+	} else {
+		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
+					    vlan_id);
+		if (rc)
+			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
+			       vlan_id, rc);
+		else
+			if (vlan_id != 0)
+				qdev->configured_vlans--;
+	}
+
+	DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
+			vlan_id, on, rc, qdev->configured_vlans);
+
+	return rc;
+}
+
+static int qede_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+	int rc = 0;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
+		DP_NOTICE(edev, false,
+			  "Unequal number of rx/tx queues "
+			  "is not supported: RX=%u TX=%u\n",
+			  eth_dev->data->nb_rx_queues,
+			  eth_dev->data->nb_tx_queues);
+		return -EINVAL;
+	}
+
+	qdev->num_rss = eth_dev->data->nb_rx_queues;
+
+	/* Initial state */
+	qdev->state = QEDE_CLOSE;
+
+	/* Sanity checks and throw warnings */
+
+	if (rxmode->enable_scatter == 1) {
+		DP_ERR(edev, "Rx scattered packets are not supported\n");
+		return -EINVAL;
+	}
+
+	if (rxmode->enable_lro == 1) {
+		DP_INFO(edev, "LRO is not supported\n");
+		return -EINVAL;
+	}
+
+	if (!rxmode->hw_strip_crc)
+		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
+
+	if (!rxmode->hw_ip_checksum)
+		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
+			      "in hw\n");
+
+
+	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+		QEDE_RSS_CNT(qdev), qdev->num_tc);
+
+	DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
+		" port %u first_on_engine %d\n",
+		edev->hwfns[0].my_id,
+		edev->hwfns[0].rel_pf_id,
+		edev->hwfns[0].abs_pf_id,
+		edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);
+
+	return 0;
+}
+
+/* Info about HW descriptor ring limitations */
+static const struct rte_eth_desc_lim qede_rx_desc_lim = {
+	.nb_max = NUM_RX_BDS_MAX,
+	.nb_min = 128,
+	.nb_align = 128		/* lowest common multiple */
+};
+
+static const struct rte_eth_desc_lim qede_tx_desc_lim = {
+	.nb_max = NUM_TX_BDS_MAX,
+	.nb_min = 256,
+	.nb_align = 256
+};
+
+static void
+qede_dev_info_get(struct rte_eth_dev *eth_dev,
+		  struct rte_eth_dev_info *dev_info)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
+					      QEDE_ETH_OVERHEAD);
+	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
+	dev_info->rx_desc_lim = qede_rx_desc_lim;
+	dev_info->tx_desc_lim = qede_tx_desc_lim;
+	/* Fix it for 8 queues for now */
+	dev_info->max_rx_queues = 8;
+	dev_info->max_tx_queues = 8;
+	dev_info->max_mac_addrs = (uint32_t)(RESC_NUM(&edev->hwfns[0],
+						      ECORE_MAC));
+	dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
+	dev_info->driver_name = qdev->drv_ver;
+	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.txq_flags = QEDE_TXQ_FLAGS, };
+	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
+				     DEV_RX_OFFLOAD_IPV4_CKSUM |
+				     DEV_RX_OFFLOAD_UDP_CKSUM |
+				     DEV_RX_OFFLOAD_TCP_CKSUM);
+	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
+				     DEV_TX_OFFLOAD_IPV4_CKSUM |
+				     DEV_TX_OFFLOAD_UDP_CKSUM |
+				     DEV_TX_OFFLOAD_TCP_CKSUM);
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	uint16_t link_duplex;
+	struct qed_link_output link;
+	struct rte_eth_link *old = &eth_dev->data->dev_link;
+
+	memset(&link, 0, sizeof(struct qed_link_output));
+	qdev->ops->common->get_link(edev, &link);
+	if (old->link_status == link.link_up)
+		return -1;
+
+	/* Speed */
+	eth_dev->data->dev_link.link_speed = link.speed;
+
+	/* Duplex/Simplex */
+	switch (link.duplex) {
+	case QEDE_DUPLEX_HALF:
+		link_duplex = ETH_LINK_HALF_DUPLEX;
+		break;
+	case QEDE_DUPLEX_FULL:
+		link_duplex = ETH_LINK_FULL_DUPLEX;
+		break;
+	case QEDE_DUPLEX_UNKNOWN:
+	default:
+		link_duplex = -1;
+	}
+
+	eth_dev->data->dev_link.link_duplex = link_duplex;
+	eth_dev->data->dev_link.link_status = link.link_up;
+
+	/* Link state changed */
+	return 0;
+}
+
+static void
+qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
+		     enum qed_filter_rx_mode_type accept_flags)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_filter_params rx_mode;
+
+	DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);
+
+	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+	rx_mode.filter.accept_flags = accept_flags;
+	qdev->ops->filter_config(edev, &rx_mode);
+}
+
+static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
+
+	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
+		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+
+	qede_rx_mode_setting(eth_dev, type);
+}
+
+static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
+		qede_rx_mode_setting(eth_dev,
+				     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
+	else
+		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+}
+
+static void qede_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	/* dev_stop() shall clean up fastpath resources in hw, but without
+	 * releasing dma memories and sw structures, so that dev_start() can
+	 * be called by the app without reconfiguration. In dev_close(),
+	 * however, we can release all resources and bring the device up anew.
+	 */
+	if (qdev->state != QEDE_STOP)
+		qede_dev_stop(eth_dev);
+	else
+		DP_INFO(edev, "Device is already stopped\n");
+
+	qede_free_mem_load(qdev);
+
+	qede_free_fp_arrays(qdev);
+
+	qede_dev_set_link_state(eth_dev, false);
+
+	qdev->ops->common->slowpath_stop(edev);
+
+	qdev->ops->common->remove(edev);
+
+	rte_intr_disable(&(eth_dev->pci_dev->intr_handle));
+
+	rte_intr_callback_unregister(&(eth_dev->pci_dev->intr_handle),
+				     qede_interrupt_handler, (void *)eth_dev);
+
+	qdev->state = QEDE_CLOSE;
+}
+
+static void
+qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct ecore_eth_stats stats;
+
+	qdev->ops->get_vport_stats(edev, &stats);
+
+	/* RX Stats */
+	eth_stats->ipackets = stats.rx_ucast_pkts +
+	    stats.rx_mcast_pkts + stats.rx_bcast_pkts;
+
+	eth_stats->ibytes = stats.rx_ucast_bytes +
+	    stats.rx_mcast_bytes + stats.rx_bcast_bytes;
+
+	eth_stats->imcasts = stats.rx_mcast_pkts;
+
+	eth_stats->ierrors = stats.rx_crc_errors +
+	    stats.rx_align_errors +
+	    stats.rx_carrier_errors +
+	    stats.rx_oversize_packets +
+	    stats.rx_jabbers + stats.rx_undersize_packets;
+
+	eth_stats->rx_nombuf = stats.no_buff_discards;
+
+	eth_stats->imissed = stats.mftag_filter_discards +
+	    stats.mac_filter_discards +
+	    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
+
+	/* TX stats */
+	eth_stats->opackets = stats.tx_ucast_pkts +
+	    stats.tx_mcast_pkts + stats.tx_bcast_pkts;
+
+	eth_stats->obytes = stats.tx_ucast_bytes +
+	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;
+
+	eth_stats->oerrors = stats.tx_err_drop_pkts;
+
+	DP_INFO(edev,
+		"no_buff_discards=%" PRIu64 ""
+		" mac_filter_discards=%" PRIu64 ""
+		" brb_truncates=%" PRIu64 ""
+		" brb_discards=%" PRIu64 "\n",
+		stats.no_buff_discards,
+		stats.mac_filter_discards,
+		stats.brb_truncates, stats.brb_discards);
+}
+
+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qed_link_params link_params;
+	int rc;
+
+	DP_INFO(edev, "setting link state %d\n", link_up);
+	memset(&link_params, 0, sizeof(link_params));
+	link_params.link_up = link_up;
+	rc = qdev->ops->common->set_link(edev, &link_params);
+	if (rc != ECORE_SUCCESS)
+		DP_ERR(edev, "Unable to set link state %d\n", link_up);
+
+	return rc;
+}
+
+static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+	return qede_dev_set_link_state(eth_dev, true);
+}
+
+static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+	return qede_dev_set_link_state(eth_dev, false);
+}
+
+static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+	enum qed_filter_rx_mode_type type =
+	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
+
+	qede_rx_mode_setting(eth_dev, type);
+}
+
+static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
+	else
+		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+}
+
+static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+			      struct rte_eth_fc_conf *fc_conf)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qed_link_output current_link;
+	struct qed_link_params params;
+
+	memset(&current_link, 0, sizeof(current_link));
+	qdev->ops->common->get_link(edev, &current_link);
+
+	memset(&params, 0, sizeof(params));
+	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
+	if (fc_conf->autoneg) {
+		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
+			DP_ERR(edev, "Autoneg not supported\n");
+			return -EINVAL;
+		}
+		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+	}
+
+	/* Pause is assumed to be supported (SUPPORTED_Pause) */
+	if (fc_conf->mode == RTE_FC_FULL)
+		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
+					QED_LINK_PAUSE_RX_ENABLE);
+	if (fc_conf->mode == RTE_FC_TX_PAUSE)
+		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+	if (fc_conf->mode == RTE_FC_RX_PAUSE)
+		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+
+	params.link_up = true;
+	(void)qdev->ops->common->set_link(edev, &params);
+
+	return 0;
+}
+
+static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+			      struct rte_eth_fc_conf *fc_conf)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qed_link_output current_link;
+
+	memset(&current_link, 0, sizeof(current_link));
+	qdev->ops->common->get_link(edev, &current_link);
+
+	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+		fc_conf->autoneg = true;
+
+	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
+					 QED_LINK_PAUSE_TX_ENABLE))
+		fc_conf->mode = RTE_FC_FULL;
+	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
+		fc_conf->mode = RTE_FC_RX_PAUSE;
+	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
+		fc_conf->mode = RTE_FC_TX_PAUSE;
+	else
+		fc_conf->mode = RTE_FC_NONE;
+
+	return 0;
+}
+
+static struct eth_dev_ops qede_eth_dev_ops = {
+	.dev_configure = qede_dev_configure,
+	.dev_infos_get = qede_dev_info_get,
+	.rx_queue_setup = qede_rx_queue_setup,
+	.rx_queue_release = qede_rx_queue_release,
+	.tx_queue_setup = qede_tx_queue_setup,
+	.tx_queue_release = qede_tx_queue_release,
+	.dev_start = qede_dev_start,
+	.dev_set_link_up = qede_dev_set_link_up,
+	.dev_set_link_down = qede_dev_set_link_down,
+	.link_update = qede_link_update,
+	.promiscuous_enable = qede_promiscuous_enable,
+	.promiscuous_disable = qede_promiscuous_disable,
+	.allmulticast_enable = qede_allmulticast_enable,
+	.allmulticast_disable = qede_allmulticast_disable,
+	.dev_stop = qede_dev_stop,
+	.dev_close = qede_dev_close,
+	.stats_get = qede_get_stats,
+	.mac_addr_add = qede_mac_addr_add,
+	.mac_addr_remove = qede_mac_addr_remove,
+	.vlan_offload_set = qede_vlan_offload_set,
+	.vlan_filter_set = qede_vlan_filter_set,
+	.flow_ctrl_set = qede_flow_ctrl_set,
+	.flow_ctrl_get = qede_flow_ctrl_get,
+};
+
+static void qede_update_pf_params(struct ecore_dev *edev)
+{
+	struct ecore_pf_params pf_params;
+	/* 16 rx + 16 tx */
+	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
+	pf_params.eth_pf_params.num_cons = 32;
+	qed_ops->common->update_pf_params(edev, &pf_params);
+}
+
+static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
+{
+	struct rte_pci_device *pci_dev;
+	struct rte_pci_addr pci_addr;
+	struct qede_dev *adapter;
+	struct ecore_dev *edev;
+	struct qed_dev_eth_info dev_info;
+	struct qed_slowpath_params params;
+	uint32_t qed_ver;
+	static bool do_once = true;
+	uint8_t bulletin_change;
+	uint8_t vf_mac[ETHER_ADDR_LEN];
+	uint8_t is_mac_forced;
+	bool is_mac_exist;
+	/* Fix up ecore debug level */
+	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
+	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+	int rc;
+
+	/* Extract key data structures */
+	adapter = eth_dev->data->dev_private;
+	edev = &adapter->edev;
+	pci_addr = eth_dev->pci_dev->addr;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
+		 pci_addr.bus, pci_addr.devid, pci_addr.function,
+		 eth_dev->data->port_id);
+
+	eth_dev->rx_pkt_burst = qede_recv_pkts;
+	eth_dev->tx_pkt_burst = qede_xmit_pkts;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		DP_NOTICE(edev, false,
+			  "Skipping device init from secondary process\n");
+		return 0;
+	}
+
+	pci_dev = eth_dev->pci_dev;
+
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+	if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
+		DP_ERR(edev, "Version mismatch [%08x != %08x]\n",
+		       qed_ver, QEDE_ETH_INTERFACE_VERSION);
+		return -EINVAL;
+	}
+
+	DP_INFO(edev, "Starting qede probe\n");
+
+	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
+				    dp_module, dp_level, is_vf);
+
+	if (rc != 0) {
+		DP_ERR(edev, "qede probe failed rc 0x%x\n", rc);
+		return -ENODEV;
+	}
+
+	qede_update_pf_params(edev);
+
+	rte_intr_callback_register(&(eth_dev->pci_dev->intr_handle),
+				   qede_interrupt_handler, (void *)eth_dev);
+
+	if (rte_intr_enable(&(eth_dev->pci_dev->intr_handle))) {
+		DP_ERR(edev, "rte_intr_enable() failed\n");
+		return -ENODEV;
+	}
+
+	/* Start the Slowpath-process */
+	memset(&params, 0, sizeof(struct qed_slowpath_params));
+	params.int_mode = ECORE_INT_MODE_MSIX;
+	params.drv_major = QEDE_MAJOR_VERSION;
+	params.drv_minor = QEDE_MINOR_VERSION;
+	params.drv_rev = QEDE_REVISION_VERSION;
+	params.drv_eng = QEDE_ENGINEERING_VERSION;
+	strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+
+	rc = qed_ops->common->slowpath_start(edev, &params);
+	if (rc) {
+		DP_ERR(edev, "Cannot start slowpath rc=0x%x\n", rc);
+		return -ENODEV;
+	}
+
+	rc = qed_ops->fill_dev_info(edev, &dev_info);
+	if (rc) {
+		DP_ERR(edev, "Cannot get device_info rc=0x%x\n", rc);
+		qed_ops->common->slowpath_stop(edev);
+		qed_ops->common->remove(edev);
+		return -ENODEV;
+	}
+
+	qede_alloc_etherdev(adapter, &dev_info);
+
+	adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);
+
+	/* Allocate memory for storing primary macaddr */
+	eth_dev->data->mac_addrs = rte_zmalloc(edev->name, ETHER_ADDR_LEN,
+					       RTE_CACHE_LINE_SIZE);
+
+	if (eth_dev->data->mac_addrs == NULL) {
+		DP_ERR(edev, "Failed to allocate MAC address\n");
+		qed_ops->common->slowpath_stop(edev);
+		qed_ops->common->remove(edev);
+		return -ENOMEM;
+	}
+
+	ether_addr_copy((struct ether_addr *)edev->hwfns[0].
+				hw_info.hw_mac_addr,
+				&eth_dev->data->mac_addrs[0]);
+
+	eth_dev->dev_ops = &qede_eth_dev_ops;
+
+	if (do_once) {
+		qede_print_adapter_info(adapter);
+		do_once = false;
+	}
+
+	DP_NOTICE(edev, false, "macaddr %02x:%02x:%02x:%02x:%02x:%02x\n",
+		  eth_dev->data->mac_addrs[0].addr_bytes[0],
+		  eth_dev->data->mac_addrs[0].addr_bytes[1],
+		  eth_dev->data->mac_addrs[0].addr_bytes[2],
+		  eth_dev->data->mac_addrs[0].addr_bytes[3],
+		  eth_dev->data->mac_addrs[0].addr_bytes[4],
+		  eth_dev->data->mac_addrs[0].addr_bytes[5]);
+
+	return rc;
+}
+
+static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+	return qede_common_dev_init(eth_dev, 1);
+}
+
+static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+	return qede_common_dev_init(eth_dev, 0);
+}
+
+static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
+{
+	/* only uninitialize in the primary process */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	/* safe to close dev here */
+	qede_dev_close(eth_dev);
+
+	eth_dev->dev_ops = NULL;
+	eth_dev->rx_pkt_burst = NULL;
+	eth_dev->tx_pkt_burst = NULL;
+
+	if (eth_dev->data->mac_addrs)
+		rte_free(eth_dev->data->mac_addrs);
+
+	eth_dev->data->mac_addrs = NULL;
+
+	return 0;
+}
+
+static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	return qede_dev_common_uninit(eth_dev);
+}
+
+static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	return qede_dev_common_uninit(eth_dev);
+}
+
+static struct rte_pci_id pci_id_qedevf_map[] = {
+#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
+	{
+		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
+	},
+	{
+		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
+	},
+	{.vendor_id = 0,}
+};
+
+static struct rte_pci_id pci_id_qede_map[] = {
+#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
+	{
+		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
+	},
+	{
+		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
+	},
+	{
+		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
+	},
+	{
+		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
+	},
+	{.vendor_id = 0,}
+};
+
+static struct eth_driver rte_qedevf_pmd = {
+	.pci_drv = {
+		    .name = "rte_qedevf_pmd",
+		    .id_table = pci_id_qedevf_map,
+		    .drv_flags =
+		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+		    },
+	.eth_dev_init = qedevf_eth_dev_init,
+	.eth_dev_uninit = qedevf_eth_dev_uninit,
+	.dev_private_size = sizeof(struct qede_dev),
+};
+
+static struct eth_driver rte_qede_pmd = {
+	.pci_drv = {
+		    .name = "rte_qede_pmd",
+		    .id_table = pci_id_qede_map,
+		    .drv_flags =
+		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+		    },
+	.eth_dev_init = qede_eth_dev_init,
+	.eth_dev_uninit = qede_eth_dev_uninit,
+	.dev_private_size = sizeof(struct qede_dev),
+};
+
+static int
+rte_qedevf_pmd_init(const char *name __rte_unused,
+		    const char *params __rte_unused)
+{
+	rte_eth_driver_register(&rte_qedevf_pmd);
+
+	return 0;
+}
+
+static int
+rte_qede_pmd_init(const char *name __rte_unused,
+		  const char *params __rte_unused)
+{
+	rte_eth_driver_register(&rte_qede_pmd);
+
+	return 0;
+}
+
+static struct rte_driver rte_qedevf_driver = {
+	.type = PMD_PDEV,
+	.init = rte_qedevf_pmd_init
+};
+
+static struct rte_driver rte_qede_driver = {
+	.type = PMD_PDEV,
+	.init = rte_qede_pmd_init
+};
+
+PMD_REGISTER_DRIVER(rte_qede_driver);
+PMD_REGISTER_DRIVER(rte_qedevf_driver);
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
new file mode 100644
index 0000000..9f5be7a
--- /dev/null
+++ b/drivers/net/qede/qede_ethdev.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+
+#ifndef _QEDE_ETHDEV_H_
+#define _QEDE_ETHDEV_H_
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_dev.h>
+
+/* ecore includes */
+#include "base/bcm_osal.h"
+#include "base/ecore.h"
+#include "base/ecore_dev_api.h"
+#include "base/ecore_sp_api.h"
+#include "base/ecore_mcp_api.h"
+#include "base/ecore_hsi_common.h"
+#include "base/ecore_int_api.h"
+#include "base/ecore_chain.h"
+#include "base/ecore_status.h"
+#include "base/ecore_hsi_eth.h"
+#include "base/ecore_dev_api.h"
+
+#include "qede_logs.h"
+#include "qede_if.h"
+#include "qede_eth_if.h"
+
+#include "qede_rxtx.h"
+
+#define qede_stringify1(x...)		#x
+#define qede_stringify(x...)		qede_stringify1(x)
+
+/* Driver versions */
+#define QEDE_PMD_VER_PREFIX		"QEDE PMD"
+#define QEDE_PMD_VERSION_MAJOR		1
+#define QEDE_PMD_VERSION_MINOR		0
+#define QEDE_PMD_VERSION_REVISION	4
+#define QEDE_PMD_VERSION_PATCH		1
+
+#define QEDE_MAJOR_VERSION		8
+#define QEDE_MINOR_VERSION		7
+#define QEDE_REVISION_VERSION		9
+#define QEDE_ENGINEERING_VERSION	0
+
+#define QEDE_DRV_MODULE_VERSION qede_stringify(QEDE_MAJOR_VERSION) "."	\
+		qede_stringify(QEDE_MINOR_VERSION) "."			\
+		qede_stringify(QEDE_REVISION_VERSION) "."		\
+		qede_stringify(QEDE_ENGINEERING_VERSION)
+
+#define QEDE_RSS_INDIR_INITED     (1 << 0)
+#define QEDE_RSS_KEY_INITED       (1 << 1)
+#define QEDE_RSS_CAPS_INITED      (1 << 2)
+
+#define QEDE_MAX_RSS_CNT(edev)  ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev)  ((edev)->dev_info.num_queues * \
+					(edev)->dev_info.num_tc)
+
+#define QEDE_RSS_CNT(edev)	((edev)->num_rss)
+#define QEDE_TSS_CNT(edev)	((edev)->num_rss * (edev)->num_tc)
+
+#define QEDE_DUPLEX_FULL	1
+#define QEDE_DUPLEX_HALF	2
+#define QEDE_DUPLEX_UNKNOWN     0xff
+
+#define QEDE_SUPPORTED_AUTONEG (1 << 6)
+#define QEDE_SUPPORTED_PAUSE   (1 << 13)
+
+#define QEDE_INIT_QDEV(eth_dev) (eth_dev->data->dev_private)
+
+#define QEDE_INIT_EDEV(adapter) (&((struct qede_dev *)adapter)->edev)
+
+#define QEDE_INIT(eth_dev) {					\
+	struct qede_dev *qdev = eth_dev->data->dev_private;	\
+	struct ecore_dev *edev = &qdev->edev;			\
+}
+
+/************* QLogic 25G/40G vendor/devices ids *************/
+#define PCI_VENDOR_ID_QLOGIC            0x1077
+
+#define CHIP_NUM_57980E                 0x1634
+#define CHIP_NUM_57980S                 0x1629
+#define CHIP_NUM_VF                     0x1630
+#define CHIP_NUM_57980S_40              0x1634
+#define CHIP_NUM_57980S_25              0x1656
+#define CHIP_NUM_57980S_IOV             0x1664
+
+#define PCI_DEVICE_ID_NX2_57980E        CHIP_NUM_57980E
+#define PCI_DEVICE_ID_NX2_57980S        CHIP_NUM_57980S
+#define PCI_DEVICE_ID_NX2_VF            CHIP_NUM_VF
+#define PCI_DEVICE_ID_57980S_40         CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_25         CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV        CHIP_NUM_57980S_IOV
+
+extern const char *QEDE_FW_FILE_NAME;
+
+/* Port/function states */
+enum dev_state {
+	QEDE_START,
+	QEDE_STOP,
+	QEDE_CLOSE
+};
+
+struct qed_int_param {
+	uint32_t int_mode;
+	uint8_t num_vectors;
+	uint8_t min_msix_cnt;
+};
+
+struct qed_int_params {
+	struct qed_int_param in;
+	struct qed_int_param out;
+	bool fp_initialized;
+};
+
+/*
+ *  Structure to store private data for each port.
+ */
+struct qede_dev {
+	struct ecore_dev edev;
+	uint8_t protocol;
+	const struct qed_eth_ops *ops;
+	struct qed_dev_eth_info dev_info;
+	struct ecore_sb_info *sb_array;
+	struct qede_fastpath *fp_array;
+	uint16_t num_rss;
+	uint8_t num_tc;
+	uint16_t mtu;
+	uint32_t rss_params_inited;
+	struct qed_update_vport_rss_params rss_params;
+	uint32_t flags;
+	bool gro_disable;
+	struct qede_rx_queue **rx_queues;
+	struct qede_tx_queue **tx_queues;
+	enum dev_state state;
+
+	/* Vlans */
+	osal_list_t vlan_list;
+	uint16_t configured_vlans;
+	uint16_t non_configured_vlans;
+	bool accept_any_vlan;
+	uint16_t vxlan_dst_port;
+
+	bool handle_hw_err;
+	char drv_ver[QED_DRV_VER_STR_SIZE];
+};
+
+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
+void qede_config_rx_mode(struct rte_eth_dev *eth_dev);
+
+#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
new file mode 100644
index 0000000..935eed8
--- /dev/null
+++ b/drivers/net/qede/qede_if.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_IF_H
+#define _QEDE_IF_H
+
+#include "qede_ethdev.h"
+
+/* forward */
+struct ecore_dev;
+struct qed_sb_info;
+struct qed_pf_params;
+enum ecore_int_mode;
+
+struct qed_dev_info {
+	uint8_t num_hwfns;
+	uint8_t hw_mac[ETHER_ADDR_LEN];
+	bool is_mf_default;
+
+	/* FW version */
+	uint16_t fw_major;
+	uint16_t fw_minor;
+	uint16_t fw_rev;
+	uint16_t fw_eng;
+
+	/* MFW version */
+	uint32_t mfw_rev;
+
+	uint32_t flash_size;
+	uint8_t mf_mode;
+	bool tx_switching;
+	/* To be added... */
+};
+
+enum qed_sb_type {
+	QED_SB_TYPE_L2_QUEUE,
+	QED_SB_TYPE_STORAGE,
+	QED_SB_TYPE_CNQ,
+};
+
+enum qed_protocol {
+	QED_PROTOCOL_ETH,
+};
+
+struct qed_link_params {
+	bool link_up;
+
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG         (1 << 0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      (1 << 1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    (1 << 2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG          (1 << 3)
+	uint32_t override_flags;
+	bool autoneg;
+	uint32_t adv_speeds;
+	uint32_t forced_speed;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE           (1 << 0)
+#define QED_LINK_PAUSE_RX_ENABLE                (1 << 1)
+#define QED_LINK_PAUSE_TX_ENABLE                (1 << 2)
+	uint32_t pause_config;
+};
+
+struct qed_link_output {
+	bool link_up;
+	uint32_t supported_caps;	/* In SUPPORTED defs */
+	uint32_t advertised_caps;	/* In ADVERTISED defs */
+	uint32_t lp_caps;	/* In ADVERTISED defs */
+	uint32_t speed;		/* In Mb/s */
+	uint8_t duplex;		/* In DUPLEX defs */
+	uint8_t port;		/* In PORT defs */
+	bool autoneg;
+	uint32_t pause_config;
+};
+
+#define QED_DRV_VER_STR_SIZE 80
+struct qed_slowpath_params {
+	uint32_t int_mode;
+	uint8_t drv_major;
+	uint8_t drv_minor;
+	uint8_t drv_rev;
+	uint8_t drv_eng;
+	uint8_t name[QED_DRV_VER_STR_SIZE];
+};
+
+#define ILT_PAGE_SIZE_TCFC 0x8000	/* 32KB */
+
+struct qed_common_cb_ops {
+	void (*link_update)(void *dev, struct qed_link_output *link);
+};
+
+struct qed_selftest_ops {
+/**
+ * @brief registers - Perform register tests
+ *
+ * @param edev
+ *
+ * @return 0 on success, error otherwise.
+ */
+	int (*registers)(struct ecore_dev *edev);
+};
+
+struct qed_common_ops {
+	int (*probe)(struct ecore_dev *edev,
+		     struct rte_pci_device *pci_dev,
+		     enum qed_protocol protocol,
+		     uint32_t dp_module, uint8_t dp_level, bool is_vf);
+	void (*set_id)(struct ecore_dev *edev,
+		char name[], const char ver_str[]);
+	enum _ecore_status_t (*chain_alloc)(struct ecore_dev *edev,
+					    enum ecore_chain_use_mode
+					    intended_use,
+					    enum ecore_chain_mode mode,
+					    enum ecore_chain_cnt_type cnt_type,
+					    uint32_t num_elems,
+					    osal_size_t elem_size,
+					    struct ecore_chain *p_chain);
+
+	void (*chain_free)(struct ecore_dev *edev,
+			   struct ecore_chain *p_chain);
+
+	void (*get_link)(struct ecore_dev *edev,
+			 struct qed_link_output *if_link);
+	int (*set_link)(struct ecore_dev *edev,
+			struct qed_link_params *params);
+
+	int (*drain)(struct ecore_dev *edev);
+
+	void (*remove)(struct ecore_dev *edev);
+
+	int (*slowpath_stop)(struct ecore_dev *edev);
+
+	void (*update_pf_params)(struct ecore_dev *edev,
+				 struct ecore_pf_params *params);
+
+	int (*slowpath_start)(struct ecore_dev *edev,
+			      struct qed_slowpath_params *params);
+
+	int (*set_fp_int)(struct ecore_dev *edev, uint16_t cnt);
+
+	uint32_t (*sb_init)(struct ecore_dev *edev,
+			    struct ecore_sb_info *sb_info,
+			    void *sb_virt_addr,
+			    dma_addr_t sb_phy_addr,
+			    uint16_t sb_id, enum qed_sb_type type);
+
+	bool (*can_link_change)(struct ecore_dev *edev);
+	void (*update_msglvl)(struct ecore_dev *edev,
+			      uint32_t dp_module, uint8_t dp_level);
+};
+
+#endif /* _QEDE_IF_H */
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
new file mode 100644
index 0000000..46a54e1
--- /dev/null
+++ b/drivers/net/qede/qede_logs.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_LOGS_H_
+#define _QEDE_LOGS_H_
+
+#define DP_ERR(p_dev, fmt, ...) \
+	rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, \
+		"[%s:%d(%s)]" fmt, \
+		  __func__, __LINE__, \
+		(p_dev)->name ? (p_dev)->name : "", \
+		##__VA_ARGS__)
+
+#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
+do {  \
+	rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
+		"[QEDE PMD: (%s)]%s:" fmt, \
+		(p_dev)->name ? (p_dev)->name : "", \
+		 __func__, \
+		##__VA_ARGS__); \
+	OSAL_ASSERT(!is_assert); \
+} while (0)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+
+#define DP_INFO(p_dev, fmt, ...) \
+	rte_log(RTE_LOG_INFO, RTE_LOGTYPE_PMD, \
+		"[%s:%d(%s)]" fmt, \
+		__func__, __LINE__, \
+		(p_dev)->name ? (p_dev)->name : "", \
+		##__VA_ARGS__)
+#else
+#define DP_INFO(p_dev, fmt, ...) do { } while (0)
+
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_ECORE
+#define DP_VERBOSE(p_dev, module, fmt, ...) \
+do { \
+	if ((p_dev)->dp_module & module) \
+		rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_PMD, \
+			"[%s:%d(%s)]" fmt, \
+		      __func__, __LINE__, \
+		      (p_dev)->name ? (p_dev)->name : "", \
+		      ##__VA_ARGS__); \
+} while (0)
+#else
+#define DP_VERBOSE(p_dev, module, fmt, ...) do { } while (0)
+#endif
+
+#define PMD_INIT_LOG(level, edev, fmt, args...)	\
+	rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+		"[qede_pmd: %s] %s() " fmt "\n", \
+	(edev)->name, __func__, ##args)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE(edev) PMD_INIT_LOG(DEBUG, edev, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE(edev) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+#define PMD_TX_LOG(level, q, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \
+		__func__, q->port_id, q->queue_id, ## args)
+#else
+#define PMD_TX_LOG(level, q, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+#define PMD_RX_LOG(level, q, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n",	\
+		__func__, q->port_id, q->queue_id, ## args)
+#else
+#define PMD_RX_LOG(level, q, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _QEDE_LOGS_H_ */
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
new file mode 100644
index 0000000..7a1b986
--- /dev/null
+++ b/drivers/net/qede/qede_main.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <zlib.h>
+
+#include "qede_ethdev.h"
+
+
+static uint8_t npar_tx_switching = 1;
+
+#define CONFIG_QED_BINARY_FW
+
+#ifdef RTE_LIBRTE_QEDE_TX_SWITCHING
+static uint8_t tx_switching = 1;
+#else
+static uint8_t tx_switching;
+#endif
+
+#ifndef RTE_LIBRTE_QEDE_FW
+const char *QEDE_FW_FILE_NAME =
+	"/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin";
+#else
+const char *QEDE_FW_FILE_NAME = RTE_LIBRTE_QEDE_FW;
+#endif
+
+static void
+qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
+{
+	int i;
+
+	for (i = 0; i < edev->num_hwfns; i++) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+		p_hwfn->pf_params = *params;
+	}
+}
+
+static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
+{
+	edev->regview = pci_dev->mem_resource[0].addr;
+	edev->doorbells = pci_dev->mem_resource[2].addr;
+}
+
+static int
+qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
+	  enum qed_protocol protocol, uint32_t dp_module,
+	  uint8_t dp_level, bool is_vf)
+{
+	struct qede_dev *qdev = (struct qede_dev *)edev;
+	int rc;
+
+	ecore_init_struct(edev);
+	qdev->protocol = protocol;
+	if (is_vf) {
+		edev->b_is_vf = true;
+		edev->sriov_info.b_hw_channel = true;
+	}
+	ecore_init_dp(edev, dp_module, dp_level, NULL);
+	qed_init_pci(edev, pci_dev);
+	rc = ecore_hw_prepare(edev, ECORE_PCI_DEFAULT);
+	if (rc) {
+		DP_ERR(edev, "hw prepare failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+static int qed_nic_setup(struct ecore_dev *edev)
+{
+	int rc, i;
+
+	rc = ecore_resc_alloc(edev);
+	if (rc)
+		return rc;
+
+	DP_INFO(edev, "Allocated qed resources\n");
+	ecore_resc_setup(edev);
+
+	return rc;
+}
+
+static int qed_alloc_stream_mem(struct ecore_dev *edev)
+{
+	int i;
+
+	for_each_hwfn(edev, i) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+		p_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+					     sizeof(*p_hwfn->stream));
+		if (!p_hwfn->stream)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void qed_free_stream_mem(struct ecore_dev *edev)
+{
+	int i;
+
+	for_each_hwfn(edev, i) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+		if (!p_hwfn->stream)
+			return;
+
+		OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream);
+	}
+}
+
+static int qed_load_firmware_data(struct ecore_dev *edev)
+{
+	int fd;
+	struct stat st;
+
+	fd = open(QEDE_FW_FILE_NAME, O_RDONLY);
+	if (fd < 0) {
+		DP_NOTICE(edev, false, "Can't open firmware file\n");
+		return -ENOENT;
+	}
+
+	if (fstat(fd, &st) < 0) {
+		DP_NOTICE(edev, false, "Can't stat firmware file\n");
+		return -1;
+	}
+
+	edev->firmware = rte_zmalloc("qede_fw", st.st_size,
+				    RTE_CACHE_LINE_SIZE);
+	if (!edev->firmware) {
+		DP_NOTICE(edev, false, "Can't allocate memory for firmware\n");
+		close(fd);
+		return -ENOMEM;
+	}
+
+	if (read(fd, edev->firmware, st.st_size) != st.st_size) {
+		DP_NOTICE(edev, false, "Can't read firmware data\n");
+		close(fd);
+		return -1;
+	}
+
+	edev->fw_len = st.st_size;
+	if (edev->fw_len < 104) {
+		DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64"\n",
+			  edev->fw_len);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qed_slowpath_start(struct ecore_dev *edev,
+			      struct qed_slowpath_params *params)
+{
+	bool allow_npar_tx_switching;
+	const uint8_t *data = NULL;
+	struct ecore_hwfn *hwfn;
+	struct ecore_mcp_drv_version drv_version;
+	struct qede_dev *qdev = (struct qede_dev *)edev;
+	int rc;
+#ifdef QED_ENC_SUPPORTED
+	struct ecore_tunn_start_params tunn_info;
+#endif
+
+#ifdef CONFIG_QED_BINARY_FW
+	rc = qed_load_firmware_data(edev);
+	if (rc) {
+		DP_NOTICE(edev, true,
+			  "Failed to find fw file %s\n",
+			  QEDE_FW_FILE_NAME);
+		goto err;
+	}
+#endif
+
+	rc = qed_nic_setup(edev);
+	if (rc)
+		goto err;
+
+	/* set int_coalescing_mode */
+	edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
+
+	/* Should go with CONFIG_QED_BINARY_FW */
+	/* Allocate stream for unzipping */
+	rc = qed_alloc_stream_mem(edev);
+	if (rc) {
+		DP_NOTICE(edev, true,
+		"Failed to allocate stream memory\n");
+		goto err2;
+	}
+
+	/* Start the slowpath */
+#ifdef CONFIG_QED_BINARY_FW
+	data = edev->firmware;
+#endif
+	allow_npar_tx_switching = npar_tx_switching ? true : false;
+
+#ifdef QED_ENC_SUPPORTED
+	memset(&tunn_info, 0, sizeof(tunn_info));
+	tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
+	    1 << QED_MODE_L2GRE_TUNN |
+	    1 << QED_MODE_IPGRE_TUNN |
+	    1 << QED_MODE_L2GENEVE_TUNN | 1 << QED_MODE_IPGENEVE_TUNN;
+	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
+	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
+	rc = ecore_hw_init(edev, &tunn_info, true, ECORE_INT_MODE_MSIX,
+			   allow_npar_tx_switching, data);
+#else
+	rc = ecore_hw_init(edev, NULL, true, ECORE_INT_MODE_MSIX,
+			   allow_npar_tx_switching, data);
+#endif
+	if (rc) {
+		DP_ERR(edev, "ecore_hw_init failed\n");
+		goto err2;
+	}
+
+	DP_INFO(edev, "HW inited and function started\n");
+
+	hwfn = ECORE_LEADING_HWFN(edev);
+	drv_version.version = (params->drv_major << 24) |
+		    (params->drv_minor << 16) |
+		    (params->drv_rev << 8) | (params->drv_eng);
+	/* TBD: strlcpy() */
+	strncpy((char *)drv_version.name, (const char *)params->name,
+			MCP_DRV_VER_STR_SIZE - 4);
+	rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+						&drv_version);
+	if (rc) {
+		DP_NOTICE(edev, true,
+			  "Failed sending drv version command\n");
+		return rc;
+	}
+
+	return 0;
+
+	ecore_hw_stop(edev);
+err2:
+	ecore_resc_free(edev);
+err:
+#ifdef CONFIG_QED_BINARY_FW
+	if (edev->firmware)
+		rte_free(edev->firmware);
+	edev->firmware = NULL;
+#endif
+	return rc;
+}
+
+static int
+qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
+{
+	struct ecore_ptt *ptt = NULL;
+
+	memset(dev_info, 0, sizeof(struct qed_dev_info));
+	dev_info->num_hwfns = edev->num_hwfns;
+	dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
+	rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+	       ETHER_ADDR_LEN);
+
+	dev_info->fw_major = FW_MAJOR_VERSION;
+	dev_info->fw_minor = FW_MINOR_VERSION;
+	dev_info->fw_rev = FW_REVISION_VERSION;
+	dev_info->fw_eng = FW_ENGINEERING_VERSION;
+	dev_info->mf_mode = edev->mf_mode;
+	dev_info->tx_switching = tx_switching ? true : false;
+
+	ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
+	if (ptt) {
+		ecore_mcp_get_mfw_ver(edev, ptt,
+					      &dev_info->mfw_rev, NULL);
+
+		ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,
+						 &dev_info->flash_size);
+
+		/* Workaround to allow PHY-read commands for
+		 * B0 bringup.
+		 */
+		if (ECORE_IS_BB_B0(edev))
+			dev_info->flash_size = 0xffffffff;
+
+		ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);
+	}
+
+	return 0;
+}
+
+int
+qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
+{
+	struct qede_dev *qdev = (struct qede_dev *)edev;
+	int i;
+
+	memset(info, 0, sizeof(*info));
+
+	info->num_tc = 1 /* @@@TBD aelior MULTI_COS */;
+
+	info->num_queues = 0;
+	for_each_hwfn(edev, i)
+		    info->num_queues +=
+		    FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);
+
+	info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN);
+
+	rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+			   ETHER_ADDR_LEN);
+
+	qed_fill_dev_info(edev, &info->common);
+
+	return 0;
+}
+
+static void
+qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
+	   const char ver_str[VER_SIZE])
+{
+	int i;
+
+	rte_memcpy(edev->name, name, NAME_SIZE);
+	for_each_hwfn(edev, i) {
+		snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+	}
+	rte_memcpy(edev->ver_str, ver_str, VER_SIZE);
+	edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+}
+
+static uint32_t
+qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
+	    void *sb_virt_addr, dma_addr_t sb_phy_addr,
+	    uint16_t sb_id, enum qed_sb_type type)
+{
+	struct ecore_hwfn *p_hwfn;
+	int hwfn_index;
+	uint16_t rel_sb_id;
+	uint8_t n_hwfns;
+	uint32_t rc;
+
+	/* RoCE uses single engine and CMT uses two engines. When using both
+	 * we force only a single engine. Storage uses only engine 0 too.
+	 */
+	if (type == QED_SB_TYPE_L2_QUEUE)
+		n_hwfns = edev->num_hwfns;
+	else
+		n_hwfns = 1;
+
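+	/* Status blocks are spread round-robin across the engines: for
+	 * example, with two hwfns sb_id 5 lands on hwfn 1 with relative
+	 * index 2.
+	 */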
+	hwfn_index = sb_id % n_hwfns;
+	p_hwfn = &edev->hwfns[hwfn_index];
+	rel_sb_id = sb_id / n_hwfns;
+
+	DP_INFO(edev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+		hwfn_index, rel_sb_id, sb_id);
+
+	rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+			       sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+	return rc;
+}
+
+static void qed_fill_link(struct ecore_hwfn *hwfn,
+			  struct qed_link_output *if_link)
+{
+	struct ecore_mcp_link_params params;
+	struct ecore_mcp_link_state link;
+	struct ecore_mcp_link_capabilities link_caps;
+	uint32_t media_type;
+	uint8_t change = 0;
+
+	memset(if_link, 0, sizeof(*if_link));
+
+	/* Prepare source inputs */
+	rte_memcpy(&params, ecore_mcp_get_link_params(hwfn),
+		       sizeof(params));
+	rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));
+	rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),
+		       sizeof(link_caps));
+
+	/* Set the link parameters to pass to protocol driver */
+	if (link.link_up)
+		if_link->link_up = true;
+
+	if (link.link_up)
+		if_link->speed = link.speed;
+
+	if_link->duplex = QEDE_DUPLEX_FULL;
+
+	if (params.speed.autoneg)
+		if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG;
+
+	if (params.pause.autoneg || params.pause.forced_rx ||
+	    params.pause.forced_tx)
+		if_link->supported_caps |= QEDE_SUPPORTED_PAUSE;
+
+	if (params.pause.autoneg)
+		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+
+	if (params.pause.forced_rx)
+		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+
+	if (params.pause.forced_tx)
+		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+}
+
+static void
+qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
+{
+#ifdef CONFIG_QED_SRIOV
+	int i;
+#endif
+
+	qed_fill_link(&edev->hwfns[0], if_link);
+
+#ifdef CONFIG_QED_SRIOV
+	for_each_hwfn(edev, i)
+		qed_inform_vf_link_state(&edev->hwfns[i]);
+#endif
+}
+
+static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
+{
+	struct ecore_hwfn *hwfn;
+	struct ecore_ptt *ptt;
+	struct ecore_mcp_link_params *link_params;
+	int rc;
+
+	/* The link should be set only once per PF */
+	hwfn = &edev->hwfns[0];
+
+	ptt = ecore_ptt_acquire(hwfn);
+	if (!ptt)
+		return -EBUSY;
+
+	link_params = ecore_mcp_get_link_params(hwfn);
+	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+		link_params->speed.autoneg = params->autoneg;
+
+	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+			link_params->pause.autoneg = true;
+		else
+			link_params->pause.autoneg = false;
+		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+			link_params->pause.forced_rx = true;
+		else
+			link_params->pause.forced_rx = false;
+		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+			link_params->pause.forced_tx = true;
+		else
+			link_params->pause.forced_tx = false;
+	}
+
+	rc = ecore_mcp_set_link(hwfn, ptt, params->link_up);
+
+	ecore_ptt_release(hwfn, ptt);
+
+	return rc;
+}
+
+static int qed_drain(struct ecore_dev *edev)
+{
+	struct ecore_hwfn *hwfn;
+	struct ecore_ptt *ptt;
+	int i, rc;
+
+	for_each_hwfn(edev, i) {
+		hwfn = &edev->hwfns[i];
+		ptt = ecore_ptt_acquire(hwfn);
+		if (!ptt) {
+			DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n");
+			return -EBUSY;
+		}
+		rc = ecore_mcp_drain(hwfn, ptt);
+		if (rc)
+			return rc;
+		ecore_ptt_release(hwfn, ptt);
+	}
+
+	return 0;
+}
+
+static int qed_nic_stop(struct ecore_dev *edev)
+{
+	int i, rc;
+
+	rc = ecore_hw_stop(edev);
+	for (i = 0; i < edev->num_hwfns; i++) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+		if (p_hwfn->b_sp_dpc_enabled)
+			p_hwfn->b_sp_dpc_enabled = false;
+	}
+	return rc;
+}
+
+static int qed_nic_reset(struct ecore_dev *edev)
+{
+	int rc;
+
+	rc = ecore_hw_reset(edev);
+	if (rc)
+		return rc;
+
+	ecore_resc_free(edev);
+
+	return 0;
+}
+
+static int qed_slowpath_stop(struct ecore_dev *edev)
+{
+#ifdef CONFIG_QED_SRIOV
+	int i;
+#endif
+
+	if (!edev)
+		return -ENODEV;
+
+	qed_free_stream_mem(edev);
+
+	qed_nic_stop(edev);
+
+	qed_nic_reset(edev);
+
+	return 0;
+}
+
+static void qed_remove(struct ecore_dev *edev)
+{
+	if (!edev)
+		return;
+
+	ecore_hw_remove(edev);
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+	INIT_STRUCT_FIELD(probe, &qed_probe),
+	INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
+	INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
+	INIT_STRUCT_FIELD(set_id, &qed_set_id),
+	INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
+	INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
+	INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
+	INIT_STRUCT_FIELD(get_link, &qed_get_current_link),
+	INIT_STRUCT_FIELD(set_link, &qed_set_link),
+	INIT_STRUCT_FIELD(drain, &qed_drain),
+	INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),
+	INIT_STRUCT_FIELD(remove, &qed_remove),
+};
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
new file mode 100644
index 0000000..d0450f7
--- /dev/null
+++ b/drivers/net/qede/qede_rxtx.c
@@ -0,0 +1,1172 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "qede_rxtx.h"
+
+static bool gro_disable = 1;	/* mod_param */
+
+static inline struct rte_mbuf *
+qede_rxmbuf_alloc(struct rte_mempool *mp)
+{
+	struct rte_mbuf *m;
+
+	m = __rte_mbuf_raw_alloc(mp);
+	__rte_mbuf_sanity_check(m, 0);
+
+	return m;
+}
+
+static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+{
+	struct rte_mbuf *new_mb = NULL;
+	struct eth_rx_bd *rx_bd;
+	dma_addr_t mapping;
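+	/* nb_rx_desc is validated as a power of two at queue setup, so
+	 * masking with NUM_RX_BDS() (nb_rx_desc - 1) wraps the software
+	 * producer index onto the ring.
+	 */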
+	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+
+	new_mb = qede_rxmbuf_alloc(rxq->mb_pool);
+	if (unlikely(!new_mb)) {
+		PMD_RX_LOG(ERR, rxq,
+			   "Failed to allocate rx buffer "
+			   "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
+			   idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
+			   rte_mempool_count(rxq->mb_pool),
+			   rte_mempool_free_count(rxq->mb_pool));
+		return -ENOMEM;
+	}
+	rxq->sw_rx_ring[idx].mbuf = new_mb;
+	rxq->sw_rx_ring[idx].page_offset = 0;
+	mapping = new_mb->buf_physaddr;
+	/* Advance PROD and get BD pointer */
+	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
+	rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+	rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+	rxq->sw_rx_prod++;
+	return 0;
+}
+
+static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
+{
+	uint16_t i;
+
+	if (rxq->sw_rx_ring != NULL) {
+		for (i = 0; i < rxq->nb_rx_desc; i++) {
+			if (rxq->sw_rx_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
+				rxq->sw_rx_ring[i].mbuf = NULL;
+			}
+		}
+	}
+}
+
+void qede_rx_queue_release(void *rx_queue)
+{
+	struct qede_rx_queue *rxq = rx_queue;
+
+	if (rxq != NULL) {
+		qede_rx_queue_release_mbufs(rxq);
+		rte_free(rxq->sw_rx_ring);
+		rxq->sw_rx_ring = NULL;
+		rte_free(rxq);
+		rx_queue = NULL;
+	}
+}
+
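+/* The Rx buffer size is the larger of the requested frame length and the
+ * mempool data room (minus headroom), plus QEDE_ETH_OVERHEAD for L2
+ * headers, CRC and FW alignment.
+ */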
+static uint16_t qede_set_rx_buf_size(struct rte_mempool *mp, uint16_t len)
+{
+	uint16_t data_size;
+	uint16_t buf_size;
+
+	data_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+	buf_size = RTE_MAX(len, data_size);
+	return buf_size + QEDE_ETH_OVERHEAD;
+}
+
+int
+qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		    uint16_t nb_desc, unsigned int socket_id,
+		    const struct rte_eth_rxconf *rx_conf,
+		    struct rte_mempool *mp)
+{
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct rte_eth_dev_data *eth_data = dev->data;
+	struct qede_rx_queue *rxq;
+	uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	size_t size;
+	int rc;
+	int i;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	/* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
+	if (!rte_is_power_of_2(nb_desc)) {
+		DP_NOTICE(edev, false, "Ring size %u is not power of 2\n",
+			  nb_desc);
+		return -EINVAL;
+	}
+
+	/* Free memory prior to re-allocation if needed... */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	/* First allocate the rx queue data structure */
+	rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (!rxq) {
+		DP_NOTICE(edev, false,
+			  "Unable to allocate memory for rxq on socket %u",
+			  socket_id);
+		return -ENOMEM;
+	}
+
+	rxq->qdev = qdev;
+	rxq->mb_pool = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->queue_id = queue_idx;
+	rxq->port_id = dev->data->port_id;
+
+	rxq->rx_buf_size = qede_set_rx_buf_size(mp, pkt_len);
+	if (pkt_len > ETHER_MAX_LEN) {
+		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		DP_NOTICE(edev, false, "jumbo frame enabled\n");
+	} else {
+		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+	}
+
+	qdev->mtu = rxq->rx_buf_size;
+	DP_INFO(edev, "rx_buf_size=%u\n", qdev->mtu);
+
+	/* Allocate the parallel driver ring for Rx buffers */
+	size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
+	rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
+					     RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->sw_rx_ring) {
+		DP_NOTICE(edev, false,
+			  "Unable to alloc memory for sw_rx_ring on socket %u\n",
+			  socket_id);
+		rte_free(rxq);
+		rxq = NULL;
+		return -ENOMEM;
+	}
+
+	/* Allocate FW Rx ring  */
+	rc = qdev->ops->common->chain_alloc(edev,
+					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+					    ECORE_CHAIN_MODE_NEXT_PTR,
+					    ECORE_CHAIN_CNT_TYPE_U16,
+					    rxq->nb_rx_desc,
+					    sizeof(struct eth_rx_bd),
+					    &rxq->rx_bd_ring);
+
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(edev, false,
+			  "Unable to alloc memory for rxbd ring on socket %u\n",
+			  socket_id);
+		rte_free(rxq->sw_rx_ring);
+		rxq->sw_rx_ring = NULL;
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+	/* Allocate FW completion ring */
+	rc = qdev->ops->common->chain_alloc(edev,
+					    ECORE_CHAIN_USE_TO_CONSUME,
+					    ECORE_CHAIN_MODE_PBL,
+					    ECORE_CHAIN_CNT_TYPE_U16,
+					    rxq->nb_rx_desc,
+					    sizeof(union eth_rx_cqe),
+					    &rxq->rx_comp_ring);
+
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(edev, false,
+			  "Unable to alloc memory for cqe ring on socket %u\n",
+			  socket_id);
+		/* TBD: Freeing RX BD ring */
+		rte_free(rxq->sw_rx_ring);
+		rxq->sw_rx_ring = NULL;
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+	/* Allocate buffers for the Rx ring */
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		rc = qede_alloc_rx_buffer(rxq);
+		if (rc) {
+			DP_NOTICE(edev, false,
+				  "RX buffer allocation failed at idx=%d\n", i);
+			goto err4;
+		}
+	}
+
+	dev->data->rx_queues[queue_idx] = rxq;
+	if (!qdev->rx_queues)
+		qdev->rx_queues = (struct qede_rx_queue **)dev->data->rx_queues;
+
+	DP_NOTICE(edev, false, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
+		  queue_idx, nb_desc, qdev->mtu, socket_id);
+
+	return 0;
+err4:
+	qede_rx_queue_release(rxq);
+	return -ENOMEM;
+}
+
+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
+{
+	unsigned i;
+
+	PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
+
+	if (txq->sw_tx_ring != NULL) {
+		for (i = 0; i < txq->nb_tx_desc; i++) {
+			if (txq->sw_tx_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
+				txq->sw_tx_ring[i].mbuf = NULL;
+			}
+		}
+	}
+}
+
+void qede_tx_queue_release(void *tx_queue)
+{
+	struct qede_tx_queue *txq = tx_queue;
+
+	if (txq != NULL) {
+		qede_tx_queue_release_mbufs(txq);
+		if (txq->sw_tx_ring) {
+			rte_free(txq->sw_tx_ring);
+			txq->sw_tx_ring = NULL;
+		}
+		rte_free(txq);
+	}
+	tx_queue = NULL;
+}
+
+int
+qede_tx_queue_setup(struct rte_eth_dev *dev,
+		    uint16_t queue_idx,
+		    uint16_t nb_desc,
+		    unsigned int socket_id,
+		    const struct rte_eth_txconf *tx_conf)
+{
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qede_tx_queue *txq;
+	int rc;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	if (!rte_is_power_of_2(nb_desc)) {
+		DP_NOTICE(edev, false, "Ring size %u is not power of 2\n",
+			  nb_desc);
+		return -EINVAL;
+	}
+
+	/* Free memory prior to re-allocation if needed... */
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (txq == NULL) {
+		DP_ERR(edev,
+		       "Unable to allocate memory for txq on socket %u",
+		       socket_id);
+		return -ENOMEM;
+	}
+
+	txq->nb_tx_desc = nb_desc;
+	txq->qdev = qdev;
+	txq->port_id = dev->data->port_id;
+
+	rc = qdev->ops->common->chain_alloc(edev,
+					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+					    ECORE_CHAIN_MODE_PBL,
+					    ECORE_CHAIN_CNT_TYPE_U16,
+					    txq->nb_tx_desc,
+					    sizeof(union eth_tx_bd_types),
+					    &txq->tx_pbl);
+	if (rc != ECORE_SUCCESS) {
+		DP_ERR(edev,
+		       "Unable to allocate memory for txbd ring on socket %u",
+		       socket_id);
+		qede_tx_queue_release(txq);
+		return -ENOMEM;
+	}
+
+	/* Allocate software ring */
+	txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
+					     (sizeof(struct qede_tx_entry) *
+					      txq->nb_tx_desc),
+					     RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (!txq->sw_tx_ring) {
+		DP_ERR(edev,
+		       "Unable to allocate memory for txbd ring on socket %u",
+		       socket_id);
+		qede_tx_queue_release(txq);
+		return -ENOMEM;
+	}
+
+	txq->queue_id = queue_idx;
+
+	txq->nb_tx_avail = txq->nb_tx_desc;
+
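+	/* Unless overridden via tx_conf, start reclaiming completed BDs
+	 * once more than QEDE_DEFAULT_TX_FREE_THRESH descriptors are in
+	 * flight (see qede_xmit_pkts).
+	 */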
+	txq->tx_free_thresh =
+	    tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
+	    (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
+
+	dev->data->tx_queues[queue_idx] = txq;
+	if (!qdev->tx_queues)
+		qdev->tx_queues = (struct qede_tx_queue **)dev->data->tx_queues;
+
+	txq->txq_counter = 0;
+
+	DP_NOTICE(edev, false,
+		  "txq %u num_desc %u tx_free_thresh %u socket %u\n",
+		  queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
+
+	return 0;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
+static void qede_init_fp(struct qede_dev *qdev)
+{
+	struct qede_fastpath *fp;
+	int rss_id, txq_index, tc;
+
+	memset((void *)qdev->fp_array, 0, (QEDE_RSS_CNT(qdev) *
+					   sizeof(*qdev->fp_array)));
+	memset((void *)qdev->sb_array, 0, (QEDE_RSS_CNT(qdev) *
+					   sizeof(*qdev->sb_array)));
+	for_each_rss(rss_id) {
+		fp = &qdev->fp_array[rss_id];
+
+		fp->qdev = qdev;
+		fp->rss_id = rss_id;
+
+		/* Point rxq to the generic rte queues that were created
+		 * during queue setup.
+		 */
+		fp->rxq = qdev->rx_queues[rss_id];
+		fp->sb_info = &qdev->sb_array[rss_id];
+
+		for (tc = 0; tc < qdev->num_tc; tc++) {
+			txq_index = tc * QEDE_RSS_CNT(qdev) + rss_id;
+			fp->txqs[tc] = qdev->tx_queues[txq_index];
+			fp->txqs[tc]->queue_id = txq_index;
+			/* Compose the fastpath name for this queue */
+			snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+				 "qdev", rss_id);
+		}
+	}
+
+	qdev->gro_disable = gro_disable;
+}
+
+void qede_free_fp_arrays(struct qede_dev *qdev)
+{
+	/* It assumes qede_free_mem_load() has been called beforehand */
+	if (qdev->fp_array != NULL) {
+		rte_free(qdev->fp_array);
+		qdev->fp_array = NULL;
+	}
+
+	if (qdev->sb_array != NULL) {
+		rte_free(qdev->sb_array);
+		qdev->sb_array = NULL;
+	}
+}
+
+int qede_alloc_fp_array(struct qede_dev *qdev)
+{
+	struct qede_fastpath *fp;
+	struct ecore_dev *edev = &qdev->edev;
+	int i;
+
+	qdev->fp_array = rte_calloc("fp", QEDE_RSS_CNT(qdev),
+				    sizeof(*qdev->fp_array),
+				    RTE_CACHE_LINE_SIZE);
+
+	if (!qdev->fp_array) {
+		DP_NOTICE(edev, true, "fp array allocation failed\n");
+		return -ENOMEM;
+	}
+
+	qdev->sb_array = rte_calloc("sb", QEDE_RSS_CNT(qdev),
+				    sizeof(*qdev->sb_array),
+				    RTE_CACHE_LINE_SIZE);
+
+	if (!qdev->sb_array) {
+		DP_NOTICE(edev, true, "sb array allocation failed\n");
+		rte_free(qdev->fp_array);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* This function allocates fast-path status block memory */
+static int
+qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
+		  uint16_t sb_id)
+{
+	struct ecore_dev *edev = &qdev->edev;
+	struct status_block *sb_virt;
+	dma_addr_t sb_phys;
+	int rc;
+
+	sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
+
+	if (!sb_virt) {
+		DP_ERR(edev, "Status block allocation failed\n");
+		return -ENOMEM;
+	}
+
+	rc = qdev->ops->common->sb_init(edev, sb_info,
+					sb_virt, sb_phys, sb_id,
+					QED_SB_TYPE_L2_QUEUE);
+	if (rc) {
+		DP_ERR(edev, "Status block initialization failed\n");
+		/* TBD: No dma_free_coherent possible */
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qede_alloc_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
+{
+	return qede_alloc_mem_sb(qdev, fp->sb_info, fp->rss_id);
+}
+
+static void qede_shrink_txq(struct qede_dev *qdev, uint16_t num_rss)
+{
+	/* @@@TBD - this should also re-set the qed interrupts */
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *qdev)
+{
+	int rc = 0, rss_id;
+	struct ecore_dev *edev = &qdev->edev;
+
+	for (rss_id = 0; rss_id < QEDE_RSS_CNT(qdev); rss_id++) {
+		struct qede_fastpath *fp = &qdev->fp_array[rss_id];
+
+		rc = qede_alloc_mem_fp(qdev, fp);
+		if (rc)
+			break;
+	}
+
+	if (rss_id != QEDE_RSS_CNT(qdev)) {
+		/* Failed allocating memory for all the queues */
+		if (!rss_id) {
+			DP_ERR(edev,
+			       "Failed to alloc memory for leading queue\n");
+			rc = -ENOMEM;
+		} else {
+			DP_NOTICE(edev, false,
+				  "Failed to allocate memory for all the "
+				  "RSS queues\n"
+				  "Desired: %d queues, allocated: %d queues\n",
+				  QEDE_RSS_CNT(qdev), rss_id);
+			qede_shrink_txq(qdev, rss_id);
+		}
+		qdev->num_rss = rss_id;
+	}
+
+	return 0;
+}
+
+static inline void
+qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
+	uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
+	uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
+	struct eth_rx_prod_data rx_prods = { 0 };
+
+	/* Update producers */
+	rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
+	rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
+
+	/* Make sure that the BD and SGE data is updated before updating the
+	 * producers since FW might read the BD/SGE right after the producer
+	 * is updated.
+	 */
+	rte_wmb();
+
+	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+			(uint32_t *)&rx_prods);
+
+	/* mmiowb is needed to synchronize doorbell writes from more than one
+	 * processor. It guarantees that the write arrives to the device before
+	 * the napi lock is released and another qede_poll is called (possibly
+	 * on another CPU). Without this barrier, the next doorbell can bypass
+	 * this doorbell. This is applicable to IA64/Altix systems.
+	 */
+	rte_wmb();
+
+	PMD_RX_LOG(DEBUG, rxq, "bd_prod %u  cqe_prod %u\n", bd_prod, cqe_prod);
+}
+
+static inline uint32_t
+qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
+{
+	return index % n_rx_rings;
+}
+
+#ifdef ENC_SUPPORTED
+static bool qede_tunn_exist(uint16_t flag)
+{
+	return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+		    PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
+}
+
+static inline uint8_t qede_check_tunn_csum(uint16_t flag)
+{
+	uint8_t tcsum = 0;
+	uint16_t csum_flag = 0;
+
+	if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+	     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
+		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+	}
+
+	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+	    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+	    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+	    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+	if (csum_flag & flag)
+		return QEDE_CSUM_ERROR;
+
+	return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+#else
+static inline uint8_t qede_tunn_exist(uint16_t flag)
+{
+	return 0;
+}
+
+static inline uint8_t qede_check_tunn_csum(uint16_t flag)
+{
+	return 0;
+}
+#endif
+
+static inline uint8_t qede_check_notunn_csum(uint16_t flag)
+{
+	uint8_t csum = 0;
+	uint16_t csum_flag = 0;
+
+	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+		csum = QEDE_CSUM_UNNECESSARY;
+	}
+
+	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+	    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+	if (csum_flag & flag)
+		return QEDE_CSUM_ERROR;
+
+	return csum;
+}
+
+static inline uint8_t qede_check_csum(uint16_t flag)
+{
+	if (likely(!qede_tunn_exist(flag)))
+		return qede_check_notunn_csum(flag);
+	else
+		return qede_check_tunn_csum(flag);
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+	ecore_chain_consume(&rxq->rx_bd_ring);
+	rxq->sw_rx_cons++;
+}
+
+static inline void
+qede_reuse_page(struct qede_dev *qdev,
+		struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
+{
+	struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
+	uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+	struct qede_rx_entry *curr_prod;
+	dma_addr_t new_mapping;
+
+	curr_prod = &rxq->sw_rx_ring[idx];
+	*curr_prod = *curr_cons;
+
+	new_mapping = curr_prod->mbuf->buf_physaddr + curr_prod->page_offset;
+
+	rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
+	rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
+
+	rxq->sw_rx_prod++;
+}
+
+static inline void
+qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+			struct qede_dev *qdev, uint8_t count)
+{
+	struct qede_rx_entry *curr_cons;
+
+	for (; count > 0; count--) {
+		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
+		qede_reuse_page(qdev, rxq, curr_cons);
+		qede_rx_bd_ring_consume(rxq);
+	}
+}
+
+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
+{
+	uint32_t p_type = RTE_PTYPE_UNKNOWN;
+	/* TBD - L4 indications needed ? */
+	uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+			      PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);
+
+	/* protocol = 3 means LLC/SNAP over Ethernet */
+	if (unlikely(protocol == 0 || protocol == 3))
+		p_type = RTE_PTYPE_UNKNOWN;
+	else if (protocol == 1)
+		p_type = RTE_PTYPE_L3_IPV4;
+	else if (protocol == 2)
+		p_type = RTE_PTYPE_L3_IPV6;
+
+	return RTE_PTYPE_L2_ETHER | p_type;
+}
+
+uint16_t
+qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct qede_rx_queue *rxq = p_rxq;
+	struct qede_dev *qdev = rxq->qdev;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
+	uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
+	uint16_t rx_pkt = 0;
+	union eth_rx_cqe *cqe;
+	struct eth_fast_path_rx_reg_cqe *fp_cqe;
+	register struct rte_mbuf *rx_mb = NULL;
+	enum eth_rx_cqe_type cqe_type;
+	uint16_t len, pad;
+	uint16_t preload_idx;
+	uint8_t csum_flag;
+	uint16_t parse_flag;
+
+	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	rte_rmb();
+
+	if (hw_comp_cons == sw_comp_cons)
+		return 0;
+
+	while (sw_comp_cons != hw_comp_cons) {
+		/* Get the CQE from the completion ring */
+		cqe =
+		    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+		cqe_type = cqe->fast_path_regular.type;
+
+		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+			PMD_RX_LOG(DEBUG, rxq, "Got a slow-path CQE\n");
+
+			qdev->ops->eth_cqe_completion(edev, fp->rss_id,
+				(struct eth_slow_path_rx_cqe *)cqe);
+			goto next_cqe;
+		}
+
+		/* Get the data from the SW ring */
+		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+		rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+		assert(rx_mb != NULL);
+
+		/* non GRO */
+		fp_cqe = &cqe->fast_path_regular;
+
+		len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+		pad = fp_cqe->placement_offset;
+		PMD_RX_LOG(DEBUG, rxq,
+			   "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
+			   " len = %u, parsing_flags = %d\n",
+			   cqe_type, fp_cqe->bitfields,
+			   rte_le_to_cpu_16(fp_cqe->vlan_tag),
+			   len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));
+
+		/* If this is an error packet then drop it */
+		parse_flag =
+		    rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
+		csum_flag = qede_check_csum(parse_flag);
+		if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+			PMD_RX_LOG(ERR, rxq,
+				   "CQE in CONS = %u has error, flags = 0x%x "
+				   "dropping incoming packet\n",
+				   sw_comp_cons, parse_flag);
+			rxq->rx_hw_errors++;
+			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
+			goto next_cqe;
+		}
+
+		if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+			PMD_RX_LOG(ERR, rxq,
+				   "New buffer allocation failed, "
+				   "dropping incoming packet\n");
+			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
+			rte_eth_devices[rxq->port_id].
+			    data->rx_mbuf_alloc_failed++;
+			rxq->rx_alloc_errors++;
+			break;
+		}
+
+		qede_rx_bd_ring_consume(rxq);
+
+		/* Prefetch next mbuf while processing current one. */
+		preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+		rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+
+		if (fp_cqe->bd_num != 1)
+			PMD_RX_LOG(DEBUG, rxq,
+				   "Jumbo-over-BD packet not supported\n");
+
+		rx_mb->buf_len = len + pad;
+		rx_mb->data_off = pad;
+		rx_mb->nb_segs = 1;
+		rx_mb->data_len = len;
+		rx_mb->pkt_len = len;
+		rx_mb->port = rxq->port_id;
+		rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);
+		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
+
+		if (CQE_HAS_VLAN(parse_flag) ||
+		    CQE_HAS_OUTER_VLAN(parse_flag)) {
+			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
+		}
+
+		rx_pkts[rx_pkt] = rx_mb;
+		rx_pkt++;
+next_cqe:
+		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
+		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+		if (rx_pkt == nb_pkts) {
+			PMD_RX_LOG(DEBUG, rxq,
+				   "Budget reached nb_pkts=%u received=%u\n",
+				   rx_pkt, nb_pkts);
+			break;
+		}
+	}
+
+	qede_update_rx_prod(qdev, rxq);
+
+	PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
+
+	return rx_pkt;
+}
+
+static inline int
+qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
+{
+	uint16_t idx = TX_CONS(txq);
+	struct eth_tx_bd *tx_data_bd;
+	struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
+
+	if (unlikely(!mbuf)) {
+		PMD_TX_LOG(ERR, txq,
+			   "null mbuf nb_tx_desc %u nb_tx_avail %u "
+			   "sw_tx_cons %u sw_tx_prod %u\n",
+			   txq->nb_tx_desc, txq->nb_tx_avail, idx,
+			   TX_PROD(txq));
+		return -1;
+	}
+
+	/* Free now */
+	rte_pktmbuf_free_seg(mbuf);
+	txq->sw_tx_ring[idx].mbuf = NULL;
+	ecore_chain_consume(&txq->tx_pbl);
+	txq->nb_tx_avail++;
+
+	return 0;
+}
+
+static inline uint16_t
+qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
+{
+	uint16_t tx_compl = 0;
+	uint16_t hw_bd_cons;
+	int rc;
+
+	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+	rte_compiler_barrier();
+
+	while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
+		rc = qede_free_tx_pkt(edev, txq);
+		if (rc) {
+			DP_NOTICE(edev, true,
+				  "hw_bd_cons = %d, chain_cons=%d\n",
+				  hw_bd_cons,
+				  ecore_chain_get_cons_idx(&txq->tx_pbl));
+			break;
+		}
+		txq->sw_tx_cons++;	/* Making TXD available */
+		tx_compl++;
+	}
+
+	PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
+		   tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
+	return tx_compl;
+}
+
+uint16_t
+qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct qede_tx_queue *txq = p_txq;
+	struct qede_dev *qdev = txq->qdev;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qede_fastpath *fp = &qdev->fp_array[txq->queue_id];
+	struct eth_tx_1st_bd *first_bd;
+	uint16_t nb_tx_pkts;
+	uint16_t nb_pkt_sent = 0;
+	uint16_t bd_prod;
+	uint16_t idx;
+	uint16_t tx_count;
+
+	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
+		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
+			   nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
+		(void)qede_process_tx_compl(edev, txq);
+	}
+
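+	/* Each packet consumes up to MAX_NUM_TX_BDS descriptors (currently
+	 * a single BD, i.e. non-segmented mbufs only), so cap the burst by
+	 * the number of free BDs.
+	 */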
+	nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / MAX_NUM_TX_BDS));
+	if (unlikely(nb_tx_pkts == 0)) {
+		PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
+			   nb_pkts, txq->nb_tx_avail);
+		return 0;
+	}
+
+	tx_count = nb_tx_pkts;
+	while (nb_tx_pkts--) {
+		/* Fill the entry in the SW ring and the BDs in the FW ring */
+		idx = TX_PROD(txq);
+		struct rte_mbuf *mbuf = *tx_pkts++;
+		txq->sw_tx_ring[idx].mbuf = mbuf;
+		first_bd = (struct eth_tx_1st_bd *)
+		    ecore_chain_produce(&txq->tx_pbl);
+		first_bd->data.bd_flags.bitfields =
+		    1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+		/* Map mbuf linear data for DMA and set it in the first BD */
+		QEDE_BD_SET_ADDR_LEN(first_bd, RTE_MBUF_DATA_DMA_ADDR(mbuf),
+				     mbuf->data_len);
+
+		/* Descriptor based VLAN insertion */
+		if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+			first_bd->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
+			first_bd->data.bd_flags.bitfields |=
+			    1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+		}
+
+		/* Offload the IP checksum in the hardware */
+		if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
+			first_bd->data.bd_flags.bitfields |=
+			    1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+		}
+
+		/* L4 checksum offload (tcp or udp) */
+		if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+			first_bd->data.bd_flags.bitfields |=
+			    1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+			/* IPv6 + extn. -> later */
+		}
+		first_bd->data.nbds = MAX_NUM_TX_BDS;
+		txq->sw_tx_prod++;
+		rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
+		txq->nb_tx_avail--;
+		bd_prod =
+		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+		nb_pkt_sent++;
+	}
+
+	/* Write value of prod idx into bd_prod */
+	txq->tx_db.data.bd_prod = bd_prod;
+	rte_wmb();
+	rte_compiler_barrier();
+	DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);
+	rte_wmb();
+
+	/* Check again for Tx completions if enabled */
+#ifdef RTE_LIBRTE_QEDE_TX_COMP_END
+	(void)qede_process_tx_compl(edev, txq);
+#endif
+
+	PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
+		   nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
+
+	return nb_pkt_sent;
+}
+
+int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_link_output link_output;
+	int rc = 0;
+
+	DP_NOTICE(edev, false, "port %u\n", eth_dev->data->port_id);
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	if (qdev->state == QEDE_START) {
+		DP_INFO(edev, "device already started\n");
+		return 0;
+	}
+
+	if (qdev->state == QEDE_CLOSE) {
+		rc = qede_alloc_fp_array(qdev);
+		qede_init_fp(qdev);
+		rc = qede_alloc_mem_load(qdev);
+		DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+			QEDE_RSS_CNT(qdev), qdev->num_tc);
+	} else if (qdev->state == QEDE_STOP) {
+		DP_INFO(edev, "restarting port %u\n", eth_dev->data->port_id);
+	} else {
+		DP_INFO(edev, "unknown state port %u\n",
+			eth_dev->data->port_id);
+		return -EINVAL;
+	}
+
+	if (rc) {
+		DP_ERR(edev, "Failed to start queues\n");
+		/* TBD: free */
+		return rc;
+	}
+
+	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+	qede_dev_set_link_state(eth_dev, true);
+
+	/* Query whether link is already-up */
+	memset(&link_output, 0, sizeof(link_output));
+	qdev->ops->common->get_link(edev, &link_output);
+	DP_NOTICE(edev, false, "link status: %s\n",
+		  link_output.link_up ? "up" : "down");
+
+	qdev->state = QEDE_START;
+
+	qede_config_rx_mode(eth_dev);
+
+	DP_INFO(edev, "dev_state is QEDE_START\n");
+
+	return 0;
+}
+
+static int qede_drain_txq(struct qede_dev *qdev,
+			  struct qede_tx_queue *txq, bool allow_drain)
+{
+	struct ecore_dev *edev = &qdev->edev;
+	int rc, cnt = 1000;
+
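+	/* Poll for completions for up to ~1 second (1000 x 1 ms) before
+	 * asking the MCP to drain the queue.
+	 */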
+	while (txq->sw_tx_cons != txq->sw_tx_prod) {
+		qede_process_tx_compl(edev, txq);
+		if (!cnt) {
+			if (allow_drain) {
+				DP_NOTICE(edev, true,
+					  "Tx queue[%u] is stuck, "
+					  "requesting MCP to drain\n",
+					  txq->queue_id);
+				rc = qdev->ops->common->drain(edev);
+				if (rc)
+					return rc;
+				return qede_drain_txq(qdev, txq, false);
+			} else {
+				DP_NOTICE(edev, true,
+					  "Timeout waiting for tx queue[%d]: "
+					  "PROD=%d, CONS=%d\n",
+					  txq->queue_id, txq->sw_tx_prod,
+					  txq->sw_tx_cons);
+				return -ENODEV;
+			}
+		}
+		cnt--;
+		DELAY(1000);
+		rte_compiler_barrier();
+	}
+
+	/* FW finished processing, wait for HW to transmit all tx packets */
+	DELAY(2000);
+
+	return 0;
+}
+
+static int qede_stop_queues(struct qede_dev *qdev)
+{
+	struct qed_update_vport_params vport_update_params;
+	struct ecore_dev *edev = &qdev->edev;
+	int rc, tc, i;
+
+	/* Disable the vport */
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
+	vport_update_params.vport_id = 0;
+	vport_update_params.update_vport_active_flg = 1;
+	vport_update_params.vport_active_flg = 0;
+	vport_update_params.update_rss_flg = 0;
+
+	DP_INFO(edev, "vport_update\n");
+
+	rc = qdev->ops->vport_update(edev, &vport_update_params);
+	if (rc) {
+		DP_ERR(edev, "Failed to update vport\n");
+		return rc;
+	}
+
+	DP_INFO(edev, "Flushing tx queues\n");
+
+	/* Flush Tx queues. If needed, request drain from MCP */
+	for_each_rss(i) {
+		struct qede_fastpath *fp = &qdev->fp_array[i];
+		for (tc = 0; tc < qdev->num_tc; tc++) {
+			struct qede_tx_queue *txq = fp->txqs[tc];
+			rc = qede_drain_txq(qdev, txq, true);
+			if (rc)
+				return rc;
+		}
+	}
+
+	/* Stop all Queues in reverse order */
+	for (i = QEDE_RSS_CNT(qdev) - 1; i >= 0; i--) {
+		struct qed_stop_rxq_params rx_params;
+
+		/* Stop the Tx Queue(s) */
+		for (tc = 0; tc < qdev->num_tc; tc++) {
+			struct qed_stop_txq_params tx_params;
+
+			memset(&tx_params, 0, sizeof(tx_params));
+			tx_params.rss_id = i;
+			tx_params.tx_queue_id = tc * QEDE_RSS_CNT(qdev) + i;
+
+			DP_INFO(edev, "Stopping tx queues\n");
+			rc = qdev->ops->q_tx_stop(edev, &tx_params);
+			if (rc) {
+				DP_ERR(edev, "Failed to stop TXQ #%d\n",
+				       tx_params.tx_queue_id);
+				return rc;
+			}
+		}
+
+		/* Stop the Rx Queue */
+		memset(&rx_params, 0, sizeof(rx_params));
+		rx_params.rss_id = i;
+		rx_params.rx_queue_id = i;
+		rx_params.eq_completion_only = 1;
+
+		DP_INFO(edev, "Stopping rx queues\n");
+
+		rc = qdev->ops->q_rx_stop(edev, &rx_params);
+		if (rc) {
+			DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+			return rc;
+		}
+	}
+
+	DP_INFO(edev, "Stopping vports\n");
+
+	/* Stop the vport */
+	rc = qdev->ops->vport_stop(edev, 0);
+	if (rc)
+		DP_ERR(edev, "Failed to stop VPORT\n");
+
+	return rc;
+}
+
+void qede_reset_fp_rings(struct qede_dev *qdev)
+{
+	uint16_t rss_id;
+	uint8_t tc;
+
+	for_each_rss(rss_id) {
+		DP_INFO(&qdev->edev, "reset fp chain for rss %u\n", rss_id);
+		struct qede_fastpath *fp = &qdev->fp_array[rss_id];
+		ecore_chain_reset(&fp->rxq->rx_bd_ring);
+		ecore_chain_reset(&fp->rxq->rx_comp_ring);
+		for (tc = 0; tc < qdev->num_tc; tc++) {
+			struct qede_tx_queue *txq = fp->txqs[tc];
+			ecore_chain_reset(&txq->tx_pbl);
+		}
+	}
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
+{
+	uint8_t tc;
+
+	qede_rx_queue_release(fp->rxq);
+	for (tc = 0; tc < qdev->num_tc; tc++)
+		qede_tx_queue_release(fp->txqs[tc]);
+}
+
+void qede_free_mem_load(struct qede_dev *qdev)
+{
+	uint8_t rss_id;
+
+	for_each_rss(rss_id) {
+		struct qede_fastpath *fp = &qdev->fp_array[rss_id];
+		qede_free_mem_fp(qdev, fp);
+	}
+	/* qdev->num_rss = 0; */
+}
+
+/*
+ * Stop an Ethernet device. The device can be restarted with a call to
+ * rte_eth_dev_start().
+ * Do not change link state and do not release sw structures.
+ */
+void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	int rc;
+
+	DP_NOTICE(edev, false, "port %u\n", eth_dev->data->port_id);
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	if (qdev->state != QEDE_START) {
+		DP_INFO(edev, "device not yet started\n");
+		return;
+	}
+
+	rc = qede_stop_queues(qdev);
+
+	if (rc)
+		DP_ERR(edev, "Failed to stop queues\n");
+
+	DP_INFO(edev, "Stopped queues\n");
+
+	qdev->ops->fastpath_stop(edev);
+
+	qede_reset_fp_rings(qdev);
+
+	qdev->state = QEDE_STOP;
+
+	DP_INFO(edev, "dev_state is QEDE_STOP\n");
+}
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
new file mode 100644
index 0000000..5e4e55b
--- /dev/null
+++ b/drivers/net/qede/qede_rxtx.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+
+#ifndef _QEDE_RXTX_H_
+#define _QEDE_RXTX_H_
+
+#include "qede_ethdev.h"
+
+/* Ring Descriptors */
+#define RX_RING_SIZE_POW        16	/* 64K */
+#define RX_RING_SIZE            (1ULL << RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX          (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN          128
+#define NUM_RX_BDS_DEF          NUM_RX_BDS_MAX
+#define NUM_RX_BDS(q)           (q->nb_rx_desc - 1)
+
+#define TX_RING_SIZE_POW        16	/* 64K */
+#define TX_RING_SIZE            (1ULL << TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX          (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN          128
+#define NUM_TX_BDS_DEF          NUM_TX_BDS_MAX
+#define NUM_TX_BDS(q)           (q->nb_tx_desc - 1)
+
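+/* Ring sizes are powers of two, so producer/consumer indices wrap onto
+ * the ring with a simple mask.
+ */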
+#define TX_CONS(txq)            (txq->sw_tx_cons & NUM_TX_BDS(txq))
+#define TX_PROD(txq)            (txq->sw_tx_prod & NUM_TX_BDS(txq))
+
+/* Number of TX BDs per packet used currently */
+#define MAX_NUM_TX_BDS			1
+
+#define QEDE_DEFAULT_TX_FREE_THRESH	32
+
+#define QEDE_CSUM_ERROR			(1 << 0)
+#define QEDE_CSUM_UNNECESSARY		(1 << 1)
+#define QEDE_TUNN_CSUM_UNNECESSARY	(1 << 2)
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+	((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
+
+#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \
+	do { \
+		(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
+		(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
+		(bd)->nbytes = rte_cpu_to_le_16(len); \
+	} while (0)
+
+#define CQE_HAS_VLAN(flags) \
+	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
+		<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
+
+#define CQE_HAS_OUTER_VLAN(flags) \
+	((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
+		<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
+
+#define QEDE_IP_HEADER_ALIGNMENT_PADDING        2
+
+/* Max supported alignment is 256 (8 shift)
+ * minimal alignment shift 6 is optimal for 57xxx HW performance
+ */
+#define QEDE_L1_CACHE_SHIFT	6
+#define QEDE_RX_ALIGN_SHIFT	(RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
+#define QEDE_FW_RX_ALIGN_END	(1UL << QEDE_RX_ALIGN_SHIFT)
+
+/* Ethernet header + CRC + 2 VLAN tags (8 bytes) + LLC/SNAP (8 bytes) +
+ * IP header alignment padding + FW Rx alignment
+ */
+#define QEDE_ETH_OVERHEAD       (ETHER_HDR_LEN + ETHER_CRC_LEN + \
+				 8 + 8 + QEDE_IP_HEADER_ALIGNMENT_PADDING + \
+				 QEDE_FW_RX_ALIGN_END)
+
+/* TBD: Excluding IPV6 */
+#define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
+				 ETH_RSS_NONFRAG_IPV4_UDP)
+
+#define QEDE_TXQ_FLAGS		((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
+
+#define MAX_NUM_TC		8
+
+#define for_each_rss(i) for (i = 0; i < qdev->num_rss; i++)
+
+/*
+ * RX BD descriptor ring
+ */
+struct qede_rx_entry {
+	struct rte_mbuf *mbuf;
+	uint32_t page_offset;
+	/* allows expansion .. */
+};
+
+/*
+ * Structure associated with each RX queue.
+ */
+struct qede_rx_queue {
+	struct rte_mempool *mb_pool;
+	struct ecore_chain rx_bd_ring;
+	struct ecore_chain rx_comp_ring;
+	uint16_t *hw_cons_ptr;
+	void OSAL_IOMEM *hw_rxq_prod_addr;
+	struct qede_rx_entry *sw_rx_ring;
+	uint16_t sw_rx_cons;
+	uint16_t sw_rx_prod;
+	uint16_t nb_rx_desc;
+	uint16_t queue_id;
+	uint16_t port_id;
+	uint16_t rx_buf_size;
+	uint64_t rx_hw_errors;
+	uint64_t rx_alloc_errors;
+	struct qede_dev *qdev;
+};
+
+/*
+ * TX BD descriptor ring
+ */
+struct qede_tx_entry {
+	struct rte_mbuf *mbuf;
+	uint8_t flags;
+};
+
+union db_prod {
+	struct eth_db_data data;
+	uint32_t raw;
+};
+
+struct qede_tx_queue {
+	struct ecore_chain tx_pbl;
+	struct qede_tx_entry *sw_tx_ring;
+	uint16_t nb_tx_desc;
+	uint16_t nb_tx_avail;
+	uint16_t tx_free_thresh;
+	uint16_t queue_id;
+	uint16_t *hw_cons_ptr;
+	uint16_t sw_tx_cons;
+	uint16_t sw_tx_prod;
+	void OSAL_IOMEM *doorbell_addr;
+	volatile union db_prod tx_db;
+	uint16_t port_id;
+	uint64_t txq_counter;
+	struct qede_dev *qdev;
+};
+
+struct qede_fastpath {
+	struct qede_dev *qdev;
+	uint8_t rss_id;
+	struct ecore_sb_info *sb_info;
+	struct qede_rx_queue *rxq;
+	struct qede_tx_queue *txqs[MAX_NUM_TC];
+	char name[80];
+};
+
+/*
+ * RX/TX function prototypes
+ */
+int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			uint16_t nb_desc, unsigned int socket_id,
+			const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mp);
+
+int qede_tx_queue_setup(struct rte_eth_dev *dev,
+			uint16_t queue_idx,
+			uint16_t nb_desc,
+			unsigned int socket_id,
+			const struct rte_eth_txconf *tx_conf);
+
+void qede_rx_queue_release(void *rx_queue);
+
+void qede_tx_queue_release(void *tx_queue);
+
+int qede_dev_start(struct rte_eth_dev *eth_dev);
+
+void qede_dev_stop(struct rte_eth_dev *eth_dev);
+
+void qede_reset_fp_rings(struct qede_dev *qdev);
+
+void qede_free_fp_arrays(struct qede_dev *qdev);
+
+void qede_free_mem_load(struct qede_dev *qdev);
+
+uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+			uint16_t nb_pkts);
+
+uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
+			uint16_t nb_pkts);
+
+#endif /* _QEDE_RXTX_H_ */
diff --git a/drivers/net/qede/rte_pmd_qede_version.map b/drivers/net/qede/rte_pmd_qede_version.map
new file mode 100644
index 0000000..5151684
--- /dev/null
+++ b/drivers/net/qede/rte_pmd_qede_version.map
@@ -0,0 +1,4 @@
+DPDK_2.2 {
+
+	local: *;
+};
-- 
1.7.10.3

Thread overview: 23+ messages
2016-03-19  0:53 [PATCH v3 00/10] qede: Add qede PMD Rasesh Mody
2016-03-19  0:53 ` [PATCH v3 01/10] qede: Add maintainers Rasesh Mody
2016-03-19  0:53 ` [PATCH v3 02/10] qede: Add documentation Rasesh Mody
2016-03-19  0:53 ` [PATCH v3 03/10] qede: Add license file Rasesh Mody
2016-03-19  0:53 ` [PATCH v3 04/10] qede: Add base driver Rasesh Mody
2016-03-19  0:53 ` Rasesh Mody [this message]
2016-03-21 17:32   ` [PATCH v3 05/10] qede: Add core driver Stephen Hemminger
2016-03-22  1:14     ` Rasesh Mody
2016-03-22 10:55   ` Bruce Richardson
2016-03-22 11:03     ` Bruce Richardson
2016-03-19  0:53 ` [PATCH v3 06/10] qede: Add L2 support Rasesh Mody
2016-03-22 11:04   ` Bruce Richardson
2016-03-19  0:53 ` [PATCH v3 07/10] qede: Add SRIOV support Rasesh Mody
2016-03-19  0:53 ` [PATCH v3 08/10] qede: Add attention support Rasesh Mody
2016-03-22 11:07   ` Bruce Richardson
2016-03-19  0:53 ` [PATCH v3 09/10] qede: Add DCBX support Rasesh Mody
2016-03-19  0:53 ` [PATCH v3 10/10] qede: Enable PMD build Rasesh Mody
2016-03-22 11:21   ` Bruce Richardson
2016-03-22 11:21 ` [PATCH v3 00/10] qede: Add qede PMD Richardson, Bruce
2016-03-22 11:30   ` Bruce Richardson
2016-03-29 20:52     ` Rasesh Mody
2016-03-30 12:34       ` Bruce Richardson
2016-03-24  1:52   ` Rasesh Mody
