From: Rasesh Mody <rasesh.mody@qlogic.com>
To: <thomas.monjalon@6wind.com>, <bruce.richardson@intel.com>
Cc: <dev@dpdk.org>, <ameen.rahman@qlogic.com>,
	<harish.patil@qlogic.com>, <sony.chacko@qlogic.com>,
	Rasesh Mody <rasesh.mody@qlogic.com>
Subject: [PATCH v3 06/10] qede: Add L2 support
Date: Fri, 18 Mar 2016 17:53:21 -0700
Message-ID: <1458348805-32648-7-git-send-email-rasesh.mody@qlogic.com>
In-Reply-To: <1458348805-32648-1-git-send-email-rasesh.mody@qlogic.com>
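
Add L2 base driver (ecore) support along with the qede_eth_if glue
between the PMD and the base driver: vport start/update/stop, Rx/Tx
queue start/stop ramrods, unicast and multicast MAC/VLAN filtering,
RSS and TPA configuration, and per-queue/per-port statistics.

A minimal sketch of the intended bring-up flow using the APIs added by
this patch (illustrative only; parameter values are placeholders and
error handling is elided):

	struct ecore_sp_vport_start_params start = { 0 };

	start.opaque_fid = p_hwfn->hw_info.opaque_fid;
	start.vport_id = 0;
	start.mtu = 1500;
	ecore_sp_vport_start(p_hwfn, &start);	/* VPORT_START ramrod */

	/* Queue ramrods; addresses come from the PMD's Rx/Tx rings */
	ecore_sp_eth_rx_queue_start(p_hwfn, opaque_fid, rx_queue_id,
				    vport_id, stats_id, sb, sb_index,
				    bd_max_bytes, bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size, &pp_prod);
	ecore_sp_eth_tx_queue_start(p_hwfn, opaque_fid, tx_queue_id,
				    vport_id, stats_id, sb, sb_index,
				    pbl_addr, pbl_size, &pp_doorbell);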

Signed-off-by: Harish Patil <harish.patil@qlogic.com>
Signed-off-by: Rasesh Mody <rasesh.mody@qlogic.com>
Signed-off-by: Sony Chacko <sony.chacko@qlogic.com>
---
 drivers/net/qede/Makefile            |    2 +
 drivers/net/qede/base/ecore_chain.h  |    6 +
 drivers/net/qede/base/ecore_l2.c     | 1608 ++++++++++++++++++++++++++++++++++
 drivers/net/qede/base/ecore_l2.h     |  101 +++
 drivers/net/qede/base/ecore_l2_api.h |  401 +++++++++
 drivers/net/qede/qede_eth_if.c       |  456 ++++++++++
 drivers/net/qede/qede_eth_if.h       |    2 +-
 drivers/net/qede/qede_ethdev.c       |   17 +-
 drivers/net/qede/qede_ethdev.h       |    1 +
 drivers/net/qede/qede_if.h           |    9 +
 drivers/net/qede/qede_main.c         |    2 +
 drivers/net/qede/qede_rxtx.c         |  192 ++++
 12 files changed, 2793 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/qede/base/ecore_l2.c
 create mode 100644 drivers/net/qede/base/ecore_l2.h
 create mode 100644 drivers/net/qede/base/ecore_l2_api.h
 create mode 100644 drivers/net/qede/qede_eth_if.c

diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index efaefb2..eb08635 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -70,6 +70,7 @@ $(foreach obj, $(ECORE_DRIVER_OBJS), $(eval CFLAGS+=$(CFLAGS_ECORE_DRIVER)))
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dev.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_hw.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_cxt.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_l2.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sp_commands.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_fw_funcs.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_spq.c
@@ -78,6 +79,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
 
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index c9c21a6..8c8e8b4 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -251,6 +251,12 @@ static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
 	return p_chain->page_cnt;
 }
 
+static OSAL_INLINE
+dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
+{
+	return p_chain->pbl.p_phys_table;
+}
+
 /**
  * @brief ecore_chain_advance_page -
  *
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
new file mode 100644
index 0000000..8d713e7
--- /dev/null
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -0,0 +1,1608 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_chain.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_l2.h"
+#include "ecore_sp_commands.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_hw.h"
+#include "ecore_mcp.h"
+
+#define ECORE_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41	/* CRC-32C (Castagnoli) polynomial */
+
+enum _ecore_status_t
+ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
+			 struct ecore_sp_vport_start_params *p_params)
+{
+	struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
+	struct ecore_sp_init_data init_data;
+	u8 abs_vport_id = 0;
+	u16 rx_mode = 0;
+
+	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = ecore_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = p_params->opaque_fid;
+	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   ETH_RAMROD_VPORT_START,
+				   PROTOCOLID_ETH, &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.vport_start;
+	p_ramrod->vport_id = abs_vport_id;
+
+	p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
+	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
+	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
+	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+	p_ramrod->untagged = p_params->only_untagged;
+	p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
+
+	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+	p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);
+
+	/* TPA related fields */
+	OSAL_MEMSET(&p_ramrod->tpa_param, 0,
+		    sizeof(struct eth_vport_tpa_param));
+	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+
+	switch (p_params->tpa_mode) {
+	case ECORE_TPA_MODE_GRO:
+		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
+		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
+		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
+		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
+		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
+		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
+		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+		break;
+	default:
+		break;
+	}
+
+	p_ramrod->tx_switching_en = p_params->tx_switching;
+#ifndef ASIC_ONLY
+	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+		p_ramrod->tx_switching_en = 0;
+#endif
+
+	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+	p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
+						    p_params->concrete_fid);
+
+	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t
+ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
+		     struct ecore_sp_vport_start_params *p_params)
+{
+	return ecore_sp_eth_vport_start(p_hwfn, p_params);
+}
+
+static enum _ecore_status_t
+ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
+			  struct vport_update_ramrod_data *p_ramrod,
+			  struct ecore_rss_params *p_rss)
+{
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	struct eth_vport_rss_config *p_config;
+	u16 abs_l2_queue = 0;
+	int i;
+
+	if (!p_rss) {
+		p_ramrod->common.update_rss_flg = 0;
+		return rc;
+	}
+	p_config = &p_ramrod->rss_config;
+
+	OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
+			  ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+	rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+	p_config->update_rss_key = p_rss->update_rss_key;
+
+	p_config->rss_mode = p_rss->rss_enable ?
+	    ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;
+
+	p_config->capabilities = 0;
+
+	SET_FIELD(p_config->capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+		  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
+	SET_FIELD(p_config->capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+		  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
+	SET_FIELD(p_config->capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
+	SET_FIELD(p_config->capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
+	SET_FIELD(p_config->capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
+	SET_FIELD(p_config->capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
+	p_config->tbl_size = p_rss->rss_table_size_log;
+	p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+		   p_ramrod->common.update_rss_flg,
+		   p_config->rss_mode,
+		   p_config->update_rss_capabilities,
+		   p_config->capabilities,
+		   p_config->update_rss_ind_table, p_config->update_rss_key);
+
+	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+		rc = ecore_fw_l2_queue(p_hwfn,
+				       (u8)p_rss->rss_ind_table[i],
+				       &abs_l2_queue);
+		if (rc != ECORE_SUCCESS)
+			return rc;
+
+		p_config->indirection_table[i] = OSAL_CPU_TO_LE16(abs_l2_queue);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "i = %d, queue = %d\n",
+			   i, p_config->indirection_table[i]);
+	}
+
+	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
+		p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);
+
+	return rc;
+}
+
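+/* Illustration: with rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ * ECORE_ACCEPT_BCAST, the logic below clears UCAST_DROP_ALL (a unicast
+ * accept bit is set), leaves MCAST_DROP_ALL set (no multicast accept
+ * bits) and sets BCAST_ACCEPT_ALL.
+ */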
+static void
+ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
+			    struct vport_update_ramrod_data *p_ramrod,
+			    struct ecore_filter_accept_flags flags)
+{
+	p_ramrod->common.update_rx_mode_flg = flags.update_rx_mode_config;
+	p_ramrod->common.update_tx_mode_flg = flags.update_tx_mode_config;
+
+#ifndef ASIC_ONLY
+	/* On B0 emulation we cannot enable Tx, since this would cause writes
+	 * to PVFC HW block which isn't implemented in emulation.
+	 */
+	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+			   "Non-Asic - prevent Tx mode in vport update\n");
+		p_ramrod->common.update_tx_mode_flg = 0;
+	}
+#endif
+
+	/* Set Rx mode accept flags */
+	if (p_ramrod->common.update_rx_mode_flg) {
+		__le16 *state = &p_ramrod->rx_mode.state;
+		u8 accept_filter = flags.rx_accept_filter;
+
+		/* A frame class is dropped only when neither its matched
+		 * nor its unmatched variant is accepted.
+		 */
+		SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+			  !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
+			    !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+
+		SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+			  !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));
+
+		SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+			  !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
+			    !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+		SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
+			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+		SET_FIELD(*state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+			  !!(accept_filter & ECORE_ACCEPT_BCAST));
+
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+			   "p_ramrod->rx_mode.state = 0x%x\n",
+			   p_ramrod->rx_mode.state);
+	}
+
+	/* Set Tx mode accept flags */
+	if (p_ramrod->common.update_tx_mode_flg) {
+		__le16 *state = &p_ramrod->tx_mode.state;
+		u8 accept_filter = flags.tx_accept_filter;
+
+		SET_FIELD(*state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+			  !!(accept_filter & ECORE_ACCEPT_NONE));
+
+		SET_FIELD(*state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+			  !!(accept_filter & ECORE_ACCEPT_NONE));
+
+		SET_FIELD(*state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
+			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+		SET_FIELD(*state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+			  !!(accept_filter & ECORE_ACCEPT_BCAST));
+
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+			   "p_ramrod->tx_mode.state = 0x%x\n",
+			   p_ramrod->tx_mode.state);
+	}
+}
+
+static void
+ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
+			      struct vport_update_ramrod_data *p_ramrod,
+			      struct ecore_sge_tpa_params *p_params)
+{
+	struct eth_vport_tpa_param *p_tpa;
+
+	if (!p_params) {
+		p_ramrod->common.update_tpa_param_flg = 0;
+		p_ramrod->common.update_tpa_en_flg = 0;
+		return;
+	}
+
+	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+	p_tpa = &p_ramrod->tpa_param;
+	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+	p_tpa->tpa_max_size = p_params->tpa_max_size;
+	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
+static void
+ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
+			  struct vport_update_ramrod_data *p_ramrod,
+			  struct ecore_sp_vport_update_params *p_params)
+{
+	int i;
+
+	OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
+		    sizeof(p_ramrod->approx_mcast.bins));
+
+	if (!p_params->update_approx_mcast_flg)
+		return;
+
+	p_ramrod->common.update_approx_mcast_flg = 1;
+	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+		u32 *p_bins = (u32 *)p_params->bins;
+
+		p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+	}
+}
+
+enum _ecore_status_t
+ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
+		      struct ecore_sp_vport_update_params *p_params,
+		      enum spq_mode comp_mode,
+		      struct ecore_spq_comp_cb *p_comp_data)
+{
+	struct ecore_rss_params *p_rss_params = p_params->rss_params;
+	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
+	struct ecore_sp_init_data init_data;
+	u8 abs_vport_id = 0, val;
+	u16 wordval;
+
+	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = ecore_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = p_params->opaque_fid;
+	init_data.comp_mode = comp_mode;
+	init_data.p_comp_data = p_comp_data;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   ETH_RAMROD_VPORT_UPDATE,
+				   PROTOCOLID_ETH, &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* Copy input params to ramrod according to FW struct */
+	p_ramrod = &p_ent->ramrod.vport_update;
+
+	p_ramrod->common.vport_id = abs_vport_id;
+
+	p_ramrod->common.rx_active_flg = p_params->vport_active_rx_flg;
+	p_ramrod->common.tx_active_flg = p_params->vport_active_tx_flg;
+	val = p_params->update_vport_active_rx_flg;
+	p_ramrod->common.update_rx_active_flg = val;
+	val = p_params->update_vport_active_tx_flg;
+	p_ramrod->common.update_tx_active_flg = val;
+	val = p_params->update_inner_vlan_removal_flg;
+	p_ramrod->common.update_inner_vlan_removal_en_flg = val;
+	val = p_params->inner_vlan_removal_flg;
+	p_ramrod->common.inner_vlan_removal_en = val;
+	val = p_params->silent_vlan_removal_flg;
+	p_ramrod->common.silent_vlan_removal_en = val;
+	val = p_params->update_tx_switching_flg;
+	p_ramrod->common.update_tx_switching_en_flg = val;
+	val = p_params->update_default_vlan_enable_flg;
+	p_ramrod->common.update_default_vlan_en_flg = val;
+	p_ramrod->common.default_vlan_en = p_params->default_vlan_enable_flg;
+	val = p_params->update_default_vlan_flg;
+	p_ramrod->common.update_default_vlan_flg = val;
+	wordval = p_params->default_vlan;
+	p_ramrod->common.default_vlan = OSAL_CPU_TO_LE16(wordval);
+
+	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+
+#ifndef ASIC_ONLY
+	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+		if (p_ramrod->common.tx_switching_en ||
+		    p_ramrod->common.update_tx_switching_en_flg) {
+			DP_NOTICE(p_hwfn, false,
+				  "FPGA - why are we seeing tx-switching? Overriding it\n");
+			p_ramrod->common.tx_switching_en = 0;
+			p_ramrod->common.update_tx_switching_en_flg = 1;
+		}
+#endif
+
+	val = p_params->update_anti_spoofing_en_flg;
+	p_ramrod->common.update_anti_spoofing_en_flg = val;
+	p_ramrod->common.anti_spoofing_en = p_params->anti_spoofing_en;
+	p_ramrod->common.accept_any_vlan = p_params->accept_any_vlan;
+	val = p_params->update_accept_any_vlan_flg;
+	p_ramrod->common.update_accept_any_vlan_flg = val;
+
+	rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+	if (rc != ECORE_SUCCESS) {
+		/* Return spq entry which is taken in ecore_sp_init_request() */
+		ecore_spq_return_entry(p_hwfn, p_ent);
+		return rc;
+	}
+
+	/* Update mcast bins for VFs, PF doesn't use this functionality */
+	ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+	ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+	ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
+				      p_params->sge_tpa_params);
+	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
+					 u16 opaque_fid, u8 vport_id)
+{
+	struct vport_stop_ramrod_data *p_ramrod;
+	struct ecore_sp_init_data init_data;
+	struct ecore_spq_entry *p_ent;
+	enum _ecore_status_t rc;
+	u8 abs_vport_id = 0;
+
+	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = ecore_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = opaque_fid;
+	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   ETH_RAMROD_VPORT_STOP,
+				   PROTOCOLID_ETH, &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.vport_stop;
+	p_ramrod->vport_id = abs_vport_id;
+
+	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t
+ecore_filter_accept_cmd(struct ecore_dev *p_dev,
+			u8 vport,
+			struct ecore_filter_accept_flags accept_flags,
+			u8 update_accept_any_vlan,
+			u8 accept_any_vlan,
+			enum spq_mode comp_mode,
+			struct ecore_spq_comp_cb *p_comp_data)
+{
+	struct ecore_sp_vport_update_params update_params;
+	int i, rc;
+
+	/* Prepare and send the vport rx_mode change */
+	OSAL_MEMSET(&update_params, 0, sizeof(update_params));
+	update_params.vport_id = vport;
+	update_params.accept_flags = accept_flags;
+	update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
+	update_params.accept_any_vlan = accept_any_vlan;
+
+	for_each_hwfn(p_dev, i) {
+		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+		update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+		rc = ecore_sp_vport_update(p_hwfn, &update_params,
+					   comp_mode, p_comp_data);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
+			return rc;
+		}
+
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+			   accept_flags.rx_accept_filter,
+			   accept_flags.tx_accept_filter);
+
+		if (update_accept_any_vlan)
+			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+				   "accept_any_vlan=%d configured\n",
+				   accept_any_vlan);
+	}
+
+	return 0;
+}
+
+static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn,
+				       struct ecore_hw_cid_data *p_cid_data)
+{
+	if (!p_cid_data->b_cid_allocated)
+		return;
+
+	ecore_cxt_release_cid(p_hwfn, p_cid_data->cid);
+	p_cid_data->b_cid_allocated = false;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+			      u16 opaque_fid,
+			      u32 cid,
+			      u16 rx_queue_id,
+			      u8 vport_id,
+			      u8 stats_id,
+			      u16 sb,
+			      u8 sb_index,
+			      u16 bd_max_bytes,
+			      dma_addr_t bd_chain_phys_addr,
+			      dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+{
+	struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+	struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
+	struct ecore_sp_init_data init_data;
+	u16 abs_rx_q_id = 0;
+	u8 abs_vport_id = 0;
+
+	/* Store information for the stop */
+	p_rx_cid->cid = cid;
+	p_rx_cid->opaque_fid = opaque_fid;
+	p_rx_cid->vport_id = vport_id;
+
+	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+		   opaque_fid, cid, rx_queue_id, vport_id, sb);
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = cid;
+	init_data.opaque_fid = opaque_fid;
+	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   ETH_RAMROD_RX_QUEUE_START,
+				   PROTOCOLID_ETH, &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+	p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
+	p_ramrod->sb_index = sb_index;
+	p_ramrod->vport_id = abs_vport_id;
+	p_ramrod->stats_counter_id = stats_id;
+	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+	p_ramrod->complete_cqe_flg = 0;
+	p_ramrod->complete_event_flg = 1;
+
+	p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
+	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
+
+	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
+	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
+
+	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+						 u16 opaque_fid,
+						 u8 rx_queue_id,
+						 u8 vport_id,
+						 u8 stats_id,
+						 u16 sb,
+						 u8 sb_index,
+						 u16 bd_max_bytes,
+						 dma_addr_t bd_chain_phys_addr,
+						 dma_addr_t cqe_pbl_addr,
+						 u16 cqe_pbl_size,
+						 void OSAL_IOMEM **pp_prod)
+{
+	struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+	u8 abs_stats_id = 0;
+	u16 abs_l2_queue = 0;
+	enum _ecore_status_t rc;
+	u64 init_prod_val = 0;
+
+	rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_l2_queue);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	*pp_prod = (u8 OSAL_IOMEM *) p_hwfn->regview +
+	    GTT_BAR0_MAP_REG_MSDM_RAM + MSTORM_PRODS_OFFSET(abs_l2_queue);
+
+	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+			  (u32 *)(&init_prod_val));
+
+	/* Allocate a CID for the queue */
+	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+		return rc;
+	}
+	p_rx_cid->b_cid_allocated = true;
+
+	rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
+					   opaque_fid,
+					   p_rx_cid->cid,
+					   rx_queue_id,
+					   vport_id,
+					   abs_stats_id,
+					   sb,
+					   sb_index,
+					   bd_max_bytes,
+					   bd_chain_phys_addr,
+					   cqe_pbl_addr, cqe_pbl_size);
+
+	if (rc != ECORE_SUCCESS)
+		ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+	return rc;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
+			      u16 rx_queue_id,
+			      u8 num_rxqs,
+			      u8 complete_cqe_flg,
+			      u8 complete_event_flg,
+			      enum spq_mode comp_mode,
+			      struct ecore_spq_comp_cb *p_comp_data)
+{
+	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
+	struct ecore_sp_init_data init_data;
+	struct ecore_hw_cid_data *p_rx_cid;
+	u16 qid, abs_rx_q_id = 0;
+	u8 i;
+
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.comp_mode = comp_mode;
+	init_data.p_comp_data = p_comp_data;
+
+	for (i = 0; i < num_rxqs; i++) {
+		qid = rx_queue_id + i;
+		p_rx_cid = &p_hwfn->p_rx_cids[qid];
+
+		/* Get SPQ entry */
+		init_data.cid = p_rx_cid->cid;
+		init_data.opaque_fid = p_rx_cid->opaque_fid;
+
+		rc = ecore_sp_init_request(p_hwfn, &p_ent,
+					   ETH_RAMROD_RX_QUEUE_UPDATE,
+					   PROTOCOLID_ETH, &init_data);
+		if (rc != ECORE_SUCCESS)
+			return rc;
+
+		p_ramrod = &p_ent->ramrod.rx_queue_update;
+
+		ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+		ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+		p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+		p_ramrod->complete_cqe_flg = complete_cqe_flg;
+		p_ramrod->complete_event_flg = complete_event_flg;
+
+		rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+			   u16 rx_queue_id,
+			   bool eq_completion_only, bool cqe_completion)
+{
+	struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
+	struct ecore_sp_init_data init_data;
+	u16 abs_rx_q_id = 0;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = p_rx_cid->cid;
+	init_data.opaque_fid = p_rx_cid->opaque_fid;
+	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   ETH_RAMROD_RX_QUEUE_STOP,
+				   PROTOCOLID_ETH, &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.rx_queue_stop;
+
+	ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+	ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+	/* Cleaning the queue requires the completion to arrive there.
+	 * In addition, VFs require the answer to come as eqe to PF.
+	 */
+	p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
+					  p_hwfn->hw_info.opaque_fid) &&
+				      !eq_completion_only) || cqe_completion;
+	p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
+					 p_hwfn->hw_info.opaque_fid) ||
+	    eq_completion_only;
+
+	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+	return rc;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+			      u16 opaque_fid,
+			      u16 tx_queue_id,
+			      u32 cid,
+			      u8 vport_id,
+			      u8 stats_id,
+			      u16 sb,
+			      u8 sb_index,
+			      dma_addr_t pbl_addr,
+			      u16 pbl_size,
+			      union ecore_qm_pq_params *p_pq_params)
+{
+	struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
+	struct ecore_sp_init_data init_data;
+	u16 pq_id, abs_tx_q_id = 0;
+	u8 abs_vport_id;
+
+	/* Store information for the stop */
+	p_tx_cid->cid = cid;
+	p_tx_cid->opaque_fid = opaque_fid;
+
+	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	rc = ecore_fw_l2_queue(p_hwfn, tx_queue_id, &abs_tx_q_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = cid;
+	init_data.opaque_fid = opaque_fid;
+	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   ETH_RAMROD_TX_QUEUE_START,
+				   PROTOCOLID_ETH, &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.tx_queue_start;
+	p_ramrod->vport_id = abs_vport_id;
+
+	p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
+	p_ramrod->sb_index = sb_index;
+	p_ramrod->stats_counter_id = stats_id;
+
+	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);
+
+	p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
+	p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
+	p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
+
+	pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
+	p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
+
+	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+						 u16 opaque_fid,
+						 u16 tx_queue_id,
+						 u8 vport_id,
+						 u8 stats_id,
+						 u16 sb,
+						 u8 sb_index,
+						 dma_addr_t pbl_addr,
+						 u16 pbl_size,
+						 void OSAL_IOMEM **pp_doorbell)
+{
+	struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+	union ecore_qm_pq_params pq_params;
+	enum _ecore_status_t rc;
+	u8 abs_stats_id = 0;
+
+	rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
+	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+
+	/* Allocate a CID for the queue */
+	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+		return rc;
+	}
+	p_tx_cid->b_cid_allocated = true;
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+		   opaque_fid, p_tx_cid->cid, tx_queue_id, vport_id, sb);
+
+	/* TODO - set tc in the pq_params for multi-cos */
+	rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
+					   opaque_fid,
+					   tx_queue_id,
+					   p_tx_cid->cid,
+					   vport_id,
+					   abs_stats_id,
+					   sb,
+					   sb_index,
+					   pbl_addr, pbl_size, &pq_params);
+
+	*pp_doorbell = (u8 OSAL_IOMEM *) p_hwfn->doorbells +
+	    DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
+
+	if (rc != ECORE_SUCCESS)
+		ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+
+	return rc;
+}
+
+enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn)
+{
+	return ECORE_NOTIMPL;
+}
+
+enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+						u16 tx_queue_id)
+{
+	struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+	struct tx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
+	struct ecore_sp_init_data init_data;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = p_tx_cid->cid;
+	init_data.opaque_fid = p_tx_cid->opaque_fid;
+	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   ETH_RAMROD_TX_QUEUE_STOP,
+				   PROTOCOLID_ETH, &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.tx_queue_stop;
+
+	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+	return rc;
+}
+
+static enum eth_filter_action
+ecore_filter_action(enum ecore_filter_opcode opcode)
+{
+	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+	switch (opcode) {
+	case ECORE_FILTER_ADD:
+		action = ETH_FILTER_ACTION_ADD;
+		break;
+	case ECORE_FILTER_REMOVE:
+		action = ETH_FILTER_ACTION_REMOVE;
+		break;
+	case ECORE_FILTER_FLUSH:
+		action = ETH_FILTER_ACTION_REMOVE_ALL;
+		break;
+	default:
+		action = MAX_ETH_FILTER_ACTION;
+	}
+
+	return action;
+}
+
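+/* Pack a MAC address into the three 16-bit words the FW expects;
+ * e.g. mac 00:11:22:33:44:55 yields fw_msb=0x0011, fw_mid=0x2233,
+ * fw_lsb=0x4455 when the __le16 words are read on a LE host.
+ */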
+static void ecore_set_fw_mac_addr(__le16 *fw_msb,
+				  __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
+{
+	((u8 *)fw_msb)[0] = mac[1];
+	((u8 *)fw_msb)[1] = mac[0];
+	((u8 *)fw_mid)[0] = mac[3];
+	((u8 *)fw_mid)[1] = mac[2];
+	((u8 *)fw_lsb)[0] = mac[5];
+	((u8 *)fw_lsb)[1] = mac[4];
+}
+
+static enum _ecore_status_t
+ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
+			  u16 opaque_fid,
+			  struct ecore_filter_ucast *p_filter_cmd,
+			  struct vport_filter_update_ramrod_data **pp_ramrod,
+			  struct ecore_spq_entry **pp_ent,
+			  enum spq_mode comp_mode,
+			  struct ecore_spq_comp_cb *p_comp_data)
+{
+	struct vport_filter_update_ramrod_data *p_ramrod;
+	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+	struct eth_filter_cmd *p_first_filter;
+	struct eth_filter_cmd *p_second_filter;
+	struct ecore_sp_init_data init_data;
+	enum eth_filter_action action;
+	enum _ecore_status_t rc;
+
+	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+			    &vport_to_remove_from);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+			    &vport_to_add_to);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = ecore_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = opaque_fid;
+	init_data.comp_mode = comp_mode;
+	init_data.p_comp_data = p_comp_data;
+
+	rc = ecore_sp_init_request(p_hwfn, pp_ent,
+				   ETH_RAMROD_FILTERS_UPDATE,
+				   PROTOCOLID_ETH, &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+	p_ramrod = *pp_ramrod;
+	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+#ifndef ASIC_ONLY
+	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+			   "Non-Asic - prevent Tx filters\n");
+		p_ramrod->filter_cmd_hdr.tx = 0;
+	}
+#endif
+
+	switch (p_filter_cmd->opcode) {
+	case ECORE_FILTER_REPLACE:
+	case ECORE_FILTER_MOVE:
+		p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
+		break;
+	default:
+		p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
+		break;
+	}
+
+	p_first_filter = &p_ramrod->filter_cmds[0];
+	p_second_filter = &p_ramrod->filter_cmds[1];
+
+	switch (p_filter_cmd->type) {
+	case ECORE_FILTER_MAC:
+		p_first_filter->type = ETH_FILTER_TYPE_MAC;
+		break;
+	case ECORE_FILTER_VLAN:
+		p_first_filter->type = ETH_FILTER_TYPE_VLAN;
+		break;
+	case ECORE_FILTER_MAC_VLAN:
+		p_first_filter->type = ETH_FILTER_TYPE_PAIR;
+		break;
+	case ECORE_FILTER_INNER_MAC:
+		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
+		break;
+	case ECORE_FILTER_INNER_VLAN:
+		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
+		break;
+	case ECORE_FILTER_INNER_PAIR:
+		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
+		break;
+	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+		break;
+	case ECORE_FILTER_MAC_VNI_PAIR:
+		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
+		break;
+	case ECORE_FILTER_VNI:
+		p_first_filter->type = ETH_FILTER_TYPE_VNI;
+		break;
+	}
+
+	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
+		ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
+				      &p_first_filter->mac_mid,
+				      &p_first_filter->mac_lsb,
+				      (u8 *)p_filter_cmd->mac);
+
+	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+		p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
+
+	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+		p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
+
+	if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
+		p_second_filter->type = p_first_filter->type;
+		p_second_filter->mac_msb = p_first_filter->mac_msb;
+		p_second_filter->mac_mid = p_first_filter->mac_mid;
+		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+		p_second_filter->vlan_id = p_first_filter->vlan_id;
+		p_second_filter->vni = p_first_filter->vni;
+
+		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+		p_first_filter->vport_id = vport_to_remove_from;
+
+		p_second_filter->action = ETH_FILTER_ACTION_ADD;
+		p_second_filter->vport_id = vport_to_add_to;
+	} else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
+		p_first_filter->vport_id = vport_to_add_to;
+		OSAL_MEMCPY(p_second_filter, p_first_filter,
+			    sizeof(*p_second_filter));
+		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
+		p_second_filter->action = ETH_FILTER_ACTION_ADD;
+	} else {
+		action = ecore_filter_action(p_filter_cmd->opcode);
+
+		if (action == MAX_ETH_FILTER_ACTION) {
+			DP_NOTICE(p_hwfn, true,
+				  "%d is not supported yet\n",
+				  p_filter_cmd->opcode);
+			return ECORE_NOTIMPL;
+		}
+
+		p_first_filter->action = action;
+		p_first_filter->vport_id =
+		    (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
+		    vport_to_remove_from : vport_to_add_to;
+	}
+
+	return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
+			  u16 opaque_fid,
+			  struct ecore_filter_ucast *p_filter_cmd,
+			  enum spq_mode comp_mode,
+			  struct ecore_spq_comp_cb *p_comp_data)
+{
+	struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	struct eth_filter_cmd_header *p_header;
+	enum _ecore_status_t rc;
+
+	rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+				       &p_ramrod, &p_ent,
+				       comp_mode, p_comp_data);
+	if (rc != ECORE_SUCCESS) {
+		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+		return rc;
+	}
+	p_header = &p_ramrod->filter_cmd_hdr;
+	p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+	if (rc != ECORE_SUCCESS) {
+		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
+		return rc;
+	}
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+		   (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
+		   ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
+		    "REMOVE" :
+		    ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
+		     "MOVE" : "REPLACE")),
+		   (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
+		   ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
+		    "VLAN" : "MAC & VLAN"),
+		   p_ramrod->filter_cmd_hdr.cmd_cnt,
+		   p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+		   p_filter_cmd->vport_to_add_to,
+		   p_filter_cmd->vport_to_remove_from,
+		   p_filter_cmd->mac[0], p_filter_cmd->mac[1],
+		   p_filter_cmd->mac[2], p_filter_cmd->mac[3],
+		   p_filter_cmd->mac[4], p_filter_cmd->mac[5],
+		   p_filter_cmd->vlan);
+
+	return ECORE_SUCCESS;
+}
+
+/*******************************************************************************
+ * Description:
+ *         Calculates crc32 on a buffer
+ *         Note: crc32_length MUST be aligned to 8
+ * Return:
+ *         crc32 result; the seed is returned unchanged on invalid input
+ ******************************************************************************/
+static u32 ecore_calc_crc32c(u8 *crc32_packet,
+			     u32 crc32_length, u32 crc32_seed, u8 complement)
+{
+	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+	u8 msb = 0, current_byte = 0;
+
+	if ((crc32_packet == OSAL_NULL) ||
+	    (crc32_length == 0) || ((crc32_length % 8) != 0)) {
+		return crc32_result;
+	}
+
+	for (byte = 0; byte < crc32_length; byte++) {
+		current_byte = crc32_packet[byte];
+		for (bit = 0; bit < 8; bit++) {
+			msb = (u8)(crc32_result >> 31);
+			crc32_result = crc32_result << 1;
+			if (msb != (0x1 & (current_byte >> bit))) {
+				crc32_result = crc32_result ^ CRC32_POLY;
+				crc32_result |= 1;
+			}
+		}
+	}
+
+	return crc32_result;
+}
+
+static OSAL_INLINE u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
+{
+	u32 packet_buf[2] = { 0 };
+
+	OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
+	return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
+u8 ecore_mcast_bin_from_mac(u8 *mac)
+{
+	u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+				  mac, ETH_ALEN);
+
+	return crc & 0xff;
+}
+
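+/* The 8-bit bin index selects one bit of the 256-bit approximate
+ * multicast vector programmed by ecore_sp_eth_filter_mcast() below;
+ * e.g. a MAC hashing to bin 0x2a sets bit 10 of 32-bit word 1.
+ */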
+static enum _ecore_status_t
+ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
+			  u16 opaque_fid,
+			  struct ecore_filter_mcast *p_filter_cmd,
+			  enum spq_mode comp_mode,
+			  struct ecore_spq_comp_cb *p_comp_data)
+{
+	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	struct ecore_sp_init_data init_data;
+	enum _ecore_status_t rc;
+	u8 abs_vport_id = 0;
+	int i;
+
+	rc = ecore_fw_vport(p_hwfn,
+			    (p_filter_cmd->opcode == ECORE_FILTER_ADD) ?
+			    p_filter_cmd->vport_to_add_to :
+			    p_filter_cmd->vport_to_remove_from, &abs_vport_id);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = ecore_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = comp_mode;
+	init_data.p_comp_data = p_comp_data;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   ETH_RAMROD_VPORT_UPDATE,
+				   PROTOCOLID_ETH, &init_data);
+	if (rc != ECORE_SUCCESS) {
+		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+		return rc;
+	}
+
+	p_ramrod = &p_ent->ramrod.vport_update;
+	p_ramrod->common.update_approx_mcast_flg = 1;
+
+	/* explicitly clear out the entire vector */
+	OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
+		    0, sizeof(p_ramrod->approx_mcast.bins));
+	OSAL_MEMSET(bins, 0, sizeof(bins));
+
+	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
+		/* filter ADD op is an explicit set op and it removes
+		 * any existing filters for the vport.
+		 */
+		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+			u32 bit;
+
+			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+			OSAL_SET_BIT(bit, bins);
+		}
+
+		/* Convert to correct endianness */
+		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+			struct vport_update_ramrod_mcast *p_ramrod_bins;
+			u32 *p_bins = (u32 *)bins;
+
+			p_ramrod_bins = &p_ramrod->approx_mcast;
+			p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+		}
+	}
+
+	p_ramrod->common.vport_id = abs_vport_id;
+
+	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+	if (rc != ECORE_SUCCESS)
+		DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
+
+	return rc;
+}
+
+enum _ecore_status_t
+ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
+		       struct ecore_filter_mcast *p_filter_cmd,
+		       enum spq_mode comp_mode,
+		       struct ecore_spq_comp_cb *p_comp_data)
+{
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	int i;
+
+	/* only ADD and REMOVE operations are supported for multi-cast */
+	if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
+	     p_filter_cmd->opcode != ECORE_FILTER_REMOVE) ||
+	    (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
+		return ECORE_INVAL;
+	}
+
+	for_each_hwfn(p_dev, i) {
+		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+		rc = ecore_sp_eth_filter_mcast(p_hwfn,
+					       p_hwfn->hw_info.opaque_fid,
+					       p_filter_cmd,
+					       comp_mode, p_comp_data);
+		if (rc != ECORE_SUCCESS)
+			break;
+	}
+
+	return rc;
+}
+
+enum _ecore_status_t
+ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
+		       struct ecore_filter_ucast *p_filter_cmd,
+		       enum spq_mode comp_mode,
+		       struct ecore_spq_comp_cb *p_comp_data)
+{
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	int i;
+
+	for_each_hwfn(p_dev, i) {
+		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+		rc = ecore_sp_eth_filter_ucast(p_hwfn,
+					       p_hwfn->hw_info.opaque_fid,
+					       p_filter_cmd,
+					       comp_mode, p_comp_data);
+		if (rc != ECORE_SUCCESS)
+			break;
+	}
+
+	return rc;
+}
+
+/* Statistics related code */
+static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
+					     u32 *p_addr, u32 *p_len,
+					     u16 statistics_bin)
+{
+	*p_addr = BAR0_MAP_REG_PSDM_RAM +
+		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+	*p_len = sizeof(struct eth_pstorm_per_queue_stat);
+}
+
+static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
+				     struct ecore_ptt *p_ptt,
+				     struct ecore_eth_stats *p_stats,
+				     u16 statistics_bin)
+{
+	struct eth_pstorm_per_queue_stat pstats;
+	u32 pstats_addr = 0, pstats_len = 0;
+
+	__ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
+					 statistics_bin);
+
+	OSAL_MEMSET(&pstats, 0, sizeof(pstats));
+	ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+	p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+	p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+	p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+	p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+	p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+	p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+	p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+}
+
+static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
+				     struct ecore_ptt *p_ptt,
+				     struct ecore_eth_stats *p_stats,
+				     u16 statistics_bin)
+{
+	struct tstorm_per_port_stat tstats;
+	u32 tstats_addr, tstats_len;
+
+	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+	tstats_len = sizeof(struct tstorm_per_port_stat);
+
+	OSAL_MEMSET(&tstats, 0, sizeof(tstats));
+	ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
+
+	p_stats->mftag_filter_discards +=
+	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
+	p_stats->mac_filter_discards +=
+	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+}
+
+static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
+					     u32 *p_addr, u32 *p_len,
+					     u16 statistics_bin)
+{
+	*p_addr = BAR0_MAP_REG_USDM_RAM +
+		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+	*p_len = sizeof(struct eth_ustorm_per_queue_stat);
+}
+
+static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
+				     struct ecore_ptt *p_ptt,
+				     struct ecore_eth_stats *p_stats,
+				     u16 statistics_bin)
+{
+	struct eth_ustorm_per_queue_stat ustats;
+	u32 ustats_addr = 0, ustats_len = 0;
+
+	__ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
+					 statistics_bin);
+
+	OSAL_MEMSET(&ustats, 0, sizeof(ustats));
+	ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+	p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+	p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+	p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+	p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+	p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+	p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
+					     u32 *p_addr, u32 *p_len,
+					     u16 statistics_bin)
+{
+	*p_addr = BAR0_MAP_REG_MSDM_RAM +
+		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+	*p_len = sizeof(struct eth_mstorm_per_queue_stat);
+}
+
+static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
+				     struct ecore_ptt *p_ptt,
+				     struct ecore_eth_stats *p_stats,
+				     u16 statistics_bin)
+{
+	struct eth_mstorm_per_queue_stat mstats;
+	u32 mstats_addr = 0, mstats_len = 0;
+
+	__ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
+					 statistics_bin);
+
+	OSAL_MEMSET(&mstats, 0, sizeof(mstats));
+	ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
+
+	p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
+	p_stats->packet_too_big_discard +=
+	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
+	p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+	p_stats->tpa_coalesced_pkts +=
+	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+	p_stats->tpa_coalesced_events +=
+	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+	p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
+	p_stats->tpa_coalesced_bytes +=
+	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+}
+
+static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
+					 struct ecore_ptt *p_ptt,
+					 struct ecore_eth_stats *p_stats)
+{
+	struct port_stats port_stats;
+	int j;
+
+	OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
+
+	ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
+			  p_hwfn->mcp_info->port_addr +
+			  OFFSETOF(struct public_port, stats),
+			  sizeof(port_stats));
+
+	p_stats->rx_64_byte_packets += port_stats.pmm.r64;
+	p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
+	p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
+	p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
+	p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
+	p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
+	p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
+	p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
+	p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
+	p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
+	p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
+	p_stats->rx_crc_errors += port_stats.pmm.rfcs;
+	p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+	p_stats->rx_pause_frames += port_stats.pmm.rxpf;
+	p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
+	p_stats->rx_align_errors += port_stats.pmm.raln;
+	p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
+	p_stats->rx_oversize_packets += port_stats.pmm.rovr;
+	p_stats->rx_jabbers += port_stats.pmm.rjbr;
+	p_stats->rx_undersize_packets += port_stats.pmm.rund;
+	p_stats->rx_fragments += port_stats.pmm.rfrg;
+	p_stats->tx_64_byte_packets += port_stats.pmm.t64;
+	p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+	p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+	p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+	p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+	p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+	p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+	p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+	p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+	p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+	p_stats->tx_pause_frames += port_stats.pmm.txpf;
+	p_stats->tx_pfc_frames += port_stats.pmm.txpp;
+	p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+	p_stats->tx_total_collisions += port_stats.pmm.tncl;
+	p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
+	p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+	p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+	p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+	p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+	p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
+	p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+	p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+	p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+	p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+	for (j = 0; j < 8; j++) {
+		p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
+		p_stats->brb_discards += port_stats.brb.brb_discard[j];
+	}
+}
+
+void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
+			     struct ecore_ptt *p_ptt,
+			     struct ecore_eth_stats *stats,
+			     u16 statistics_bin, bool b_get_port_stats)
+{
+	__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
+	__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
+	__ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+	__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
+
+#ifndef ASIC_ONLY
+	/* Avoid getting PORT stats for emulation. */
+	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+		return;
+#endif
+
+	if (b_get_port_stats && p_hwfn->mcp_info)
+		__ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
+}
+
+static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
+				   struct ecore_eth_stats *stats)
+{
+	u8 fw_vport = 0;
+	int i;
+
+	OSAL_MEMSET(stats, 0, sizeof(*stats));
+
+	for_each_hwfn(p_dev, i) {
+		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+		struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+
+		if (!p_ptt) {
+			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+			continue;
+		}
+
+		/* The main vport index is relative first */
+		if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
+			DP_ERR(p_hwfn, "No vport available!\n");
+			goto out;
+		}
+
+		__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+					true);
+
+out:
+		ecore_ptt_release(p_hwfn, p_ptt);
+	}
+}
+
+void ecore_get_vport_stats(struct ecore_dev *p_dev,
+			   struct ecore_eth_stats *stats)
+{
+	u32 i;
+
+	if (!p_dev) {
+		OSAL_MEMSET(stats, 0, sizeof(*stats));
+		return;
+	}
+
+	_ecore_get_vport_stats(p_dev, stats);
+
+	if (!p_dev->reset_stats)
+		return;
+
+	/* Reduce the statistics baseline */
+	for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
+		((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
+}
+
+/* Zeroes the V-PORT specific portion of stats; port stats remain untouched */
+void ecore_reset_vport_stats(struct ecore_dev *p_dev)
+{
+	int i;
+
+	for_each_hwfn(p_dev, i) {
+		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+		struct eth_mstorm_per_queue_stat mstats;
+		struct eth_ustorm_per_queue_stat ustats;
+		struct eth_pstorm_per_queue_stat pstats;
+		struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+		u32 addr = 0, len = 0;
+
+		if (!p_ptt) {
+			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+			continue;
+		}
+
+		OSAL_MEMSET(&mstats, 0, sizeof(mstats));
+		__ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
+		ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
+
+		OSAL_MEMSET(&ustats, 0, sizeof(ustats));
+		__ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
+		ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
+
+		OSAL_MEMSET(&pstats, 0, sizeof(pstats));
+		__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
+		ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
+
+		ecore_ptt_release(p_hwfn, p_ptt);
+	}
+
+	/* PORT statistics are not necessarily reset, so we need to
+	 * read and create a baseline for future statistics.
+	 */
+	if (!p_dev->reset_stats)
+		DP_INFO(p_dev, "Reset stats not allocated\n");
+	else
+		_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
+}
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
new file mode 100644
index 0000000..658af45
--- /dev/null
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_L2_H__
+#define __ECORE_L2_H__
+
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_spq.h"
+#include "ecore_l2_api.h"
+
+/**
+ * @brief ecore_sp_eth_tx_queue_update -
+ *
+ * This ramrod updates a TX queue. It is used for setting the active
+ * state of the queue.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn);
+
+enum _ecore_status_t
+ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
+			 struct ecore_sp_vport_start_params *p_params);
+
+/**
+ * @brief - Starts an Rx queue; Should be used where contexts are handled
+ * outside of the ramrod area [specifically iov scenarios]
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param cid
+ * @param rx_queue_id
+ * @param vport_id
+ * @param stats_id
+ * @param sb
+ * @param sb_index
+ * @param bd_max_bytes
+ * @param bd_chain_phys_addr
+ * @param cqe_pbl_addr
+ * @param cqe_pbl_size
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+			      u16 opaque_fid,
+			      u32 cid,
+			      u16 rx_queue_id,
+			      u8 vport_id,
+			      u8 stats_id,
+			      u16 sb,
+			      u8 sb_index,
+			      u16 bd_max_bytes,
+			      dma_addr_t bd_chain_phys_addr,
+			      dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+/**
+ * @brief - Starts a Tx queue; Should be used where contexts are handled
+ * outside of the ramrod area [specifically iov scenarios]
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param tx_queue_id
+ * @param cid
+ * @param vport_id
+ * @param stats_id
+ * @param sb
+ * @param sb_index
+ * @param pbl_addr
+ * @param pbl_size
+ * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+			      u16 opaque_fid,
+			      u16 tx_queue_id,
+			      u32 cid,
+			      u8 vport_id,
+			      u8 stats_id,
+			      u16 sb,
+			      u8 sb_index,
+			      dma_addr_t pbl_addr,
+			      u16 pbl_size,
+			      union ecore_qm_pq_params *p_pq_params);
+
+u8 ecore_mcast_bin_from_mac(u8 *mac);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
new file mode 100644
index 0000000..1e01b57
--- /dev/null
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_L2_API_H__
+#define __ECORE_L2_API_H__
+
+#include "ecore_status.h"
+#include "ecore_sp_api.h"
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_rss_caps {
+	ECORE_RSS_IPV4 = 0x1,
+	ECORE_RSS_IPV6 = 0x2,
+	ECORE_RSS_IPV4_TCP = 0x4,
+	ECORE_RSS_IPV6_TCP = 0x8,
+	ECORE_RSS_IPV4_UDP = 0x10,
+	ECORE_RSS_IPV6_UDP = 0x20,
+};
+
+/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
+#define ECORE_RSS_IND_TABLE_SIZE 128
+#define ECORE_RSS_KEY_SIZE 10	/* size in 32b chunks */
+#endif
+
+struct ecore_rss_params {
+	u8 update_rss_config;
+	u8 rss_enable;
+	u8 rss_eng_id;
+	u8 update_rss_capabilities;
+	u8 update_rss_ind_table;
+	u8 update_rss_key;
+	u8 rss_caps;
+	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
+	u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+	u32 rss_key[ECORE_RSS_KEY_SIZE];
+};
+
+struct ecore_sge_tpa_params {
+	u8 max_buffers_per_cqe;
+
+	u8 update_tpa_en_flg;
+	u8 tpa_ipv4_en_flg;
+	u8 tpa_ipv6_en_flg;
+	u8 tpa_ipv4_tunn_en_flg;
+	u8 tpa_ipv6_tunn_en_flg;
+
+	u8 update_tpa_param_flg;
+	u8 tpa_pkt_split_flg;
+	u8 tpa_hdr_data_split_flg;
+	u8 tpa_gro_consistent_flg;
+	u8 tpa_max_aggs_num;
+	u16 tpa_max_size;
+	u16 tpa_min_size_to_start;
+	u16 tpa_min_size_to_cont;
+};
+
+enum ecore_filter_opcode {
+	ECORE_FILTER_ADD,
+	ECORE_FILTER_REMOVE,
+	ECORE_FILTER_MOVE,
+	ECORE_FILTER_REPLACE,	/* Delete all MACs and add a new one instead */
+	ECORE_FILTER_FLUSH,	/* Removes all filters */
+};
+
+enum ecore_filter_ucast_type {
+	ECORE_FILTER_MAC,
+	ECORE_FILTER_VLAN,
+	ECORE_FILTER_MAC_VLAN,
+	ECORE_FILTER_INNER_MAC,
+	ECORE_FILTER_INNER_VLAN,
+	ECORE_FILTER_INNER_PAIR,
+	ECORE_FILTER_INNER_MAC_VNI_PAIR,
+	ECORE_FILTER_MAC_VNI_PAIR,
+	ECORE_FILTER_VNI,
+};
+
+struct ecore_filter_ucast {
+	enum ecore_filter_opcode opcode;
+	enum ecore_filter_ucast_type type;
+	u8 is_rx_filter;
+	u8 is_tx_filter;
+	u8 vport_to_add_to;
+	u8 vport_to_remove_from;
+	unsigned char mac[ETH_ALEN];
+	u8 assert_on_error;
+	u16 vlan;
+	u32 vni;
+};
+
+struct ecore_filter_mcast {
+	/* MOVE is not supported for multicast */
+	enum ecore_filter_opcode opcode;
+	u8 vport_to_add_to;
+	u8 vport_to_remove_from;
+	u8 num_mc_addrs;
+#define ECORE_MAX_MC_ADDRS	64
+	unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+struct ecore_filter_accept_flags {
+	u8 update_rx_mode_config;
+	u8 update_tx_mode_config;
+	u8 rx_accept_filter;
+	u8 tx_accept_filter;
+#define ECORE_ACCEPT_NONE		0x01
+#define ECORE_ACCEPT_UCAST_MATCHED	0x02
+#define ECORE_ACCEPT_UCAST_UNMATCHED	0x04
+#define ECORE_ACCEPT_MCAST_MATCHED	0x08
+#define ECORE_ACCEPT_MCAST_UNMATCHED	0x10
+#define ECORE_ACCEPT_BCAST		0x20
+};
+
+/* Add / remove / move / remove-all unicast MAC-VLAN filters.
+ * FW will assert in the following cases, so the driver should take care:
+ * 1. Adding a filter to a full table.
+ * 2. Adding a filter which already exists on that vport.
+ * 3. Removing a filter which doesn't exist.
+ */
+
+enum _ecore_status_t
+ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
+		       struct ecore_filter_ucast *p_filter_cmd,
+		       enum spq_mode comp_mode,
+		       struct ecore_spq_comp_cb *p_comp_data);
+
+/* Add / remove / move multicast MAC filters. */
+enum _ecore_status_t
+ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
+		       struct ecore_filter_mcast *p_filter_cmd,
+		       enum spq_mode comp_mode,
+		       struct ecore_spq_comp_cb *p_comp_data);
+
+/* Set "accept" filters */
+enum _ecore_status_t
+ecore_filter_accept_cmd(struct ecore_dev *p_dev,
+			u8 vport,
+			struct ecore_filter_accept_flags accept_flags,
+			u8 update_accept_any_vlan,
+			u8 accept_any_vlan,
+			enum spq_mode comp_mode,
+			struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
+ *
+ * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
+ * the VPort ID is not currently initialized.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param rx_queue_id		RX Queue ID: Zero based, per VPort, allocated
+ *				by assignment (=rssId)
+ * @param vport_id		VPort ID
+ * @param stats_id		VPort ID which the queue stats
+ *				will be added to
+ * @param sb			Status Block of the Function Event Ring
+ * @param sb_index		Index into the status block of the
+ *				Function Event Ring
+ * @param bd_max_bytes		Maximum bytes that can be placed on a BD
+ * @param bd_chain_phys_addr	Physical address of BDs for receive.
+ * @param cqe_pbl_addr		Physical address of the CQE PBL Table.
+ * @param cqe_pbl_size		Size of the CQE PBL Table
+ * @param pp_prod		Pointer to place the producer's
+ *				address for the Rx Q (may be NULL).
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+						 u16 opaque_fid,
+						 u8 rx_queue_id,
+						 u8 vport_id,
+						 u8 stats_id,
+						 u16 sb,
+						 u8 sb_index,
+						 u16 bd_max_bytes,
+						 dma_addr_t bd_chain_phys_addr,
+						 dma_addr_t cqe_pbl_addr,
+						 u16 cqe_pbl_size,
+						 void OSAL_IOMEM **pp_prod);
+
+/**
+ * @brief ecore_sp_eth_rx_queue_stop -
+ *
+ * This ramrod closes an RX queue. It sends RX queue stop ramrod
+ * + CFC delete ramrod
+ *
+ * @param p_hwfn
+ * @param rx_queue_id		RX Queue ID
+ * @param eq_completion_only	If True, the completion is returned
+ *				on the EQe; if False, it is returned
+ *				on the EQe only if the p_hwfn opaque
+ *				differs from the RXQ opaque, otherwise
+ *				on the CQe.
+ * @param cqe_completion	If True, the completion is received
+ *				on the CQe.
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+			   u16 rx_queue_id,
+			   bool eq_completion_only, bool cqe_completion);
+
+/**
+ * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
+ *
+ * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
+ * the VPort is not currently initialized.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param tx_queue_id		TX Queue ID
+ * @param vport_id		VPort ID
+ * @param stats_id              VPort ID which the queue stats
+ *				will be added to
+ * @param sb			Status Block of the Function Event Ring
+ * @param sb_index		Index into the status block of the Function
+ *				Event Ring
+ * @param pbl_addr		address of the pbl array
+ * @param pbl_size		number of entries in pbl
+ * @param pp_doorbell		Pointer to place the doorbell pointer
+ *				(may be NULL). This address should be
+ *				used with the DIRECT_REG_WR macro.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+						 u16 opaque_fid,
+						 u16 tx_queue_id,
+						 u8 vport_id,
+						 u8 stats_id,
+						 u16 sb,
+						 u8 sb_index,
+						 dma_addr_t pbl_addr,
+						 u16 pbl_size,
+						 void OSAL_IOMEM **pp_doorbell);
+
+/**
+ * @brief ecore_sp_eth_tx_queue_stop -
+ *
+ * This ramrod closes a TX queue. It sends TX queue stop ramrod
+ * + CFC delete ramrod
+ *
+ * @param p_hwfn
+ * @param tx_queue_id		TX Queue ID
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+						u16 tx_queue_id);
+
+enum ecore_tpa_mode {
+	ECORE_TPA_MODE_NONE,
+	ECORE_TPA_MODE_RSC,
+	ECORE_TPA_MODE_GRO,
+	ECORE_TPA_MODE_MAX
+};
+
+struct ecore_sp_vport_start_params {
+	enum ecore_tpa_mode tpa_mode;
+	bool remove_inner_vlan;	/* Inner VLAN removal is enabled */
+	bool tx_switching;	/* Vport supports tx-switching */
+	bool handle_ptp_pkts;	/* Handle PTP packets */
+	bool only_untagged;	/* Untagged pkt control */
+	bool drop_ttl0;		/* Drop packets with TTL = 0 */
+	u8 max_buffers_per_cqe;
+	u32 concrete_fid;
+	u16 opaque_fid;
+	u8 vport_id;		/* VPORT ID */
+	u16 mtu;		/* VPORT MTU */
+	bool zero_placement_offset;
+};
+
+/**
+ * @brief ecore_sp_vport_start -
+ *
+ * This ramrod initializes a VPort. An Assert is generated if the Function ID
+ * of the VPort is not enabled.
+ *
+ * @param p_hwfn
+ * @param p_params		VPORT start params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
+		     struct ecore_sp_vport_start_params *p_params);
+
+struct ecore_sp_vport_update_params {
+	u16 opaque_fid;
+	u8 vport_id;
+	u8 update_vport_active_rx_flg;
+	u8 vport_active_rx_flg;
+	u8 update_vport_active_tx_flg;
+	u8 vport_active_tx_flg;
+	u8 update_inner_vlan_removal_flg;
+	u8 inner_vlan_removal_flg;
+	u8 silent_vlan_removal_flg;
+	u8 update_default_vlan_enable_flg;
+	u8 default_vlan_enable_flg;
+	u8 update_default_vlan_flg;
+	u16 default_vlan;
+	u8 update_tx_switching_flg;
+	u8 tx_switching_flg;
+	u8 update_approx_mcast_flg;
+	u8 update_anti_spoofing_en_flg;
+	u8 anti_spoofing_en;
+	u8 update_accept_any_vlan_flg;
+	u8 accept_any_vlan;
+	unsigned long bins[8];
+	struct ecore_rss_params *rss_params;
+	struct ecore_filter_accept_flags accept_flags;
+	struct ecore_sge_tpa_params *sge_tpa_params;
+};
+
+/**
+ * @brief ecore_sp_vport_update -
+ *
+ * This ramrod updates the parameters of the VPort. Every field can be updated
+ * independently, according to flags.
+ *
+ * This ramrod is also used to set the VPort state to active after creation.
+ * An Assert is generated if the VPort does not contain an RX queue.
+ *
+ * @param p_hwfn
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
+		      struct ecore_sp_vport_update_params *p_params,
+		      enum spq_mode comp_mode,
+		      struct ecore_spq_comp_cb *p_comp_data);
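
A minimal usage sketch for the update ramrod (illustrative only, not part of
the patch): activating a vport after creation, the case called out above.
Everything except the struct fields listed earlier and ECORE_SPQ_MODE_EBLOCK
is an assumption here.

	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.opaque_fid = p_hwfn->hw_info.opaque_fid;
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_tx_flg = 1;

	rc = ecore_sp_vport_update(p_hwfn, &params,
				   ECORE_SPQ_MODE_EBLOCK, NULL);
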
+/**
+ * @brief ecore_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
+					 u16 opaque_fid, u8 vport_id);
+
+enum _ecore_status_t
+ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
+			  u16 opaque_fid,
+			  struct ecore_filter_ucast *p_filter_cmd,
+			  enum spq_mode comp_mode,
+			  struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_sp_rx_eth_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ * @param rx_queue_id		RX Queue ID
+ * @param num_rxqs		Allows updating multiple Rx
+ *				queues, from rx_queue_id to
+ *				(rx_queue_id + num_rxqs)
+ * @param complete_cqe_flg	Post completion to the CQE Ring if set
+ * @param complete_event_flg	Post completion to the Event Ring if set
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
+			      u16 rx_queue_id,
+			      u8 num_rxqs,
+			      u8 complete_cqe_flg,
+			      u8 complete_event_flg,
+			      enum spq_mode comp_mode,
+			      struct ecore_spq_comp_cb *p_comp_data);
+
+void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
+			     struct ecore_ptt *p_ptt,
+			     struct ecore_eth_stats *stats,
+			     u16 statistics_bin, bool b_get_port_stats);
+
+void ecore_get_vport_stats(struct ecore_dev *p_dev,
+			   struct ecore_eth_stats *stats);
+
+void ecore_reset_vport_stats(struct ecore_dev *p_dev);
+
+#endif
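
A usage sketch for the unicast filter API above (illustrative only; the
demo_add_mac helper is hypothetical and plain memcpy is used for brevity):
adding a MAC filter on vport 0.

static enum _ecore_status_t demo_add_mac(struct ecore_dev *p_dev,
					 const unsigned char *mac)
{
	struct ecore_filter_ucast ucast;

	OSAL_MEMSET(&ucast, 0, sizeof(ucast));
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;
	ucast.vport_to_add_to = 0;
	memcpy(ucast.mac, mac, ETH_ALEN);

	/* Per the comment above, FW asserts on duplicate adds, so the
	 * caller must track which filters are already installed.
	 */
	return ecore_filter_ucast_cmd(p_dev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}
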
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
new file mode 100644
index 0000000..0fc043e
--- /dev/null
+++ b/drivers/net/qede/qede_eth_if.c
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "qede_ethdev.h"
+
+static int
+qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
+{
+	int rc, i;
+
+	for_each_hwfn(edev, i) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+		u8 tx_switching = 0;
+		struct ecore_sp_vport_start_params start = { 0 };
+
+		start.tpa_mode = p_params->gro_enable ? ECORE_TPA_MODE_GRO :
+		    ECORE_TPA_MODE_NONE;
+		start.remove_inner_vlan = p_params->remove_inner_vlan;
+		start.tx_switching = tx_switching;
+		start.only_untagged = false;	/* not restricted to untagged */
+		start.drop_ttl0 = p_params->drop_ttl0;
+		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
+		start.handle_ptp_pkts = p_params->handle_ptp_pkts;
+		start.vport_id = p_params->vport_id;
+		start.max_buffers_per_cqe = 16;	/* TODO-is this right */
+		start.mtu = p_params->mtu;
+
+		rc = ecore_sp_vport_start(p_hwfn, &start);
+		if (rc) {
+			DP_ERR(edev, "Failed to start VPORT\n");
+			return rc;
+		}
+
+		ecore_hw_start_fastpath(p_hwfn);
+
+		DP_VERBOSE(edev, ECORE_MSG_SPQ,
+			   "Started V-PORT %d with MTU %d\n",
+			   p_params->vport_id, p_params->mtu);
+	}
+
+	ecore_reset_vport_stats(edev);
+
+	return 0;
+}
+
+static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id)
+{
+	int rc, i;
+
+	for_each_hwfn(edev, i) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+		rc = ecore_sp_vport_stop(p_hwfn,
+					 p_hwfn->hw_info.opaque_fid, vport_id);
+
+		if (rc) {
+			DP_ERR(edev, "Failed to stop VPORT\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int
+qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
+{
+	struct ecore_sp_vport_update_params sp_params;
+	struct ecore_rss_params sp_rss_params;
+	int rc, i;
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	memset(&sp_rss_params, 0, sizeof(sp_rss_params));
+
+	/* Translate protocol params into sp params */
+	sp_params.vport_id = params->vport_id;
+	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
+	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
+	sp_params.vport_active_rx_flg = params->vport_active_flg;
+	sp_params.vport_active_tx_flg = params->vport_active_flg;
+	sp_params.update_inner_vlan_removal_flg =
+	    params->update_inner_vlan_removal_flg;
+	sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
+	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
+	sp_params.tx_switching_flg = params->tx_switching_flg;
+	sp_params.accept_any_vlan = params->accept_any_vlan;
+	sp_params.update_accept_any_vlan_flg =
+	    params->update_accept_any_vlan_flg;
+
+	/* RSS is a bit tricky, since the upper layer isn't familiar with
+	 * hwfns. The RSS values need to be re-fixed per engine for CMT.
+	 */
+
+	if (edev->num_hwfns > 1 && params->update_rss_flg) {
+		struct qed_update_vport_rss_params *rss = &params->rss_params;
+		int k, max = 0;
+
+		/* Find largest entry, since it's possible RSS needs to
+		 * be disabled [in case only 1 queue per-hwfn]
+		 */
+		for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
+			max = (max > rss->rss_ind_table[k]) ?
+			    max : rss->rss_ind_table[k];
+
+		/* Either fix RSS values or disable RSS */
+		if (edev->num_hwfns < max + 1) {
+			int divisor = (max + edev->num_hwfns - 1) /
+			    edev->num_hwfns;
+
+			DP_VERBOSE(edev, ECORE_MSG_SPQ,
+				   "CMT - fixing RSS values (modulo %02x)\n",
+				   divisor);
+
+			for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
+				rss->rss_ind_table[k] =
+				    rss->rss_ind_table[k] % divisor;
+		} else {
+			DP_VERBOSE(edev, ECORE_MSG_SPQ,
+				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
+			params->update_rss_flg = 0;
+		}
+	}
+
+	/* Now, update the RSS configuration for actual configuration */
+	if (params->update_rss_flg) {
+		sp_rss_params.update_rss_config = 1;
+		sp_rss_params.rss_enable = 1;
+		sp_rss_params.update_rss_capabilities = 1;
+		sp_rss_params.update_rss_ind_table = 1;
+		sp_rss_params.update_rss_key = 1;
+		sp_rss_params.rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
+		    ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
+		sp_rss_params.rss_table_size_log = 7;	/* 2^7 = 128 */
+		rte_memcpy(sp_rss_params.rss_ind_table,
+		       params->rss_params.rss_ind_table,
+		       ECORE_RSS_IND_TABLE_SIZE * sizeof(uint16_t));
+		rte_memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
+		       ECORE_RSS_KEY_SIZE * sizeof(uint32_t));
+	}
+	sp_params.rss_params = &sp_rss_params;
+
+	for_each_hwfn(edev, i) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+		rc = ecore_sp_vport_update(p_hwfn, &sp_params,
+					   ECORE_SPQ_MODE_EBLOCK, NULL);
+		if (rc) {
+			DP_ERR(edev, "Failed to update VPORT\n");
+			return rc;
+		}
+
+		DP_VERBOSE(edev, ECORE_MSG_SPQ,
+			   "Updated V-PORT %d: active_flag %d [update %d]\n",
+			   params->vport_id, params->vport_active_flg,
+			   params->update_vport_active_flg);
+	}
+
+	return 0;
+}
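
For reference, the CMT fix-up above reduces each indirection entry modulo a
divisor derived from the largest entry. A standalone sketch of the same
computation (illustrative only; the helper name is hypothetical): with
num_hwfns = 2 and a largest entry of 7, divisor = (7 + 2 - 1) / 2 = 4, so
entry 5 becomes 1.

static uint16_t demo_cmt_fix_entry(uint16_t entry, uint16_t max_entry,
				   int num_hwfns)
{
	/* same rounding-up division as qed_update_vport() above */
	int divisor = (max_entry + num_hwfns - 1) / num_hwfns;

	return entry % divisor;
}
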
+
+static int
+qed_start_rxq(struct ecore_dev *edev,
+	      uint8_t rss_id, uint8_t rx_queue_id,
+	      uint8_t vport_id, uint16_t sb,
+	      uint8_t sb_index, uint16_t bd_max_bytes,
+	      dma_addr_t bd_chain_phys_addr,
+	      dma_addr_t cqe_pbl_addr,
+	      uint16_t cqe_pbl_size, void OSAL_IOMEM **pp_prod)
+{
+	struct ecore_hwfn *p_hwfn;
+	int rc, hwfn_index;
+
+	hwfn_index = rss_id % edev->num_hwfns;
+	p_hwfn = &edev->hwfns[hwfn_index];
+
+	rc = ecore_sp_eth_rx_queue_start(p_hwfn,
+					 p_hwfn->hw_info.opaque_fid,
+					 rx_queue_id / edev->num_hwfns,
+					 vport_id,
+					 vport_id,
+					 sb,
+					 sb_index,
+					 bd_max_bytes,
+					 bd_chain_phys_addr,
+					 cqe_pbl_addr, cqe_pbl_size, pp_prod);
+
+	if (rc) {
+		DP_ERR(edev, "Failed to start RXQ#%d\n", rx_queue_id);
+		return rc;
+	}
+
+	DP_VERBOSE(edev, ECORE_MSG_SPQ,
+		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+		   rx_queue_id, rss_id, vport_id, sb);
+
+	return 0;
+}
+
+static int
+qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
+{
+	int rc, hwfn_index;
+	struct ecore_hwfn *p_hwfn;
+
+	hwfn_index = params->rss_id % edev->num_hwfns;
+	p_hwfn = &edev->hwfns[hwfn_index];
+
+	rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
+					params->rx_queue_id / edev->num_hwfns,
+					params->eq_completion_only, false);
+	if (rc) {
+		DP_ERR(edev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int
+qed_start_txq(struct ecore_dev *edev,
+	      uint8_t rss_id, uint16_t tx_queue_id,
+	      uint8_t vport_id, uint16_t sb,
+	      uint8_t sb_index,
+	      dma_addr_t pbl_addr,
+	      uint16_t pbl_size, void OSAL_IOMEM **pp_doorbell)
+{
+	struct ecore_hwfn *p_hwfn;
+	int rc, hwfn_index;
+
+	hwfn_index = rss_id % edev->num_hwfns;
+	p_hwfn = &edev->hwfns[hwfn_index];
+
+	rc = ecore_sp_eth_tx_queue_start(p_hwfn,
+					 p_hwfn->hw_info.opaque_fid,
+					 tx_queue_id / edev->num_hwfns,
+					 vport_id,
+					 vport_id,
+					 sb,
+					 sb_index,
+					 pbl_addr, pbl_size, pp_doorbell);
+
+	if (rc) {
+		DP_ERR(edev, "Failed to start TXQ#%d\n", tx_queue_id);
+		return rc;
+	}
+
+	DP_VERBOSE(edev, ECORE_MSG_SPQ,
+		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+		   tx_queue_id, rss_id, vport_id, sb);
+
+	return 0;
+}
+
+static int
+qed_stop_txq(struct ecore_dev *edev, struct qed_stop_txq_params *params)
+{
+	struct ecore_hwfn *p_hwfn;
+	int rc, hwfn_index;
+
+	hwfn_index = params->rss_id % edev->num_hwfns;
+	p_hwfn = &edev->hwfns[hwfn_index];
+
+	rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
+					params->tx_queue_id / edev->num_hwfns);
+	if (rc) {
+		DP_ERR(edev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int
+qed_fp_cqe_completion(struct ecore_dev *edev,
+		      uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe)
+{
+	return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns],
+					cqe);
+}
+
+static int qed_fastpath_stop(struct ecore_dev *edev)
+{
+	ecore_hw_stop_fastpath(edev);
+
+	return 0;
+}
+
+static void
+qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
+{
+	ecore_get_vport_stats(edev, stats);
+}
+
+static int
+qed_configure_filter_ucast(struct ecore_dev *edev,
+			   struct qed_filter_ucast_params *params)
+{
+	struct ecore_filter_ucast ucast;
+
+	if (!params->vlan_valid && !params->mac_valid) {
+		DP_NOTICE(edev, true,
+			  "Tried configuring a unicast filter,"
+			  "but both MAC and VLAN are not set\n");
+		return -EINVAL;
+	}
+
+	memset(&ucast, 0, sizeof(ucast));
+	switch (params->type) {
+	case QED_FILTER_XCAST_TYPE_ADD:
+		ucast.opcode = ECORE_FILTER_ADD;
+		break;
+	case QED_FILTER_XCAST_TYPE_DEL:
+		ucast.opcode = ECORE_FILTER_REMOVE;
+		break;
+	case QED_FILTER_XCAST_TYPE_REPLACE:
+		ucast.opcode = ECORE_FILTER_REPLACE;
+		break;
+	default:
+		DP_NOTICE(edev, true, "Unknown unicast filter type %d\n",
+			  params->type);
+		return -EINVAL;
+	}
+
+	if (params->vlan_valid && params->mac_valid) {
+		ucast.type = ECORE_FILTER_MAC_VLAN;
+		ether_addr_copy((struct ether_addr *)&params->mac,
+				(struct ether_addr *)&ucast.mac);
+		ucast.vlan = params->vlan;
+	} else if (params->mac_valid) {
+		ucast.type = ECORE_FILTER_MAC;
+		ether_addr_copy((struct ether_addr *)&params->mac,
+				(struct ether_addr *)&ucast.mac);
+	} else {
+		ucast.type = ECORE_FILTER_VLAN;
+		ucast.vlan = params->vlan;
+	}
+
+	ucast.is_rx_filter = true;
+	ucast.is_tx_filter = true;
+
+	return ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+}
+
+static int
+qed_configure_filter_mcast(struct ecore_dev *edev,
+			   struct qed_filter_mcast_params *params)
+{
+	struct ecore_filter_mcast mcast;
+	int i;
+
+	memset(&mcast, 0, sizeof(mcast));
+	switch (params->type) {
+	case QED_FILTER_XCAST_TYPE_ADD:
+		mcast.opcode = ECORE_FILTER_ADD;
+		break;
+	case QED_FILTER_XCAST_TYPE_DEL:
+		mcast.opcode = ECORE_FILTER_REMOVE;
+		break;
+	default:
+		DP_NOTICE(edev, true, "Unknown multicast filter type %d\n",
+			  params->type);
+		return -EINVAL;
+	}
+
+	mcast.num_mc_addrs = params->num;
+	for (i = 0; i < mcast.num_mc_addrs; i++)
+		ether_addr_copy((struct ether_addr *)&params->mac[i],
+				(struct ether_addr *)&mcast.mac[i]);
+
+	return ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+}
+
+int
+qed_configure_filter_rx_mode(struct ecore_dev *edev,
+			     enum qed_filter_rx_mode_type type)
+{
+	struct ecore_filter_accept_flags accept_flags;
+
+	memset(&accept_flags, 0, sizeof(accept_flags));
+
+	accept_flags.update_rx_mode_config = 1;
+	accept_flags.update_tx_mode_config = 1;
+	accept_flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+	    ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
+	accept_flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+	    ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
+
+	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+		accept_flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+		accept_flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+	else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+			  QED_FILTER_RX_MODE_TYPE_PROMISC))
+		accept_flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+		    ECORE_ACCEPT_MCAST_UNMATCHED;
+
+	return ecore_filter_accept_cmd(edev, 0, accept_flags, false, false,
+				       ECORE_SPQ_MODE_CB, NULL);
+}
+
+static int
+qed_configure_filter(struct ecore_dev *edev, struct qed_filter_params *params)
+{
+	switch (params->type) {
+	case QED_FILTER_TYPE_UCAST:
+		return qed_configure_filter_ucast(edev, &params->filter.ucast);
+	case QED_FILTER_TYPE_MCAST:
+		return qed_configure_filter_mcast(edev, &params->filter.mcast);
+	case QED_FILTER_TYPE_RX_MODE:
+		return qed_configure_filter_rx_mode(edev,
+						    params->filter.
+						    accept_flags);
+	default:
+		DP_NOTICE(edev, true, "Unknown filter type %d\n",
+			  (int)params->type);
+		return -EINVAL;
+	}
+}
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+	INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
+	INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
+	INIT_STRUCT_FIELD(vport_start, &qed_start_vport),
+	INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport),
+	INIT_STRUCT_FIELD(vport_update, &qed_update_vport),
+	INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
+	INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
+	INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
+	INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
+	INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
+	INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
+	INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
+	INIT_STRUCT_FIELD(filter_config, &qed_configure_filter),
+};
+
+uint32_t qed_get_protocol_version(enum qed_protocol protocol)
+{
+	switch (protocol) {
+	case QED_PROTOCOL_ETH:
+		return QED_ETH_INTERFACE_VERSION;
+	default:
+		return 0;
+	}
+}
+
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+	return &qed_eth_ops_pass;
+}
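
A consumer-side sketch of the exported ops table (illustrative only, not part
of the patch; the demo helper, the MTU value, and the vport parameters are
assumptions):

static int demo_bringup_vport(struct ecore_dev *edev)
{
	const struct qed_eth_ops *ops = qed_get_eth_ops();
	struct qed_start_vport_params start = { 0 };

	if (!ops)
		return -EINVAL;

	start.vport_id = 0;
	start.mtu = 1500;		/* assumed MTU */
	start.remove_inner_vlan = 1;
	start.drop_ttl0 = true;

	return ops->vport_start(edev, &start);
}
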
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
index 47b169d..bc6f86b 100644
--- a/drivers/net/qede/qede_eth_if.h
+++ b/drivers/net/qede/qede_eth_if.h
@@ -168,7 +168,7 @@ extern const struct qed_common_ops qed_common_ops_pass;
 extern int qed_fill_eth_dev_info(struct ecore_dev *edev,
 				 struct qed_dev_eth_info *info);
 
-void qed_put_eth_ops(void);
+const struct qed_eth_ops *qed_get_eth_ops(void);
 
 int qed_configure_filter_rx_mode(struct ecore_dev *edev,
 				 enum qed_filter_rx_mode_type type);
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 2915156..181300e 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -587,6 +587,14 @@ static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
 	return qede_dev_set_link_state(eth_dev, false);
 }
 
+static void qede_reset_stats(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	ecore_reset_vport_stats(edev);
+}
+
 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
 	enum qed_filter_rx_mode_type type =
@@ -686,6 +694,7 @@ static struct eth_dev_ops qede_eth_dev_ops = {
 	.dev_stop = qede_dev_stop,
 	.dev_close = qede_dev_close,
 	.stats_get = qede_get_stats,
+	.stats_reset = qede_reset_stats,
 	.mac_addr_add = qede_mac_addr_add,
 	.mac_addr_remove = qede_mac_addr_remove,
 	.vlan_offload_set = qede_vlan_offload_set,
@@ -746,9 +755,11 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
-	if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
-		DP_ERR(edev, "Version mismatch [%08x != %08x]\n",
-		       qed_ver, QEDE_ETH_INTERFACE_VERSION);
+	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
+
+	qed_ops = qed_get_eth_ops();
+	if (!qed_ops) {
+		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 9f5be7a..f87c369 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -18,6 +18,7 @@
 #include "base/bcm_osal.h"
 #include "base/ecore.h"
 #include "base/ecore_dev_api.h"
+#include "base/ecore_l2_api.h"
 #include "base/ecore_sp_api.h"
 #include "base/ecore_mcp_api.h"
 #include "base/ecore_hsi_common.h"
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 935eed8..1b05ff8 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -152,4 +152,13 @@ struct qed_common_ops {
 			      uint32_t dp_module, uint8_t dp_level);
 };
 
+/**
+ * @brief qed_get_protocol_version
+ *
+ * @param protocol
+ *
+ * @return version supported by qed for given protocol driver
+ */
+uint32_t qed_get_protocol_version(enum qed_protocol protocol);
+
 #endif /* _QEDE_IF_H */
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 7a1b986..1f25908 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -239,6 +239,8 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 		return rc;
 	}
 
+	ecore_reset_vport_stats(edev);
+
 	return 0;
 
 	ecore_hw_stop(edev);
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index d0450f7..f76f42c 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -526,6 +526,196 @@ qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
 	return index % n_rx_rings;
 }
 
+static void qede_prandom_bytes(uint32_t *buff, size_t bytes)
+{
+	unsigned i;
+
+	srand((unsigned int)time(NULL));
+
+	for (i = 0; i < bytes / sizeof(uint32_t); i++)
+		buff[i] = rand();
+}
+
+static int
+qede_config_rss(struct rte_eth_dev *eth_dev,
+		struct qed_update_vport_rss_params *rss_params)
+{
+	enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode;
+	struct rte_eth_rss_conf rss_conf =
+	    eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	unsigned i;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	/* Check if RSS conditions are met */
+
+	if (!(mode & ETH_MQ_RX_RSS)) {
+		DP_INFO(edev, "RSS flag is not set\n");
+		return -EINVAL;
+	} else {
+		DP_INFO(edev, "RSS flag is set\n");
+	}
+
+	if (rss_conf.rss_hf == 0) {
+		DP_NOTICE(edev, false, "No RSS hash function to apply\n");
+		return -EINVAL;
+	}
+
+	if (QEDE_RSS_CNT(qdev) == 1) {
+		DP_NOTICE(edev, false, "RSS is not enabled with one queue\n");
+		return -EINVAL;
+	}
+
+	memset(rss_params, 0, sizeof(*rss_params));
+
+	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
+		rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i,
+							QEDE_RSS_CNT(qdev));
+
+	/* key and protocols */
+	if (rss_conf.rss_key == NULL) {
+		qede_prandom_bytes(rss_params->rss_key,
+				   sizeof(rss_params->rss_key));
+	} else {
+		/* User-provided RSS keys are not supported yet */
+		DP_NOTICE(edev, false,
+			  "User provided rss key is not supported\n");
+		return -EINVAL;
+	}
+
+	DP_INFO(edev, "RSS check passes\n");
+
+	return 0;
+}
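
Since qede_rxfh_indir_default() is just index % n_rx_rings, the default table
built above spreads entries round-robin across the queues. A small sketch of
the result (illustrative only): with four Rx queues the 128-entry table
becomes 0,1,2,3,0,1,2,3,...

	uint16_t ind[ECORE_RSS_IND_TABLE_SIZE];
	unsigned int i;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		ind[i] = qede_rxfh_indir_default(i, 4);	/* 0,1,2,3,0,1,... */
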
+
+static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
+	struct qed_dev_info *qed_info = &qdev->dev_info.common;
+	struct qed_update_vport_params vport_update_params;
+	struct qed_start_vport_params start = { 0 };
+	int vlan_removal_en = 1;
+	int rc, tc, i;
+
+	if (!qdev->num_rss) {
+		DP_ERR(edev,
+		       "Cannot update V-VPORT as active as"
+		       "there are no Rx queues\n");
+		return -EINVAL;
+	}
+
+	start.remove_inner_vlan = vlan_removal_en;
+	start.gro_enable = !qdev->gro_disable;
+	start.mtu = qdev->mtu;
+	start.vport_id = 0;
+	start.drop_ttl0 = true;
+	start.clear_stats = clear_stats;
+
+	rc = qdev->ops->vport_start(edev, &start);
+	if (rc) {
+		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+		return rc;
+	}
+
+	DP_INFO(edev,
+		"Start vport ramrod passed, vport_id = %d,"
+		" MTU = %d, vlan_removal_en = %d\n",
+		start.vport_id, qdev->mtu + 0xe, vlan_removal_en);
+
+	for_each_rss(i) {
+		struct qede_fastpath *fp = &qdev->fp_array[i];
+		dma_addr_t p_phys_table;
+		uint16_t page_cnt;
+
+		p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+		page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+
+		ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);	/* @DPDK */
+
+		rc = qdev->ops->q_rx_start(edev, i, i, 0,
+					   fp->sb_info->igu_sb_id,
+					   RX_PI,
+					   fp->rxq->rx_buf_size,
+					   fp->rxq->rx_bd_ring.p_phys_addr,
+					   p_phys_table,
+					   page_cnt,
+					   &fp->rxq->hw_rxq_prod_addr);
+		if (rc) {
+			DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
+			return rc;
+		}
+
+		fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+
+		qede_update_rx_prod(qdev, fp->rxq);
+
+		for (tc = 0; tc < qdev->num_tc; tc++) {
+			struct qede_tx_queue *txq = fp->txqs[tc];
+			int txq_index = tc * QEDE_RSS_CNT(qdev) + i;
+
+			p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
+			page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+			rc = qdev->ops->q_tx_start(edev, i, txq_index,
+						   0,
+						   fp->sb_info->igu_sb_id,
+						   TX_PI(tc),
+						   p_phys_table, page_cnt,
+						   &txq->doorbell_addr);
+			if (rc) {
+				DP_ERR(edev, "Start txq %u failed %d\n",
+				       txq_index, rc);
+				return rc;
+			}
+
+			txq->hw_cons_ptr =
+			    &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
+			SET_FIELD(txq->tx_db.data.params,
+				  ETH_DB_DATA_DEST, DB_DEST_XCM);
+			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+				  DB_AGG_CMD_SET);
+			SET_FIELD(txq->tx_db.data.params,
+				  ETH_DB_DATA_AGG_VAL_SEL,
+				  DQ_XCM_ETH_TX_BD_PROD_CMD);
+
+			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+		}
+	}
+
+	/* Prepare and send the vport enable */
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
+	vport_update_params.vport_id = start.vport_id;
+	vport_update_params.update_vport_active_flg = 1;
+	vport_update_params.vport_active_flg = 1;
+
+	/* @DPDK */
+	if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
+		/* TBD: Check SRIOV enabled for VF */
+		vport_update_params.update_tx_switching_flg = 1;
+		vport_update_params.tx_switching_flg = 1;
+	}
+
+	if (!qede_config_rss(eth_dev, rss_params))
+		vport_update_params.update_rss_flg = 1;
+
+	DP_INFO(edev, "Updating RSS flag to %d\n",
+		vport_update_params.update_rss_flg);
+
+	rte_memcpy(&vport_update_params.rss_params, rss_params,
+	       sizeof(*rss_params));
+
+	rc = qdev->ops->vport_update(edev, &vport_update_params);
+	if (rc) {
+		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
 #ifdef ENC_SUPPORTED
 static bool qede_tunn_exist(uint16_t flag)
 {
@@ -955,6 +1145,8 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)
 		return -EINVAL;
 	}
 
+	rc = qede_start_queues(eth_dev, true);
+
 	if (rc) {
 		DP_ERR(edev, "Failed to start queues\n");
 		/* TBD: free */
-- 
1.7.10.3
