From: Rasesh Mody <rmody@marvell.com>
To: <dev@dpdk.org>
Cc: Rasesh Mody <rmody@marvell.com>, <jerinj@marvell.com>,
	<ferruh.yigit@intel.com>, <GR-Everest-DPDK-Dev@marvell.com>
Subject: [dpdk-dev] [PATCH v2 3/4] net/bnx2x: update to latest FW 7.13.11
Date: Thu, 19 Sep 2019 14:11:56 -0700
Message-ID: <20190919211157.1668-4-rmody@marvell.com>
In-Reply-To: <20190906072548.12304-1-rmody@marvell.com>

Use the latest firmware 7.13.11.

Some of the fixes included with this FW are as follows:
    - Packets sent from a VF with a VLAN different from its configured
      pvid were transmitted instead of being discarded.
    - In some multi-function configurations, inter-PF and inter-VF
      Tx switching was incorrectly enabled.
    - Wrong assert code in FLR final cleanup when it is sent without a
      preceding FLR.
    - The chip could stall in very rare cases under heavy traffic with
      FW GRO enabled.
    - VF malicious notification error fixes.
    - Default the GRE tunnel type to IPGRE, which allows proper RSS for
      IPGRE packets; L2GRE traffic will reach a single queue.
    - Remove unnecessary internal memory configuration; the latest FW
      performs this autonomously.

Update the PMD version to 1.1.0.1.

Signed-off-by: Rasesh Mody <rmody@marvell.com>
---
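Note (not part of the commit message): the PMD version bump only touches the
four BNX2X_PMD_VERSION_* macros changed in bnx2x.c below. As a minimal sketch
of how such macros typically combine into the advertised "1.1.0.1" string --
the helper name and buffer size here are illustrative, not taken from this
patch:

    #include <stdio.h>

    #define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
    #define BNX2X_PMD_VERSION_MAJOR 1
    #define BNX2X_PMD_VERSION_MINOR 1
    #define BNX2X_PMD_VERSION_REVISION 0
    #define BNX2X_PMD_VERSION_PATCH 1

    /* Illustrative helper: format "<prefix> major.minor.revision.patch". */
    static const char *pmd_version_str(void)
    {
        static char ver[32];

        snprintf(ver, sizeof(ver), "%s %d.%d.%d.%d",
                 BNX2X_PMD_VER_PREFIX,
                 BNX2X_PMD_VERSION_MAJOR, BNX2X_PMD_VERSION_MINOR,
                 BNX2X_PMD_VERSION_REVISION, BNX2X_PMD_VERSION_PATCH);
        return ver;
    }
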
 doc/guides/nics/bnx2x.rst          |   4 +-
 drivers/net/bnx2x/bnx2x.c          |  40 +---
 drivers/net/bnx2x/bnx2x.h          |   5 +-
 drivers/net/bnx2x/ecore_fw_defs.h  | 252 ++++++++++++-----------
 drivers/net/bnx2x/ecore_hsi.h      |   2 +-
 drivers/net/bnx2x/ecore_init.h     | 214 ++++++++++----------
 drivers/net/bnx2x/ecore_init_ops.h | 192 ++++++++----------
 drivers/net/bnx2x/ecore_mfw_req.h  |  11 +-
 drivers/net/bnx2x/ecore_sp.c       |  39 ++--
 drivers/net/bnx2x/ecore_sp.h       | 308 ++++++++++++++++++++++++-----
 10 files changed, 640 insertions(+), 427 deletions(-)
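
Note on the ecore_fw_defs.h churn: most of that file is IRO[] index
renumbering, because the 7.13.11 firmware ships a regenerated internal RAM
offset table; the addressing scheme itself is unchanged. A rough sketch of
the entry layout implied by the offset macros -- the field names
base/m1/m2/m3/size come from the expressions in the header, but this struct
definition is an assumption for illustration and is not part of this patch:

    #include <stdint.h>

    /* One internal RAM offset (IRO) table entry, as implied by macros such
     * as CSTORM_STATUS_BLOCK_OFFSET(sbId), which expands to roughly
     *   IRO[136].base + ((sbId) * IRO[136].m1)
     * With the new firmware only the array indices change, not the formula.
     */
    struct iro_sketch {
        uint32_t base;  /* base offset within the storm RAM */
        uint16_t m1;    /* stride for the first index (pfId, sbId, ...) */
        uint16_t m2;    /* stride for the second index, if present */
        uint16_t m3;    /* stride for the third index, if present */
        uint16_t size;  /* size of the addressed firmware structure */
    };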

diff --git a/doc/guides/nics/bnx2x.rst b/doc/guides/nics/bnx2x.rst
index 00e131484..d3650267f 100644
--- a/doc/guides/nics/bnx2x.rst
+++ b/doc/guides/nics/bnx2x.rst
@@ -93,9 +93,9 @@ Supported QLogic NICs
 Prerequisites
 -------------
 
-- Requires firmware version **7.2.51.0**. It is included in most of the
+- Requires firmware version **7.13.11.0**. It is included in most of the
   standard Linux distros. If it is not available visit
-  `linux-firmware git repository <https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/bnx2x/bnx2x-e2-7.2.51.0.fw>`_
+  `linux-firmware git repository <https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/bnx2x/bnx2x-e2-7.13.11.0.fw>`_
   to get the required firmware.
 
 Pre-Installation Configuration
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 7a76c308a..c1663b16a 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -29,8 +29,8 @@
 
 #define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
 #define BNX2X_PMD_VERSION_MAJOR 1
-#define BNX2X_PMD_VERSION_MINOR 0
-#define BNX2X_PMD_VERSION_REVISION 7
+#define BNX2X_PMD_VERSION_MINOR 1
+#define BNX2X_PMD_VERSION_REVISION 0
 #define BNX2X_PMD_VERSION_PATCH 1
 
 static inline const char *
@@ -5230,20 +5230,6 @@ static void bnx2x_init_internal_common(struct bnx2x_softc *sc)
 {
 	int i;
 
-	if (IS_MF_SI(sc)) {
-/*
- * In switch independent mode, the TSTORM needs to accept
- * packets that failed classification, since approximate match
- * mac addresses aren't written to NIG LLH.
- */
-		REG_WR8(sc,
-			(BAR_TSTRORM_INTMEM +
-			 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 2);
-	} else
-		REG_WR8(sc,
-			(BAR_TSTRORM_INTMEM +
-			 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 0);
-
 	/*
 	 * Zero this manually as its initialization is currently missing
 	 * in the initTool.
@@ -5797,15 +5783,12 @@ static void bnx2x_init_objs(struct bnx2x_softc *sc)
 				    VNICS_PER_PATH(sc));
 
 	/* RSS configuration object */
-	ecore_init_rss_config_obj(&sc->rss_conf_obj,
-				  sc->fp[0].cl_id,
-				  sc->fp[0].index,
-				  SC_FUNC(sc),
-				  SC_FUNC(sc),
+	ecore_init_rss_config_obj(sc, &sc->rss_conf_obj, sc->fp->cl_id,
+				  sc->fp->index, SC_FUNC(sc), SC_FUNC(sc),
 				  BNX2X_SP(sc, rss_rdata),
 				  (rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata),
-				  ECORE_FILTER_RSS_CONF_PENDING,
-				  &sc->sp_state, ECORE_OBJ_TYPE_RX);
+				  ECORE_FILTER_RSS_CONF_PENDING, &sc->sp_state,
+				  ECORE_OBJ_TYPE_RX);
 }
 
 /*
@@ -5834,9 +5817,6 @@ static int bnx2x_func_start(struct bnx2x_softc *sc)
 		start_params->network_cos_mode = FW_WRR;
 	}
 
-	start_params->gre_tunnel_mode = 0;
-	start_params->gre_tunnel_rss = 0;
-
 	return ecore_func_state_change(sc, &func_params);
 }
 
@@ -9650,8 +9630,8 @@ static void bnx2x_init_rte(struct bnx2x_softc *sc)
 }
 
 #define FW_HEADER_LEN 104
-#define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.2.51.0.fw"
-#define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.2.51.0.fw"
+#define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.13.11.0.fw"
+#define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.13.11.0.fw"
 
 void bnx2x_load_firmware(struct bnx2x_softc *sc)
 {
@@ -10367,7 +10347,7 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
 
 	/* clean the DMAE memory */
 	sc->dmae_ready = 1;
-	ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8);
+	ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
 
 	ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
 
@@ -11579,7 +11559,7 @@ static void bnx2x_reset_func(struct bnx2x_softc *sc)
 		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
 		ilt_cli.client_num = ILT_CLIENT_TM;
 
-		ecore_ilt_boundry_init_op(sc, &ilt_cli, 0);
+		ecore_ilt_boundary_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
 	}
 
 	/* this assumes that reset_port() called before reset_func() */
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 054d95424..43c60408a 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -83,9 +83,6 @@
 #ifndef ARRAY_SIZE
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
 #endif
-#ifndef ARRSIZE
-#define ARRSIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-#endif
 #ifndef DIV_ROUND_UP
 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
 #endif
@@ -1020,6 +1017,8 @@ struct bnx2x_pci_cap {
 	uint16_t addr;
 };
 
+struct ecore_ilt;
+
 struct bnx2x_vfdb;
 
 /* Top level device private data structure. */
diff --git a/drivers/net/bnx2x/ecore_fw_defs.h b/drivers/net/bnx2x/ecore_fw_defs.h
index 5984acd94..5397a701a 100644
--- a/drivers/net/bnx2x/ecore_fw_defs.h
+++ b/drivers/net/bnx2x/ecore_fw_defs.h
@@ -13,170 +13,170 @@
 #ifndef ECORE_FW_DEFS_H
 #define ECORE_FW_DEFS_H
 
-
-#define CSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[148].base)
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[152].base)
 #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[147].base + ((assertListEntry) * IRO[147].m1))
+	(IRO[151].base + ((assertListEntry) * IRO[151].m1))
 #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
-	(IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
-	IRO[153].m2))
+	(IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \
+	IRO[157].m2))
 #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
-	(IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
-	IRO[154].m2))
-#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
-	(IRO[155].base + ((vfId) * IRO[155].m1))
-#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
-	(IRO[156].base + ((vfId) * IRO[156].m1))
-#define CSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[150].base + ((funcId) * IRO[150].m1))
+	(IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \
+	IRO[158].m2))
 #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
-	(IRO[159].base + ((funcId) * IRO[159].m1))
+	(IRO[163].base + ((funcId) * IRO[163].m1))
 #define CSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[149].base + ((funcId) * IRO[149].m1))
+	(IRO[153].base + ((funcId) * IRO[153].m1))
 #define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
-	(IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2))
+	(IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2))
 #define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
-	(IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \
-	* IRO[138].m2) + ((sbId) * IRO[138].m3))
-#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
+	(IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \
+	* IRO[142].m2) + ((sbId) * IRO[142].m3))
+#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
 #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[317].base + ((pfId) * IRO[317].m1))
+	(IRO[323].base + ((pfId) * IRO[323].m1))
 #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
-	(IRO[318].base + ((pfId) * IRO[318].m1))
+	(IRO[324].base + ((pfId) * IRO[324].m1))
 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
-	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+	(IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
 #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+	(IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+	(IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
-	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+	(IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
 #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
-	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
-#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
 	(IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+	(IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
 #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
-	(IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+	(IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
 #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[316].base + ((pfId) * IRO[316].m1))
+	(IRO[322].base + ((pfId) * IRO[322].m1))
 #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[308].base + ((pfId) * IRO[308].m1))
+	(IRO[314].base + ((pfId) * IRO[314].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[307].base + ((pfId) * IRO[307].m1))
+	(IRO[313].base + ((pfId) * IRO[313].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[306].base + ((pfId) * IRO[306].m1))
+	(IRO[312].base + ((pfId) * IRO[312].m1))
 #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
-	(IRO[151].base + ((funcId) * IRO[151].m1))
+	(IRO[155].base + ((funcId) * IRO[155].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
-	(IRO[142].base + ((pfId) * IRO[142].m1))
+	(IRO[146].base + ((pfId) * IRO[146].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
-	(IRO[143].base + ((pfId) * IRO[143].m1))
+	(IRO[147].base + ((pfId) * IRO[147].m1))
 #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
-	(IRO[141].base + ((pfId) * IRO[141].m1))
-#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
+	(IRO[145].base + ((pfId) * IRO[145].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size)
 #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
-	(IRO[144].base + ((pfId) * IRO[144].m1))
-#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+	(IRO[148].base + ((pfId) * IRO[148].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size)
 #define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
-	(IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
+	(IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2))
 #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
-	(IRO[133].base + ((sbId) * IRO[133].m1))
+	(IRO[137].base + ((sbId) * IRO[137].m1))
 #define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
-	(IRO[134].base + ((sbId) * IRO[134].m1))
+	(IRO[138].base + ((sbId) * IRO[138].m1))
 #define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
-	(IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
+	(IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2))
 #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
-	(IRO[132].base + ((sbId) * IRO[132].m1))
-#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
+	(IRO[136].base + ((sbId) * IRO[136].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size)
 #define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
-	(IRO[137].base + ((sbId) * IRO[137].m1))
-#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
+	(IRO[141].base + ((sbId) * IRO[141].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size)
+#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
+	(IRO[159].base + ((vfId) * IRO[159].m1))
+#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
+	(IRO[160].base + ((vfId) * IRO[160].m1))
 #define CSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
+	(IRO[154].base + ((funcId) * IRO[154].m1))
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
-	(IRO[203].base + ((pfId) * IRO[203].m1))
+	(IRO[207].base + ((pfId) * IRO[207].m1))
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[102].base)
 #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
 	(IRO[101].base + ((assertListEntry) * IRO[101].m1))
 #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
-	(IRO[201].base + ((pfId) * IRO[201].m1))
+	(IRO[205].base + ((pfId) * IRO[205].m1))
 #define TSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[103].base + ((funcId) * IRO[103].m1))
+	(IRO[107].base + ((funcId) * IRO[107].m1))
 #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[272].base + ((pfId) * IRO[272].m1))
+	(IRO[278].base + ((pfId) * IRO[278].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
+	(IRO[279].base + ((pfId) * IRO[279].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+	(IRO[280].base + ((pfId) * IRO[280].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+	(IRO[281].base + ((pfId) * IRO[281].m1))
 #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[271].base + ((pfId) * IRO[271].m1))
+	(IRO[277].base + ((pfId) * IRO[277].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[270].base + ((pfId) * IRO[270].m1))
+	(IRO[276].base + ((pfId) * IRO[276].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[269].base + ((pfId) * IRO[269].m1))
+	(IRO[275].base + ((pfId) * IRO[275].m1))
 #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
-	(IRO[268].base + ((pfId) * IRO[268].m1))
+	(IRO[274].base + ((pfId) * IRO[274].m1))
 #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
-	(IRO[278].base + ((pfId) * IRO[278].m1))
+	(IRO[284].base + ((pfId) * IRO[284].m1))
 #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[264].base + ((pfId) * IRO[264].m1))
+	(IRO[270].base + ((pfId) * IRO[270].m1))
 #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
-	(IRO[265].base + ((pfId) * IRO[265].m1))
+	(IRO[271].base + ((pfId) * IRO[271].m1))
 #define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
-	(IRO[266].base + ((pfId) * IRO[266].m1))
+	(IRO[272].base + ((pfId) * IRO[272].m1))
 #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
-	(IRO[267].base + ((pfId) * IRO[267].m1))
+	(IRO[273].base + ((pfId) * IRO[273].m1))
 #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
-	(IRO[202].base + ((pfId) * IRO[202].m1))
+	(IRO[206].base + ((pfId) * IRO[206].m1))
 #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
-	(IRO[105].base + ((funcId) * IRO[105].m1))
+	(IRO[109].base + ((funcId) * IRO[109].m1))
 #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
-	(IRO[217].base + ((pfId) * IRO[217].m1))
+	(IRO[223].base + ((pfId) * IRO[223].m1))
 #define TSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[104].base + ((funcId) * IRO[104].m1))
-#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
-#define USTORM_AGG_DATA_SIZE (IRO[206].size)
-#define USTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[177].base)
+	(IRO[108].base + ((funcId) * IRO[108].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
+#define USTORM_AGG_DATA_SIZE (IRO[212].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[181].base)
 #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[176].base + ((assertListEntry) * IRO[176].m1))
-#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
-	(IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * IRO[205].m2))
+	(IRO[180].base + ((assertListEntry) * IRO[180].m1))
 #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
-	(IRO[183].base + ((portId) * IRO[183].m1))
+	(IRO[187].base + ((portId) * IRO[187].m1))
 #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
-	(IRO[319].base + ((pfId) * IRO[319].m1))
+	(IRO[325].base + ((pfId) * IRO[325].m1))
 #define USTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[178].base + ((funcId) * IRO[178].m1))
+	(IRO[182].base + ((funcId) * IRO[182].m1))
 #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[283].base + ((pfId) * IRO[283].m1))
+	(IRO[289].base + ((pfId) * IRO[289].m1))
 #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
-	(IRO[284].base + ((pfId) * IRO[284].m1))
+	(IRO[290].base + ((pfId) * IRO[290].m1))
 #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[288].base + ((pfId) * IRO[288].m1))
+	(IRO[294].base + ((pfId) * IRO[294].m1))
 #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
-	(IRO[285].base + ((pfId) * IRO[285].m1))
+	(IRO[291].base + ((pfId) * IRO[291].m1))
 #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[281].base + ((pfId) * IRO[281].m1))
+	(IRO[287].base + ((pfId) * IRO[287].m1))
 #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[280].base + ((pfId) * IRO[280].m1))
+	(IRO[286].base + ((pfId) * IRO[286].m1))
 #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[279].base + ((pfId) * IRO[279].m1))
+	(IRO[285].base + ((pfId) * IRO[285].m1))
 #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[282].base + ((pfId) * IRO[282].m1))
+	(IRO[288].base + ((pfId) * IRO[288].m1))
 #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
-	(IRO[286].base + ((pfId) * IRO[286].m1))
+	(IRO[292].base + ((pfId) * IRO[292].m1))
 #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
-	(IRO[287].base + ((pfId) * IRO[287].m1))
+	(IRO[293].base + ((pfId) * IRO[293].m1))
 #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
-	(IRO[182].base + ((pfId) * IRO[182].m1))
+	(IRO[186].base + ((pfId) * IRO[186].m1))
 #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
-	(IRO[180].base + ((funcId) * IRO[180].m1))
+	(IRO[184].base + ((funcId) * IRO[184].m1))
 #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
-	(IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
-	IRO[209].m2))
+	(IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
+	IRO[215].m2))
 #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
-	(IRO[210].base + ((qzoneId) * IRO[210].m1))
-#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
-#define USTORM_TPA_BTR_SIZE (IRO[207].size)
+	(IRO[216].base + ((qzoneId) * IRO[216].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
+#define USTORM_TPA_BTR_SIZE (IRO[213].size)
 #define USTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[179].base + ((funcId) * IRO[179].m1))
+	(IRO[183].base + ((funcId) * IRO[183].m1))
 #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
 #define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
 #define XSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[51].base)
@@ -189,39 +189,39 @@
 #define XSTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[47].base + ((funcId) * IRO[47].m1))
 #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[296].base + ((pfId) * IRO[296].m1))
+	(IRO[302].base + ((pfId) * IRO[302].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
-	(IRO[299].base + ((pfId) * IRO[299].m1))
+	(IRO[305].base + ((pfId) * IRO[305].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
-	(IRO[300].base + ((pfId) * IRO[300].m1))
+	(IRO[306].base + ((pfId) * IRO[306].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
-	(IRO[301].base + ((pfId) * IRO[301].m1))
+	(IRO[307].base + ((pfId) * IRO[307].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
-	(IRO[302].base + ((pfId) * IRO[302].m1))
+	(IRO[308].base + ((pfId) * IRO[308].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
-	(IRO[303].base + ((pfId) * IRO[303].m1))
+	(IRO[309].base + ((pfId) * IRO[309].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
-	(IRO[304].base + ((pfId) * IRO[304].m1))
+	(IRO[310].base + ((pfId) * IRO[310].m1))
 #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
-	(IRO[305].base + ((pfId) * IRO[305].m1))
+	(IRO[311].base + ((pfId) * IRO[311].m1))
 #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[295].base + ((pfId) * IRO[295].m1))
+	(IRO[301].base + ((pfId) * IRO[301].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[294].base + ((pfId) * IRO[294].m1))
+	(IRO[300].base + ((pfId) * IRO[300].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[293].base + ((pfId) * IRO[293].m1))
+	(IRO[299].base + ((pfId) * IRO[299].m1))
 #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[298].base + ((pfId) * IRO[298].m1))
+	(IRO[304].base + ((pfId) * IRO[304].m1))
 #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
-	(IRO[297].base + ((pfId) * IRO[297].m1))
+	(IRO[303].base + ((pfId) * IRO[303].m1))
 #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
-	(IRO[292].base + ((pfId) * IRO[292].m1))
+	(IRO[298].base + ((pfId) * IRO[298].m1))
 #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[291].base + ((pfId) * IRO[291].m1))
+	(IRO[297].base + ((pfId) * IRO[297].m1))
 #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
-	(IRO[290].base + ((pfId) * IRO[290].m1))
+	(IRO[296].base + ((pfId) * IRO[296].m1))
 #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
-	(IRO[289].base + ((pfId) * IRO[289].m1))
+	(IRO[295].base + ((pfId) * IRO[295].m1))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
 	(IRO[44].base + ((pfId) * IRO[44].m1))
 #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -234,15 +234,18 @@
 #define XSTORM_SPQ_PROD_OFFSET(funcId) \
 	(IRO[31].base + ((funcId) * IRO[31].m1))
 #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
-	(IRO[211].base + ((portId) * IRO[211].m1))
+	(IRO[217].base + ((portId) * IRO[217].m1))
 #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
-	(IRO[212].base + ((portId) * IRO[212].m1))
+	(IRO[218].base + ((portId) * IRO[218].m1))
 #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
-	(IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
-	IRO[214].m2))
+	(IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
+	IRO[220].m2))
 #define XSTORM_VF_TO_PF_OFFSET(funcId) \
 	(IRO[48].base + ((funcId) * IRO[48].m1))
-#define COMMON_ASM_INVALID_ASSERT_OPCODE (IRO[7].base)
+#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
+
+/* eth hsi version */
+#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2)
 
 
 /* Ethernet Ring parameters */
@@ -250,19 +253,27 @@
 #define FIRST_BD_IN_PKT	0
 #define PARSE_BD_INDEX 1
 #define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
+#define U_ETH_NUM_OF_SGES_TO_FETCH 8
+#define U_ETH_MAX_SGES_FOR_PACKET 3
 
 /* Rx ring params */
 #define U_ETH_LOCAL_BD_RING_SIZE 8
+#define U_ETH_LOCAL_SGE_RING_SIZE 10
 #define U_ETH_SGL_SIZE 8
 	/* The fw will padd the buffer with this value, so the IP header \
 	will be align to 4 Byte */
 #define IP_HEADER_ALIGNMENT_PADDING 2
 
+#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
+	(0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
+
 #define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
 #define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
+#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
 
 #define U_ETH_BDS_PER_PAGE_MASK	(U_ETH_BDS_PER_PAGE-1)
 #define U_ETH_CQE_PER_PAGE_MASK	(TU_ETH_CQES_PER_PAGE-1)
+#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
 
 #define U_ETH_UNDEFINED_Q 0xFF
 
@@ -281,20 +292,25 @@
 #define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
 
 /* Maximal L2 clients supported */
+#define ETH_MAX_RX_CLIENTS_E1 18
 #define ETH_MAX_RX_CLIENTS_E1H 28
 #define ETH_MAX_RX_CLIENTS_E2 152
 
 /* Maximal statistics client Ids */
+#define MAX_STAT_COUNTER_ID_E1 36
 #define MAX_STAT_COUNTER_ID_E1H	56
 #define MAX_STAT_COUNTER_ID_E2 140
 
+#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
 #define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
 #define MAX_MAC_CREDIT_E2 272 /* Per Path */
+#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
 #define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
 #define MAX_VLAN_CREDIT_E2 272 /* Per Path */
 
 
 /* Maximal aggregation queues supported */
+#define ETH_MAX_AGGREGATION_QUEUES_E1 32
 #define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
 
 
@@ -302,6 +318,8 @@
 #define ETH_NUM_OF_MCAST_ENGINES_E2 72
 
 #define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
+	(ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
 #define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
 	(ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
 
@@ -357,6 +375,7 @@
 
 /* used for Host Coallescing */
 #define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
+#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
 
 /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
 
@@ -370,7 +389,7 @@
 #define MAX_COS_NUMBER 4
 #define MAX_TRAFFIC_TYPES 8
 #define MAX_PFC_PRIORITIES 8
-
+#define MAX_VLAN_PRIORITIES 8
 	/* used by array traffic_type_to_priority[] to mark traffic type \
 	that is not mapped to priority*/
 #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
@@ -397,5 +416,4 @@
 #define MAX_NUM_FCOE_TASKS_PER_ENGINE \
 	4096 /*Each port can have at max 1 function*/
 
-
 #endif /* ECORE_FW_DEFS_H */
diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h
index 2728deb1d..aaf8b048e 100644
--- a/drivers/net/bnx2x/ecore_hsi.h
+++ b/drivers/net/bnx2x/ecore_hsi.h
@@ -5508,7 +5508,7 @@ struct afex_vif_list_ramrod_data {
  *
  */
 struct c2s_pri_trans_table_entry {
-	uint8_t val[8];
+	uint8_t val[MAX_VLAN_PRIORITIES];
 };
 
 
diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h
index 97dfe69b5..4e348612a 100644
--- a/drivers/net/bnx2x/ecore_init.h
+++ b/drivers/net/bnx2x/ecore_init.h
@@ -26,10 +26,6 @@ enum {
 	OP_WB_ZR,	/* Clear a string using DMAE or indirect-wr */
 	OP_IF_MODE_OR,  /* Skip the following ops if all init modes don't match */
 	OP_IF_MODE_AND, /* Skip the following ops if any init modes don't match */
-	OP_IF_PHASE,
-	OP_RT,
-	OP_DELAY,
-	OP_VERIFY,
 	OP_MAX
 };
 
@@ -86,17 +82,6 @@ struct op_if_mode {
 	uint32_t mode_bit_map;
 };
 
-struct op_if_phase {
-	uint32_t op:8;
-	uint32_t cmd_offset:24;
-	uint32_t phase_bit_map;
-};
-
-struct op_delay {
-	uint32_t op:8;
-	uint32_t reserved:24;
-	uint32_t delay;
-};
 
 union init_op {
 	struct op_read		read;
@@ -105,8 +90,6 @@ union init_op {
 	struct op_zero		zero;
 	struct raw_op		raw;
 	struct op_if_mode	if_mode;
-	struct op_if_phase	if_phase;
-	struct op_delay		delay;
 };
 
 
@@ -187,12 +170,7 @@ enum {
 	NUM_OF_INIT_BLOCKS
 };
 
-
-
-
-
-
-
+#include "bnx2x.h"
 
 /* Vnics per mode */
 #define ECORE_PORT2_MODE_NUM_VNICS 4
@@ -239,7 +217,7 @@ static inline void ecore_map_q_cos(struct bnx2x_softc *sc, uint32_t q_num, uint3
 		/* update parameters for 4port mode */
 		if (INIT_MODE_FLAGS(sc) & MODE_PORT4) {
 			num_vnics = ECORE_PORT4_MODE_NUM_VNICS;
-			if (PORT_ID(sc)) {
+			if (SC_PORT(sc)) {
 				curr_cos += ECORE_E3B0_PORT1_COS_OFFSET;
 				new_cos += ECORE_E3B0_PORT1_COS_OFFSET;
 			}
@@ -248,7 +226,7 @@ static inline void ecore_map_q_cos(struct bnx2x_softc *sc, uint32_t q_num, uint3
 		/* change queue mapping for each VNIC */
 		for (vnic = 0; vnic < num_vnics; vnic++) {
 			uint32_t pf_q_num =
-				ECORE_PF_Q_NUM(q_num, PORT_ID(sc), vnic);
+				ECORE_PF_Q_NUM(q_num, SC_PORT(sc), vnic);
 			uint32_t q_bit_map = 1 << (pf_q_num & 0x1f);
 
 			/* overwrite queue->VOQ mapping */
@@ -427,7 +405,11 @@ static inline void ecore_init_min(const struct cmng_init_input *input_data,
 	tFair = T_FAIR_COEF / input_data->port_rate;
 
 	/* this is the threshold below which we won't arm the timer anymore */
-	pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
+	pdata->fair_vars.fair_threshold = QM_ARB_BYTES +
+					  input_data->fairness_thr;
+
+	/*New limitation - minimal packet size to cause timeout to be armed */
+	pdata->fair_vars.size_thr = input_data->size_thr;
 
 	/*
 	 *  we multiply by 1e3/8 to get bytes/msec. We don't want the credits
@@ -469,6 +451,7 @@ static inline void ecore_init_min(const struct cmng_init_input *input_data,
 }
 
 static inline void ecore_init_fw_wrr(const struct cmng_init_input *input_data,
+				     uint32_t r_param __rte_unused,
 				     struct cmng_init *ram_data)
 {
 	uint32_t vnic, cos;
@@ -507,7 +490,9 @@ static inline void ecore_init_fw_wrr(const struct cmng_init_input *input_data,
 	}
 }
 
-static inline void ecore_init_safc(struct cmng_init *ram_data)
+static inline void
+ecore_init_safc(const struct cmng_init_input *input_data __rte_unused,
+		struct cmng_init *ram_data)
 {
 	/* in microSeconds */
 	ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
@@ -518,7 +503,7 @@ static inline void ecore_init_cmng(const struct cmng_init_input *input_data,
 				   struct cmng_init *ram_data)
 {
 	uint32_t r_param;
-	ECORE_MEMSET(ram_data, 0,sizeof(struct cmng_init));
+	ECORE_MEMSET(ram_data, 0, sizeof(struct cmng_init));
 
 	ram_data->port.flags = input_data->flags;
 
@@ -529,8 +514,8 @@ static inline void ecore_init_cmng(const struct cmng_init_input *input_data,
 	r_param = BITS_TO_BYTES(input_data->port_rate);
 	ecore_init_max(input_data, r_param, ram_data);
 	ecore_init_min(input_data, r_param, ram_data);
-	ecore_init_fw_wrr(input_data, ram_data);
-	ecore_init_safc(ram_data);
+	ecore_init_fw_wrr(input_data, r_param, ram_data);
+	ecore_init_safc(input_data, ram_data);
 }
 
 
@@ -585,25 +570,25 @@ struct src_ent {
 /****************************************************************************
 * Parity configuration
 ****************************************************************************/
-#define BLOCK_PRTY_INFO(block, en_mask, m1h, m2, m3) \
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
 { \
 	block##_REG_##block##_PRTY_MASK, \
 	block##_REG_##block##_PRTY_STS_CLR, \
-	en_mask, {m1h, m2, m3}, #block \
+	en_mask, {m1, m1h, m2, m3}, #block \
 }
 
-#define BLOCK_PRTY_INFO_0(block, en_mask, m1h, m2, m3) \
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
 { \
 	block##_REG_##block##_PRTY_MASK_0, \
 	block##_REG_##block##_PRTY_STS_CLR_0, \
-	en_mask, {m1h, m2, m3}, #block"_0" \
+	en_mask, {m1, m1h, m2, m3}, #block "_0" \
 }
 
-#define BLOCK_PRTY_INFO_1(block, en_mask, m1h, m2, m3) \
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
 { \
 	block##_REG_##block##_PRTY_MASK_1, \
 	block##_REG_##block##_PRTY_STS_CLR_1, \
-	en_mask, {m1h, m2, m3}, #block"_1" \
+	en_mask, {m1, m1h, m2, m3}, #block "_1" \
 }
 
 static const struct {
@@ -611,6 +596,7 @@ static const struct {
 	uint32_t sts_clr_addr;
 	uint32_t en_mask;		/* Mask to enable parity attentions */
 	struct {
+		uint32_t e1;		/* 57710 */
 		uint32_t e1h;	/* 57711 */
 		uint32_t e2;		/* 57712 */
 		uint32_t e3;		/* 578xx */
@@ -620,63 +606,67 @@ static const struct {
 				 */
 } ecore_blocks_parity_data[] = {
 	/* bit 19 masked */
-	/* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+	/* REG_WR(sc, PXP_REG_PXP_PRTY_MASK, 0x80000); */
 	/* bit 5,18,20-31 */
-	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+	/* REG_WR(sc, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
 	/* bit 5 */
-	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20);	*/
-	/* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
-	/* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+	/* REG_WR(sc, PXP2_REG_PXP2_PRTY_MASK_1, 0x20);	*/
+	/* REG_WR(sc, HC_REG_HC_PRTY_MASK, 0x0); */
+	/* REG_WR(sc, MISC_REG_MISC_PRTY_MASK, 0x0); */
 
 	/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
 	 * want to handle "system kill" flow at the moment.
 	 */
-	BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x7ffffff,
+	BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
 			0x7ffffff),
-	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff,
+	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
 			  0xffffffff),
-	BLOCK_PRTY_INFO_1(PXP2,	0x1ffffff, 0x7f, 0x7ff, 0x1ffffff),
-	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0, 0),
-	BLOCK_PRTY_INFO(NIG, 0xffffffff, 0xffffffff, 0, 0),
-	BLOCK_PRTY_INFO_0(NIG,	0xffffffff, 0, 0xffffffff, 0xffffffff),
-	BLOCK_PRTY_INFO_1(NIG,	0xffff, 0, 0xff, 0xffff),
-	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0x7ff, 0x7ff),
-	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
-	BLOCK_PRTY_INFO(QM, 0, 0xfff, 0xfff, 0xfff),
-	BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0x1f, 0x1f),
-	BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0x3, 0x3),
-	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+	BLOCK_PRTY_INFO_1(PXP2,	0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
+	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
+	BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
+	BLOCK_PRTY_INFO_0(NIG,	0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(NIG,	0xffff, 0, 0, 0xff, 0xffff),
+	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
+	BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
+	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
 	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
 		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
-		{0xf, 0xf, 0xf}, "UPB"},
+		{0xf, 0xf, 0xf, 0xf}, "UPB"},
 	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
 		GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
-		{0xf, 0xf, 0xf}, "XPB"},
-	BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
-	BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
-	BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0x3f),
-	BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
-	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
-	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
-	BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
-	BLOCK_PRTY_INFO(PBF, 0, 0x3ffff, 0xfffff, 0xfffffff),
-	BLOCK_PRTY_INFO(TM, 0, 0x7f, 0x7f, 0x7f),
-	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
-	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
-	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
-	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
-	BLOCK_PRTY_INFO(TCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
-	BLOCK_PRTY_INFO(CCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
-	BLOCK_PRTY_INFO(UCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
-	BLOCK_PRTY_INFO(XCM, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
-	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
-	BLOCK_PRTY_INFO_1(TSEM, 0, 0x1f, 0x3f, 0x3f),
-	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
-	BLOCK_PRTY_INFO_1(USEM, 0, 0x1f, 0x1f, 0x1f),
-	BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
-	BLOCK_PRTY_INFO_1(CSEM, 0, 0x1f, 0x1f, 0x1f),
-	BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
-	BLOCK_PRTY_INFO_1(XSEM, 0, 0x1f, 0x3f, 0x3f),
+		{0xf, 0xf, 0xf, 0xf}, "XPB"},
+	BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
+	BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
+	BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(PRS, (1 << 6), 0xff, 0xff, 0xff, 0xff),
+	BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
+	BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
+	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
+	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
+	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
 };
 
 
@@ -685,45 +675,59 @@ static const struct {
  * [30] MCP Latched ump_tx_parity
  * [31] MCP Latched scpad_parity
  */
-#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\
+#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS	\
 	(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
 	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
-	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\
+	(MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
 	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
 
 /* Below registers control the MCP parity attention output. When
  * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
  * enabled, when cleared - disabled.
  */
-static const uint32_t mcp_attn_ctl_regs[] = {
-	MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
-	MISC_REG_AEU_ENABLE4_NIG_0,
-	MISC_REG_AEU_ENABLE4_PXP_0,
-	MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
-	MISC_REG_AEU_ENABLE4_NIG_1,
-	MISC_REG_AEU_ENABLE4_PXP_1
+static const struct {
+	uint32_t addr;
+	uint32_t bits;
+} mcp_attn_ctl_regs[] = {
+	{ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+		MISC_AEU_ENABLE_MCP_PRTY_BITS },
+	{ MISC_REG_AEU_ENABLE4_NIG_0,
+		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+	{ MISC_REG_AEU_ENABLE4_PXP_0,
+		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+	{ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+		MISC_AEU_ENABLE_MCP_PRTY_BITS },
+	{ MISC_REG_AEU_ENABLE4_NIG_1,
+		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+	{ MISC_REG_AEU_ENABLE4_PXP_1,
+		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
 };
 
 static inline void ecore_set_mcp_parity(struct bnx2x_softc *sc, uint8_t enable)
 {
-	uint32_t i;
+	unsigned int i;
 	uint32_t reg_val;
 
-	for (i = 0; i < ARRSIZE(mcp_attn_ctl_regs); i++) {
-		reg_val = REG_RD(sc, mcp_attn_ctl_regs[i]);
+	for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+		reg_val = REG_RD(sc, mcp_attn_ctl_regs[i].addr);
 
 		if (enable)
-			reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+			reg_val |= mcp_attn_ctl_regs[i].bits;
 		else
-			reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+			reg_val &= ~mcp_attn_ctl_regs[i].bits;
 
-		REG_WR(sc, mcp_attn_ctl_regs[i], reg_val);
+		REG_WR(sc, mcp_attn_ctl_regs[i].addr, reg_val);
 	}
 }
 
 static inline uint32_t ecore_parity_reg_mask(struct bnx2x_softc *sc, int idx)
 {
-	if (CHIP_IS_E1H(sc))
+	if (CHIP_IS_E1(sc))
+		return ecore_blocks_parity_data[idx].reg_mask.e1;
+	else if (CHIP_IS_E1H(sc))
 		return ecore_blocks_parity_data[idx].reg_mask.e1h;
 	else if (CHIP_IS_E2(sc))
 		return ecore_blocks_parity_data[idx].reg_mask.e2;
@@ -733,9 +737,9 @@ static inline uint32_t ecore_parity_reg_mask(struct bnx2x_softc *sc, int idx)
 
 static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc)
 {
-	uint32_t i;
+	unsigned int i;
 
-	for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+	for (i = 0; i < ARRAY_SIZE(ecore_blocks_parity_data); i++) {
 		uint32_t dis_mask = ecore_parity_reg_mask(sc, i);
 
 		if (dis_mask) {
@@ -748,7 +752,7 @@ static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc)
 	}
 
 	/* Disable MCP parity attentions */
-	ecore_set_mcp_parity(sc, FALSE);
+	ecore_set_mcp_parity(sc, false);
 }
 
 /**
@@ -756,7 +760,7 @@ static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc)
  */
 static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
 {
-	uint32_t i;
+	unsigned int i;
 	uint32_t reg_val, mcp_aeu_bits =
 		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
 		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
@@ -769,7 +773,7 @@ static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
 	REG_WR(sc, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
 	REG_WR(sc, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
 
-	for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+	for (i = 0; i < ARRAY_SIZE(ecore_blocks_parity_data); i++) {
 		uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
 
 		if (reg_mask) {
@@ -799,9 +803,9 @@ static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
 
 static inline void ecore_enable_blocks_parity(struct bnx2x_softc *sc)
 {
-	uint32_t i;
+	unsigned int i;
 
-	for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+	for (i = 0; i < ARRAY_SIZE(ecore_blocks_parity_data); i++) {
 		uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
 
 		if (reg_mask)
@@ -810,7 +814,7 @@ static inline void ecore_enable_blocks_parity(struct bnx2x_softc *sc)
 	}
 
 	/* Enable MCP parity attentions */
-	ecore_set_mcp_parity(sc, TRUE);
+	ecore_set_mcp_parity(sc, true);
 }
 
 
diff --git a/drivers/net/bnx2x/ecore_init_ops.h b/drivers/net/bnx2x/ecore_init_ops.h
index 733ad1aa8..0945e7999 100644
--- a/drivers/net/bnx2x/ecore_init_ops.h
+++ b/drivers/net/bnx2x/ecore_init_ops.h
@@ -28,16 +28,19 @@ static void ecore_init_str_wr(struct bnx2x_softc *sc, uint32_t addr,
 		REG_WR(sc, addr + i*4, data[i]);
 }
 
-static void ecore_write_big_buf(struct bnx2x_softc *sc, uint32_t addr, uint32_t len)
+static void ecore_write_big_buf(struct bnx2x_softc *sc, uint32_t addr,
+				uint32_t len, uint8_t wb __rte_unused)
 {
 	if (DMAE_READY(sc))
 		ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);
 
-	else ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
+	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+	else
+		ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
 }
 
 static void ecore_init_fill(struct bnx2x_softc *sc, uint32_t addr, int fill,
-			    uint32_t len)
+			    uint32_t len, uint8_t wb)
 {
 	uint32_t buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
 	uint32_t buf_len32 = buf_len/4;
@@ -48,7 +51,7 @@ static void ecore_init_fill(struct bnx2x_softc *sc, uint32_t addr, int fill,
 	for (i = 0; i < len; i += buf_len32) {
 		uint32_t cur_len = min(buf_len32, len - i);
 
-		ecore_write_big_buf(sc, addr + i*4, cur_len);
+		ecore_write_big_buf(sc, addr + i * 4, cur_len, wb);
 	}
 }
 
@@ -57,7 +60,9 @@ static void ecore_write_big_buf_wb(struct bnx2x_softc *sc, uint32_t addr, uint32
 	if (DMAE_READY(sc))
 		ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);
 
-	else ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
+	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+	else
+		ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
 }
 
 static void ecore_init_wr_64(struct bnx2x_softc *sc, uint32_t addr,
@@ -135,9 +140,12 @@ static void ecore_init_wr_wb(struct bnx2x_softc *sc, uint32_t addr,
 	if (DMAE_READY(sc))
 		VIRT_WR_DMAE_LEN(sc, data, addr, len, 0);
 
-	else ecore_init_str_wr(sc, addr, data, len);
+	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+	else
+		ecore_init_str_wr(sc, addr, data, len);
 }
 
+
 static void ecore_wr_64(struct bnx2x_softc *sc, uint32_t reg, uint32_t val_lo,
 			uint32_t val_hi)
 {
@@ -215,11 +223,14 @@ static void ecore_init_block(struct bnx2x_softc *sc, uint32_t block, uint32_t st
 			ecore_init_wr_wb(sc, addr, data, len);
 			break;
 		case OP_ZR:
+			ecore_init_fill(sc, addr, 0, op->zero.len, 0);
+			break;
 		case OP_WB_ZR:
-			ecore_init_fill(sc, addr, 0, op->zero.len);
+			ecore_init_fill(sc, addr, 0, op->zero.len, 1);
 			break;
 		case OP_ZP:
-			ecore_init_wr_zp(sc, addr, len, op->arr_wr.data_off);
+			ecore_init_wr_zp(sc, addr, len,
+					 op->arr_wr.data_off);
 			break;
 		case OP_WR_64:
 			ecore_init_wr_64(sc, addr, data, len);
@@ -241,11 +252,6 @@ static void ecore_init_block(struct bnx2x_softc *sc, uint32_t block, uint32_t st
 				op->if_mode.mode_bit_map) == 0)
 				op_idx += op->if_mode.cmd_offset;
 			break;
-		    /* the following opcodes are unused at the moment. */
-		case OP_IF_PHASE:
-		case OP_RT:
-		case OP_DELAY:
-		case OP_VERIFY:
 		default:
 			/* Should never get here! */
 
@@ -490,7 +496,7 @@ static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
 	REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order);
 	REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order);
 
-	if (CHIP_IS_E1H(sc) && (r_order == MAX_RD_ORD))
+	if ((CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) && (r_order == MAX_RD_ORD))
 		REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
 
 	if (CHIP_IS_E3(sc))
@@ -500,31 +506,33 @@ static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
 	else
 		REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
 
-	/*    MPS      w_order     optimal TH      presently TH
-	 *    128         0             0               2
-	 *    256         1             1               3
-	 *    >=512       2             2               3
-	 */
-	/* DMAE is special */
-	if (!CHIP_IS_E1H(sc)) {
-		/* E2 can use optimal TH */
-		val = w_order;
-		REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val);
-	} else {
-		val = ((w_order == 0) ? 2 : 3);
-		REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2);
-	}
+	if (!CHIP_IS_E1(sc)) {
+		/*    MPS      w_order     optimal TH      presently TH
+		 *    128         0             0               2
+		 *    256         1             1               3
+		 *    >=512       2             2               3
+		 */
+		/* DMAE is special */
+		if (!CHIP_IS_E1H(sc)) {
+			/* E2 can use optimal TH */
+			val = w_order;
+			REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val);
+		} else {
+			val = ((w_order == 0) ? 2 : 3);
+			REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2);
+		}
 
-	REG_WR(sc, PXP2_REG_WR_HC_MPS, val);
-	REG_WR(sc, PXP2_REG_WR_USDM_MPS, val);
-	REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val);
-	REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val);
-	REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val);
-	REG_WR(sc, PXP2_REG_WR_QM_MPS, val);
-	REG_WR(sc, PXP2_REG_WR_TM_MPS, val);
-	REG_WR(sc, PXP2_REG_WR_SRC_MPS, val);
-	REG_WR(sc, PXP2_REG_WR_DBG_MPS, val);
-	REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
+		REG_WR(sc, PXP2_REG_WR_HC_MPS, val);
+		REG_WR(sc, PXP2_REG_WR_USDM_MPS, val);
+		REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val);
+		REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val);
+		REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val);
+		REG_WR(sc, PXP2_REG_WR_QM_MPS, val);
+		REG_WR(sc, PXP2_REG_WR_TM_MPS, val);
+		REG_WR(sc, PXP2_REG_WR_SRC_MPS, val);
+		REG_WR(sc, PXP2_REG_WR_DBG_MPS, val);
+		REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
+	}
 
 	/* Validate number of tags suppoted by device */
 #define PCIE_REG_PCIER_TL_HDR_FC_ST		0x2980
@@ -559,18 +567,15 @@ static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
 #define ILT_ADDR2(x)		((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
 #define ILT_RANGE(f, l)		(((l) << 10) | f)
 
-static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc,
-				 struct ilt_line *line, uint32_t size, uint8_t memop, int cli_num, int i)
+static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc __rte_unused,
+				 struct ilt_line *line, uint32_t size,
+				 uint8_t memop)
 {
-#define ECORE_ILT_NAMESIZE 10
-	char str[ECORE_ILT_NAMESIZE];
-
 	if (memop == ILT_MEMOP_FREE) {
 		ECORE_ILT_FREE(line->page, line->page_mapping, line->size);
 		return 0;
 	}
-	snprintf(str, ECORE_ILT_NAMESIZE, "ILT_%d_%d", cli_num, i);
-	ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size, str);
+	ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size);
 	if (!line->page)
 		return -1;
 	line->size = size;
@@ -581,7 +586,7 @@ static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc,
 static int ecore_ilt_client_mem_op(struct bnx2x_softc *sc, int cli_num,
 				   uint8_t memop)
 {
-	int i, rc = 0;
+	int i, rc;
 	struct ecore_ilt *ilt = SC_ILT(sc);
 	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
 
@@ -591,25 +596,13 @@ static int ecore_ilt_client_mem_op(struct bnx2x_softc *sc, int cli_num,
 	if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
 		return 0;
 
-	for (i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
+	for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
 		rc = ecore_ilt_line_mem_op(sc, &ilt->lines[i],
-					   ilt_cli->page_size, memop, cli_num, i);
+					   ilt_cli->page_size, memop);
 	}
 	return rc;
 }
 
-static inline int ecore_ilt_mem_op_cnic(struct bnx2x_softc *sc, uint8_t memop)
-{
-	int rc = 0;
-
-	if (CONFIGURE_NIC_MODE(sc))
-		rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop);
-	if (!rc)
-		rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_TM, memop);
-
-	return rc;
-}
-
 static int ecore_ilt_mem_op(struct bnx2x_softc *sc, uint8_t memop)
 {
 	int rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_CDU, memop);
@@ -626,7 +619,10 @@ static void ecore_ilt_line_wr(struct bnx2x_softc *sc, int abs_idx,
 {
 	uint32_t reg;
 
-	reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
+	if (CHIP_IS_E1(sc))
+		reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx * 8;
+	else
+		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx * 8;
 
 	ecore_wr_64(sc, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
 }
@@ -637,6 +633,7 @@ static void ecore_ilt_line_init_op(struct bnx2x_softc *sc,
 	ecore_dma_addr_t	null_mapping;
 	int abs_idx = ilt->start_line + idx;
 
+
 	switch (initop) {
 	case INITOP_INIT:
 		/* set in the init-value array */
@@ -650,9 +647,10 @@ static void ecore_ilt_line_init_op(struct bnx2x_softc *sc,
 	}
 }
 
-static void ecore_ilt_boundry_init_op(struct bnx2x_softc *sc,
-				      struct ilt_client_info *ilt_cli,
-				      uint32_t ilt_start)
+static void ecore_ilt_boundary_init_op(struct bnx2x_softc *sc,
+				       struct ilt_client_info *ilt_cli,
+				       uint32_t ilt_start,
+				       uint8_t initop __rte_unused)
 {
 	uint32_t start_reg = 0;
 	uint32_t end_reg = 0;
@@ -661,7 +659,26 @@ static void ecore_ilt_boundry_init_op(struct bnx2x_softc *sc,
 	   CLEAR => SET and for now SET ~~ INIT */
 
 	/* find the appropriate regs */
-	switch (ilt_cli->client_num) {
+	if (CHIP_IS_E1(sc)) {
+		switch (ilt_cli->client_num) {
+		case ILT_CLIENT_CDU:
+			start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
+			break;
+		case ILT_CLIENT_QM:
+			start_reg = PXP2_REG_PSWRQ_QM0_L2P;
+			break;
+		case ILT_CLIENT_SRC:
+			start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
+			break;
+		case ILT_CLIENT_TM:
+			start_reg = PXP2_REG_PSWRQ_TM0_L2P;
+			break;
+		}
+		REG_WR(sc, start_reg + SC_FUNC(sc) * 4,
+		       ILT_RANGE((ilt_start + ilt_cli->start),
+				 (ilt_start + ilt_cli->end)));
+	} else {
+		switch (ilt_cli->client_num) {
 		case ILT_CLIENT_CDU:
 			start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
 			end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
@@ -678,9 +695,10 @@ static void ecore_ilt_boundry_init_op(struct bnx2x_softc *sc,
 			start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
 			end_reg = PXP2_REG_RQ_TM_LAST_ILT;
 			break;
+		}
+		REG_WR(sc, start_reg, (ilt_start + ilt_cli->start));
+		REG_WR(sc, end_reg, (ilt_start + ilt_cli->end));
 	}
-	REG_WR(sc, start_reg, (ilt_start + ilt_cli->start));
-	REG_WR(sc, end_reg, (ilt_start + ilt_cli->end));
 }
 
 static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc,
@@ -697,7 +715,7 @@ static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc,
 		ecore_ilt_line_init_op(sc, ilt, i, initop);
 
 	/* init/clear the ILT boundries */
-	ecore_ilt_boundry_init_op(sc, ilt_cli, ilt->start_line);
+	ecore_ilt_boundary_init_op(sc, ilt_cli, ilt->start_line, initop);
 }
 
 static void ecore_ilt_client_init_op(struct bnx2x_softc *sc,
@@ -717,13 +735,6 @@ static void ecore_ilt_client_id_init_op(struct bnx2x_softc *sc,
 	ecore_ilt_client_init_op(sc, ilt_cli, initop);
 }
 
-static inline void ecore_ilt_init_op_cnic(struct bnx2x_softc *sc, uint8_t initop)
-{
-	if (CONFIGURE_NIC_MODE(sc))
-		ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop);
-	ecore_ilt_client_id_init_op(sc, ILT_CLIENT_TM, initop);
-}
-
 static void ecore_ilt_init_op(struct bnx2x_softc *sc, uint8_t initop)
 {
 	ecore_ilt_client_id_init_op(sc, ILT_CLIENT_CDU, initop);
@@ -771,7 +782,7 @@ static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop)
 /****************************************************************************
 * QM initializations
 ****************************************************************************/
-#define QM_QUEUES_PER_FUNC	16
+#define QM_QUEUES_PER_FUNC	16 /* E1 has 32, but only 16 are used */
 #define QM_INIT_MIN_CID_COUNT	31
 #define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)
 
@@ -831,33 +842,4 @@ static void ecore_qm_init_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
 	}
 }
 
-/****************************************************************************
-* SRC initializations
-****************************************************************************/
-#ifdef ECORE_L5
-/* called during init func stage */
-static void ecore_src_init_t2(struct bnx2x_softc *sc, struct src_ent *t2,
-			      ecore_dma_addr_t t2_mapping, int src_cid_count)
-{
-	int i;
-	int port = SC_PORT(sc);
-
-	/* Initialize T2 */
-	for (i = 0; i < src_cid_count-1; i++)
-		t2[i].next = (uint64_t)(t2_mapping +
-			     (i+1)*sizeof(struct src_ent));
-
-	/* tell the searcher where the T2 table is */
-	REG_WR(sc, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
-
-	ecore_wr_64(sc, SRC_REG_FIRSTFREE0 + port*16,
-		    U64_LO(t2_mapping), U64_HI(t2_mapping));
-
-	ecore_wr_64(sc, SRC_REG_LASTFREE0 + port*16,
-		    U64_LO((uint64_t)t2_mapping +
-			   (src_cid_count-1) * sizeof(struct src_ent)),
-		    U64_HI((uint64_t)t2_mapping +
-			   (src_cid_count-1) * sizeof(struct src_ent)));
-}
-#endif
 #endif /* ECORE_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/ecore_mfw_req.h b/drivers/net/bnx2x/ecore_mfw_req.h
index fe9450481..4ffd9daf7 100644
--- a/drivers/net/bnx2x/ecore_mfw_req.h
+++ b/drivers/net/bnx2x/ecore_mfw_req.h
@@ -14,7 +14,6 @@
 #define ECORE_MFW_REQ_H
 
 
-
 #define PORT_0              0
 #define PORT_1              1
 #define PORT_MAX            2
@@ -143,6 +142,15 @@ struct iscsi_stats_info {
 	uint8_t mac_add1[8];		/* Additional Programmed MAC Addr 1. */
 	/* QoS Priority (per 802.1p). 0-7255 */
 	uint32_t qos_priority;
+#define ISCSI_QOS_PRIORITY_OFFSET	0
+#define ISCSI_QOS_PRIORITY_MASK		(0xffff)
+
+#define ISCSI_IP_ADDRESS_TYPE_OFFSET	30
+#define ISCSI_IP_ADDRESS_TYPE_MASK	(3 << 30)
+/* Driver does not have the IP address and type populated */
+#define ISCSI_IP_ADDRESS_TYPE_NOT_SET	(0 << 30)
+#define ISCSI_IP_ADDRESS_TYPE_IPV4	(1 << 30) /* IPV4 IP address set */
+#define ISCSI_IP_ADDRESS_TYPE_IPV6	(2 << 30) /* IPV6 IP address set */
 
 	uint8_t initiator_name[64];	/* iSCSI Boot Initiator Node name. */
 
@@ -181,5 +189,4 @@ union drv_info_to_mcp {
 	struct iscsi_stats_info		iscsi_stat;
 };
 
-
 #endif /* ECORE_MFW_REQ_H */
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index ceac82815..b9bca9115 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -501,7 +501,7 @@ static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
  *
  * @details May sleep. Claims and releases execution queue lock during its run.
  */
-static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
+int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
 				      struct ecore_vlan_mac_obj *o)
 {
 	int rc;
@@ -712,7 +712,7 @@ static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj
 	return rx_tx_flag;
 }
 
-static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
+void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
 				 int add, unsigned char *dev_addr, int index)
 {
 	uint32_t wb_data[2];
@@ -2764,12 +2764,16 @@ static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
 
 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
 				  struct ecore_mcast_ramrod_params *p,
-				  int old_num_bins)
+				  int old_num_bins,
+				  enum ecore_mcast_cmd cmd)
 {
 	struct ecore_mcast_obj *o = p->mcast_obj;
 
 	o->set_registry_size(o, old_num_bins);
 	o->total_pending_num -= p->mcast_list_len;
+
+	if (cmd == ECORE_MCAST_CMD_SET)
+		o->total_pending_num -= o->max_cmd_len;
 }
 
 /**
@@ -2915,7 +2919,8 @@ static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
 
 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
 				   __rte_unused struct ecore_mcast_ramrod_params
-				   *p, __rte_unused int old_num_bins)
+				   *p, __rte_unused int old_num_bins,
+				   __rte_unused enum ecore_mcast_cmd cmd)
 {
 	/* Do nothing */
 }
@@ -3093,7 +3098,7 @@ int ecore_config_mcast(struct bnx2x_softc *sc,
 	r->clear_pending(r);
 
 error_exit1:
-	o->revert(sc, p, old_reg_size);
+	o->revert(sc, p, old_reg_size, cmd);
 
 	return rc;
 }
@@ -3350,7 +3355,7 @@ static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct
  * If credit is negative pool operations will always succeed (unlimited pool).
  *
  */
-static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
+void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
 				   int base, int credit)
 {
 	/* Zero the object first */
@@ -3588,11 +3593,13 @@ int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
 	return rc;
 }
 
-void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
+void ecore_init_rss_config_obj(struct bnx2x_softc *sc __rte_unused,
+			       struct ecore_rss_config_obj *rss_obj,
 			       uint8_t cl_id, uint32_t cid, uint8_t func_id,
-			       uint8_t engine_id, void *rdata,
-			       ecore_dma_addr_t rdata_mapping, int state,
-			       unsigned long *pstate, ecore_obj_type type)
+			       uint8_t engine_id,
+			       void *rdata, ecore_dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       ecore_obj_type type)
 {
 	ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
 			   rdata_mapping, state, pstate, type);
@@ -5107,8 +5114,14 @@ static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_fu
 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
 
 	/* Fill the ramrod data with provided parameters */
-	rdata->tx_switch_suspend_change_flg = 1;
-	rdata->tx_switch_suspend = switch_update_params->suspend;
+	if (ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+			   &switch_update_params->changes)) {
+		rdata->tx_switch_suspend_change_flg = 1;
+		rdata->tx_switch_suspend =
+			ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
+				       &switch_update_params->changes);
+	}
+
 	rdata->echo = SWITCH_UPDATE;
 
 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
@@ -5220,7 +5233,7 @@ static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_st
 
 	rdata->dcb_enabled = tx_start_params->dcb_enabled;
 	rdata->dcb_version = tx_start_params->dcb_version;
-	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0;
+	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
 
 	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
 		rdata->traffic_type_to_priority_cos[i] =
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index fce715b6d..cc1db377a 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -135,16 +135,16 @@ typedef rte_spinlock_t ECORE_MUTEX_SPIN;
 #define SC_ILT(sc)  ((sc)->ilt)
 #define ILOG2(x)    bnx2x_ilog2(x)
 
-#define ECORE_ILT_ZALLOC(x, y, size, str)				\
+#define ECORE_ILT_ZALLOC(x, y, size)				\
 	do {								\
 		x = rte_malloc("", sizeof(struct bnx2x_dma), RTE_CACHE_LINE_SIZE); \
 		if (x) {						\
 			if (bnx2x_dma_alloc((struct bnx2x_softc *)sc,	\
 					  size, (struct bnx2x_dma *)x,	\
-					  str, RTE_CACHE_LINE_SIZE) != 0) { \
+					  "ILT", RTE_CACHE_LINE_SIZE) != 0) { \
 				rte_free(x);				\
 				x = NULL;				\
-				*y = 0;					\
+				*(y) = 0;				\
 			} else {					\
 				*y = ((struct bnx2x_dma *)x)->paddr;	\
 			}						\
@@ -161,7 +161,7 @@ typedef rte_spinlock_t ECORE_MUTEX_SPIN;
 		}							\
 	} while (0)
 
-#define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE
+#define ECORE_IS_VALID_ETHER_ADDR(_mac) true
 
 #define ECORE_IS_MF_SD_MODE   IS_MF_SD_MODE
 #define ECORE_IS_MF_SI_MODE   IS_MF_SI_MODE
@@ -238,11 +238,11 @@ typedef struct ecore_list_t
 	(_list)->cnt  = 0;     \
     } while (0)
 
-/* return TRUE if the element is the last on the list */
+/* return true if the element is the last on the list */
 #define ECORE_LIST_IS_LAST(_elem, _list) \
     (_elem == (_list)->tail)
 
-/* return TRUE if the list is empty */
+/* return true if the list is empty */
 #define ECORE_LIST_IS_EMPTY(_list) \
     ((_list)->cnt == 0)
 
@@ -413,9 +413,6 @@ enum {
     AFEX_UPDATE,
 };
 
-
-
-
 struct bnx2x_softc;
 struct eth_context;
 
@@ -461,11 +458,18 @@ enum {
 	ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
 	ECORE_FILTER_FCOE_ETH_START_SCHED,
 	ECORE_FILTER_FCOE_ETH_STOP_SCHED,
+#ifdef ECORE_CHAR_DEV
+	ECORE_FILTER_BYPASS_RX_MODE_PENDING,
+	ECORE_FILTER_BYPASS_MAC_PENDING,
+	ECORE_FILTER_BYPASS_RSS_CONF_PENDING,
+#endif
 	ECORE_FILTER_MCAST_PENDING,
 	ECORE_FILTER_MCAST_SCHED,
 	ECORE_FILTER_RSS_CONF_PENDING,
 	ECORE_AFEX_FCOE_Q_UPDATE_PENDING,
-	ECORE_AFEX_PENDING_VIFSET_MCP_ACK
+	ECORE_AFEX_PENDING_VIFSET_MCP_ACK,
+	ECORE_FILTER_VXLAN_PENDING,
+	ECORE_FILTER_PVLAN_PENDING
 };
 
 struct ecore_raw_obj {
@@ -488,7 +492,7 @@ struct ecore_raw_obj {
 	int (*wait_comp)(struct bnx2x_softc *sc,
 			 struct ecore_raw_obj *o);
 
-	int (*check_pending)(struct ecore_raw_obj *o);
+	bool (*check_pending)(struct ecore_raw_obj *o);
 	void (*clear_pending)(struct ecore_raw_obj *o);
 	void (*set_pending)(struct ecore_raw_obj *o);
 };
@@ -509,10 +513,16 @@ struct ecore_vlan_mac_ramrod_data {
 	uint16_t vlan;
 };
 
+struct ecore_vxlan_fltr_ramrod_data {
+	uint8_t innermac[ETH_ALEN];
+	uint32_t vni;
+};
+
 union ecore_classification_ramrod_data {
 	struct ecore_mac_ramrod_data mac;
 	struct ecore_vlan_ramrod_data vlan;
 	struct ecore_vlan_mac_ramrod_data vlan_mac;
+	struct ecore_vxlan_fltr_ramrod_data vxlan_fltr;
 };
 
 /* VLAN_MAC commands */
@@ -541,6 +551,7 @@ union ecore_exe_queue_cmd_data {
 	struct ecore_vlan_mac_data vlan_mac;
 
 	struct {
+		/* TODO */
 	} mcast;
 };
 
@@ -642,7 +653,7 @@ struct ecore_vlan_mac_registry_elem {
 	ecore_list_entry_t	link;
 
 	/* Used to store the cam offset used for the mac/vlan/vlan-mac.
-	 * Relevant for 57711 only. VLANs and MACs share the
+	 * Relevant for 57710 and 57711 only. VLANs and MACs share the
 	 * same CAM for these chips.
 	 */
 	int			cam_offset;
@@ -659,9 +670,18 @@ enum {
 	ECORE_ETH_MAC,
 	ECORE_ISCSI_ETH_MAC,
 	ECORE_NETQ_ETH_MAC,
+	ECORE_VLAN,
 	ECORE_DONT_CONSUME_CAM_CREDIT,
 	ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
 };
+/* When looking for matching filters, some flags are not interesting */
+#define ECORE_VLAN_MAC_CMP_MASK	(1 << ECORE_UC_LIST_MAC | \
+				 1 << ECORE_ETH_MAC | \
+				 1 << ECORE_ISCSI_ETH_MAC | \
+				 1 << ECORE_NETQ_ETH_MAC | \
+				 1 << ECORE_VLAN)
+#define ECORE_VLAN_MAC_CMP_FLAGS(flags) \
+	((flags) & ECORE_VLAN_MAC_CMP_MASK)
 
 struct ecore_vlan_mac_ramrod_params {
 	/* Object to run the command from */
@@ -685,7 +705,7 @@ struct ecore_vlan_mac_obj {
 	 * all these fields should only be accessed under the exe_queue lock
 	 */
 	uint8_t		head_reader; /* Num. of readers accessing head list */
-	int		head_exe_request; /* Pending execution request. */
+	bool		head_exe_request; /* Pending execution request. */
 	unsigned long	saved_ramrod_flags; /* Ramrods of pending execution */
 
 	/* Execution queue interface instance */
@@ -728,7 +748,7 @@ struct ecore_vlan_mac_obj {
 	/**
 	 * Checks if DEL-ramrod with the given params may be performed.
 	 *
-	 * @return TRUE if the element may be deleted
+	 * @return true if the element may be deleted
 	 */
 	struct ecore_vlan_mac_registry_elem *
 		(*check_del)(struct bnx2x_softc *sc,
@@ -738,9 +758,9 @@ struct ecore_vlan_mac_obj {
 	/**
 	 * Checks if DEL-ramrod with the given params may be performed.
 	 *
-	 * @return TRUE if the element may be deleted
+	 * @return true if the element may be deleted
 	 */
-	int (*check_move)(struct bnx2x_softc *sc,
+	bool (*check_move)(struct bnx2x_softc *sc,
 			   struct ecore_vlan_mac_obj *src_o,
 			   struct ecore_vlan_mac_obj *dst_o,
 			   union ecore_classification_ramrod_data *data);
@@ -749,10 +769,10 @@ struct ecore_vlan_mac_obj {
 	 *  Update the relevant credit object(s) (consume/return
 	 *  correspondingly).
 	 */
-	int (*get_credit)(struct ecore_vlan_mac_obj *o);
-	int (*put_credit)(struct ecore_vlan_mac_obj *o);
-	int (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset);
-	int (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset);
+	bool (*get_credit)(struct ecore_vlan_mac_obj *o);
+	bool (*put_credit)(struct ecore_vlan_mac_obj *o);
+	bool (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset);
+	bool (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset);
 
 	/**
 	 * Configures one rule in the ramrod data buffer.
@@ -838,6 +858,9 @@ enum {
 	ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
 };
 
+void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
+			  bool add, unsigned char *dev_addr, int index);
+
 /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 
 /* RX_MODE ramrod special flags: set in rx_mode_flags field in
@@ -898,7 +921,7 @@ struct ecore_mcast_list_elem {
 
 union ecore_mcast_config_data {
 	uint8_t *mac;
-	uint8_t bin; /* used in a RESTORE flow */
+	uint8_t bin; /* used in RESTORE/SET flows */
 };
 
 struct ecore_mcast_ramrod_params {
@@ -908,6 +931,14 @@ struct ecore_mcast_ramrod_params {
 	unsigned long ramrod_flags;
 
 	ecore_list_t mcast_list; /* list of struct ecore_mcast_list_elem */
+	/** TODO:
+	 *      - rename it to macs_num.
+	 *      - Add a new command type for handling pending commands
+	 *        (remove "zero semantics").
+	 *
+	 *  Length of mcast_list. If zero and ADD_CONT command - post
+	 *  pending commands.
+	 */
 	int mcast_list_len;
 };
 
@@ -916,6 +947,15 @@ enum ecore_mcast_cmd {
 	ECORE_MCAST_CMD_CONT,
 	ECORE_MCAST_CMD_DEL,
 	ECORE_MCAST_CMD_RESTORE,
+
+	/* Following this, the multicast configuration should approximately equal
+	 * the set of MACs provided [i.e., remove all else].
+	 * The two sub-commands are used internally to decide whether a given
+	 * bin is to be added or removed
+	 */
+	ECORE_MCAST_CMD_SET,
+	ECORE_MCAST_CMD_SET_ADD,
+	ECORE_MCAST_CMD_SET_DEL,
 };
 
 struct ecore_mcast_obj {
@@ -989,14 +1029,14 @@ struct ecore_mcast_obj {
 	/** Checks if there are more mcast MACs to be set or a previous
 	 *  command is still pending.
 	 */
-	int (*check_pending)(struct ecore_mcast_obj *o);
+	bool (*check_pending)(struct ecore_mcast_obj *o);
 
 	/**
 	 * Set/Clear/Check SCHEDULED state of the object
 	 */
 	void (*set_sched)(struct ecore_mcast_obj *o);
 	void (*clear_sched)(struct ecore_mcast_obj *o);
-	int (*check_sched)(struct ecore_mcast_obj *o);
+	bool (*check_sched)(struct ecore_mcast_obj *o);
 
 	/* Wait until all pending commands complete */
 	int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o);
@@ -1015,7 +1055,8 @@ struct ecore_mcast_obj {
 	 */
 	void (*revert)(struct bnx2x_softc *sc,
 		       struct ecore_mcast_ramrod_params *p,
-		       int old_num_bins);
+		       int old_num_bins,
+		       enum ecore_mcast_cmd cmd);
 
 	int (*get_registry_size)(struct ecore_mcast_obj *o);
 	void (*set_registry_size)(struct ecore_mcast_obj *o, int n);
@@ -1045,33 +1086,33 @@ struct ecore_credit_pool_obj {
 	/**
 	 * Get the next free pool entry.
 	 *
-	 * @return TRUE if there was a free entry in the pool
+	 * @return true if there was a free entry in the pool
 	 */
-	int (*get_entry)(struct ecore_credit_pool_obj *o, int *entry);
+	bool (*get_entry)(struct ecore_credit_pool_obj *o, int *entry);
 
 	/**
 	 * Return the entry back to the pool.
 	 *
-	 * @return TRUE if entry is legal and has been successfully
+	 * @return true if entry is legal and has been successfully
 	 *         returned to the pool.
 	 */
-	int (*put_entry)(struct ecore_credit_pool_obj *o, int entry);
+	bool (*put_entry)(struct ecore_credit_pool_obj *o, int entry);
 
 	/**
 	 * Get the requested amount of credit from the pool.
 	 *
 	 * @param cnt Amount of requested credit
-	 * @return TRUE if the operation is successful
+	 * @return true if the operation is successful
 	 */
-	int (*get)(struct ecore_credit_pool_obj *o, int cnt);
+	bool (*get)(struct ecore_credit_pool_obj *o, int cnt);
 
 	/**
 	 * Returns the credit to the pool.
 	 *
 	 * @param cnt Amount of credit to return
-	 * @return TRUE if the operation is successful
+	 * @return true if the operation is successful
 	 */
-	int (*put)(struct ecore_credit_pool_obj *o, int cnt);
+	bool (*put)(struct ecore_credit_pool_obj *o, int cnt);
 
 	/**
 	 * Reads the current amount of credit.
@@ -1094,7 +1135,9 @@ enum {
 	ECORE_RSS_IPV6_TCP,
 	ECORE_RSS_IPV6_UDP,
 
-	ECORE_RSS_TUNNELING,
+	ECORE_RSS_IPV4_VXLAN,
+	ECORE_RSS_IPV6_VXLAN,
+	ECORE_RSS_TUNN_INNER_HDRS,
 };
 
 struct ecore_config_rss_params {
@@ -1117,10 +1160,6 @@ struct ecore_config_rss_params {
 
 	/* valid only if ECORE_RSS_UPDATE_TOE is set */
 	uint16_t		toe_rss_bitmap;
-
-	/* valid if ECORE_RSS_TUNNELING is set */
-	uint16_t		tunnel_value;
-	uint16_t		tunnel_mask;
 };
 
 struct ecore_rss_config_obj {
@@ -1158,6 +1197,8 @@ enum {
 	ECORE_Q_UPDATE_SILENT_VLAN_REM,
 	ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
 	ECORE_Q_UPDATE_TX_SWITCHING,
+	ECORE_Q_UPDATE_PTP_PKTS_CHNG,
+	ECORE_Q_UPDATE_PTP_PKTS,
 };
 
 /* Allowed Queue states */
@@ -1222,12 +1263,16 @@ enum {
 	ECORE_Q_FLG_FORCE_DEFAULT_PRI,
 	ECORE_Q_FLG_REFUSE_OUTBAND_VLAN,
 	ECORE_Q_FLG_PCSUM_ON_PKT,
-	ECORE_Q_FLG_TUN_INC_INNER_IP_ID
+	ECORE_Q_FLG_TUN_INC_INNER_IP_ID,
+	ECORE_Q_FLG_TPA_VLAN_DIS,
 };
 
 /* Queue type options: queue type may be a combination of below. */
 enum ecore_q_type {
 	ECORE_Q_TYPE_FWD,
+	/** TODO: Consider moving both these flags into the init()
+	 *        ramrod params.
+	 */
 	ECORE_Q_TYPE_HAS_RX,
 	ECORE_Q_TYPE_HAS_TX,
 };
@@ -1238,6 +1283,10 @@ enum ecore_q_type {
 #define ECORE_MULTI_TX_COS_E3B0			3
 #define ECORE_MULTI_TX_COS			3 /* Maximum possible */
 #define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)
+/* DMAE channel to be used by FW for timesync workaround. A driver that sends
+ * timesync-related ramrods must not use this DMAE command ID.
+ */
+#define FW_DMAE_CMD_ID 6
 
 struct ecore_queue_init_params {
 	struct {
@@ -1280,6 +1329,26 @@ struct ecore_queue_update_params {
 	uint8_t		cid_index;
 };
 
+struct ecore_queue_update_tpa_params {
+	ecore_dma_addr_t sge_map;
+	uint8_t update_ipv4;
+	uint8_t update_ipv6;
+	uint8_t max_tpa_queues;
+	uint8_t max_sges_pkt;
+	uint8_t complete_on_both_clients;
+	uint8_t dont_verify_thr;
+	uint8_t tpa_mode;
+	uint8_t _pad;
+
+	uint16_t sge_buff_sz;
+	uint16_t max_agg_sz;
+
+	uint16_t sge_pause_thr_low;
+	uint16_t sge_pause_thr_high;
+
+	uint8_t disable_tpa_over_vlan;
+};
+
 struct rxq_pause_params {
 	uint16_t		bd_th_lo;
 	uint16_t		bd_th_hi;
@@ -1298,11 +1367,14 @@ struct ecore_general_setup_params {
 	uint8_t		spcl_id;
 	uint16_t		mtu;
 	uint8_t		cos;
+
+	uint8_t		fp_hsi;
 };
 
 struct ecore_rxq_setup_params {
 	/* dma */
 	ecore_dma_addr_t	dscr_map;
+	ecore_dma_addr_t	sge_map;
 	ecore_dma_addr_t	rcq_map;
 	ecore_dma_addr_t	rcq_np_map;
 
@@ -1313,6 +1385,8 @@ struct ecore_rxq_setup_params {
 
 	/* valid if ECORE_Q_FLG_TPA */
 	uint16_t		tpa_agg_sz;
+	uint16_t		sge_buf_sz;
+	uint8_t		max_sges_pkt;
 	uint8_t		max_tpa_queues;
 	uint8_t		rss_engine_id;
 
@@ -1323,7 +1397,7 @@ struct ecore_rxq_setup_params {
 
 	uint8_t		sb_cq_index;
 
-	/* valid if BXN2X_Q_FLG_SILENT_VLAN_REM */
+	/* valid if ECORE_Q_FLG_SILENT_VLAN_REM */
 	uint16_t silent_removal_value;
 	uint16_t silent_removal_mask;
 };
@@ -1371,6 +1445,7 @@ struct ecore_queue_state_params {
 	/* Params according to the current command */
 	union {
 		struct ecore_queue_update_params	update;
+		struct ecore_queue_update_tpa_params    update_tpa;
 		struct ecore_queue_setup_params		setup;
 		struct ecore_queue_init_params		init;
 		struct ecore_queue_setup_tx_only_params	tx_only;
@@ -1450,6 +1525,24 @@ struct ecore_queue_sp_obj {
 };
 
 /********************** Function state update *********************************/
+
+/* UPDATE command options */
+enum {
+	ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+	ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
+	ECORE_F_UPDATE_SD_VLAN_TAG_CHNG,
+	ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
+	ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
+	ECORE_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
+	ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
+	ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+	ECORE_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+	ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+	ECORE_F_UPDATE_TUNNEL_INNER_RSS,
+	ECORE_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN_INNER_VNI,
+	ECORE_F_UPDATE_VLAN_FILTERING_PVID_CHNG,
+};
+
 /* Allowed Function states */
 enum ecore_func_state {
 	ECORE_F_STATE_RESET,
@@ -1470,6 +1563,7 @@ enum ecore_func_cmd {
 	ECORE_F_CMD_TX_STOP,
 	ECORE_F_CMD_TX_START,
 	ECORE_F_CMD_SWITCH_UPDATE,
+	ECORE_F_CMD_SET_TIMESYNC,
 	ECORE_F_CMD_MAX,
 };
 
@@ -1511,19 +1605,60 @@ struct ecore_func_start_params {
 	/* Function cos mode */
 	uint8_t network_cos_mode;
 
-	/* NVGRE classification enablement */
-	uint8_t nvgre_clss_en;
+	/* DMAE command id to be used for FW DMAE transactions */
+	uint8_t dmae_cmd_id;
+
+	/* UDP dest port for VXLAN */
+	uint16_t vxlan_dst_port;
+
+	/* UDP dest port for Geneve */
+	uint16_t geneve_dst_port;
+
+	/* Enable inner Rx classifications for L2GRE packets */
+	uint8_t inner_clss_l2gre;
+
+	/* Enable inner Rx classifications for L2-Geneve packets */
+	uint8_t inner_clss_l2geneve;
+
+	/* Enable inner Rx classification for vxlan packets */
+	uint8_t inner_clss_vxlan;
+
+	/* Enable RSS according to inner header */
+	uint8_t inner_rss;
+
+	/** Allows accepting of packets failing MF classification, possibly
+	 * only matching a given ethertype
+	 */
+	uint8_t class_fail;
+	uint16_t class_fail_ethtype;
+
+	/* Override priority of output packets */
+	uint8_t sd_vlan_force_pri;
+	uint8_t sd_vlan_force_pri_val;
+
+	/* Replace vlan's ethertype */
+	uint16_t sd_vlan_eth_type;
 
-	/* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
-	uint8_t gre_tunnel_mode;
+	/* Prevent inner vlans from being added by FW */
+	uint8_t no_added_tags;
 
-	/* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
-	uint8_t gre_tunnel_rss;
+	/* Inner-to-Outer vlan priority mapping */
+	uint8_t c2s_pri[MAX_VLAN_PRIORITIES];
+	uint8_t c2s_pri_default;
+	uint8_t c2s_pri_valid;
 
+	/* TX Vlan filtering configuration */
+	uint8_t tx_vlan_filtering_enable;
+	uint8_t tx_vlan_filtering_use_pvid;
 };
 
 struct ecore_func_switch_update_params {
-	uint8_t suspend;
+	unsigned long changes; /* ECORE_F_UPDATE_XX bits */
+	uint16_t vlan;
+	uint16_t vlan_eth_type;
+	uint8_t vlan_force_prio;
+	uint16_t vxlan_dst_port;
+	uint16_t geneve_dst_port;
 };
 
 struct ecore_func_afex_update_params {
@@ -1538,11 +1673,28 @@ struct ecore_func_afex_viflists_params {
 	uint8_t afex_vif_list_command;
 	uint8_t func_to_clear;
 };
+
 struct ecore_func_tx_start_params {
 	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
 	uint8_t dcb_enabled;
 	uint8_t dcb_version;
-	uint8_t dont_add_pri_0;
+	uint8_t dont_add_pri_0_en;
+	uint8_t dcb_outer_pri[MAX_TRAFFIC_TYPES];
+};
+
+struct ecore_func_set_timesync_params {
+	/* Reset, set or keep the current drift value */
+	uint8_t drift_adjust_cmd;
+	/* Dec, inc or keep the current offset */
+	uint8_t offset_cmd;
+	/* Drift value direction */
+	uint8_t add_sub_drift_adjust_value;
+	/* Drift, period and offset values to be used according to the commands
+	 * above.
+	 */
+	uint8_t drift_adjust_value;
+	uint32_t drift_adjust_period;
+	uint64_t offset_delta;
 };
 
 struct ecore_func_state_params {
@@ -1563,6 +1715,7 @@ struct ecore_func_state_params {
 		struct ecore_func_afex_update_params afex_update;
 		struct ecore_func_afex_viflists_params afex_viflists;
 		struct ecore_func_tx_start_params tx_start;
+		struct ecore_func_set_timesync_params set_timesync;
 	} params;
 };
 
@@ -1583,6 +1736,10 @@ struct ecore_func_sp_drv_ops {
 	void (*reset_hw_port)(struct bnx2x_softc *sc);
 	void (*reset_hw_func)(struct bnx2x_softc *sc);
 
+	/* Init/Free GUNZIP resources */
+	int (*gunzip_init)(struct bnx2x_softc *sc);
+	void (*gunzip_end)(struct bnx2x_softc *sc);
+
 	/* Prepare/Release FW resources */
 	int (*init_fw)(struct bnx2x_softc *sc);
 	void (*release_fw)(struct bnx2x_softc *sc);
@@ -1669,6 +1826,9 @@ void ecore_init_queue_obj(struct bnx2x_softc *sc,
 int ecore_queue_state_change(struct bnx2x_softc *sc,
 			     struct ecore_queue_state_params *params);
 
+int ecore_get_q_logical_state(struct bnx2x_softc *sc,
+			       struct ecore_queue_sp_obj *obj);
+
 /********************* VLAN-MAC ****************/
 void ecore_init_mac_obj(struct bnx2x_softc *sc,
 			struct ecore_vlan_mac_obj *mac_obj,
@@ -1677,6 +1837,34 @@ void ecore_init_mac_obj(struct bnx2x_softc *sc,
 			unsigned long *pstate, ecore_obj_type type,
 			struct ecore_credit_pool_obj *macs_pool);
 
+void ecore_init_vlan_obj(struct bnx2x_softc *sc,
+			 struct ecore_vlan_mac_obj *vlan_obj,
+			 uint8_t cl_id, uint32_t cid, uint8_t func_id,
+			 void *rdata,
+			 ecore_dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, ecore_obj_type type,
+			 struct ecore_credit_pool_obj *vlans_pool);
+
+void ecore_init_vlan_mac_obj(struct bnx2x_softc *sc,
+			     struct ecore_vlan_mac_obj *vlan_mac_obj,
+			     uint8_t cl_id, uint32_t cid, uint8_t func_id,
+			     void *rdata,
+			     ecore_dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, ecore_obj_type type,
+			     struct ecore_credit_pool_obj *macs_pool,
+			     struct ecore_credit_pool_obj *vlans_pool);
+
+void ecore_init_vxlan_fltr_obj(struct bnx2x_softc *sc,
+			       struct ecore_vlan_mac_obj *vlan_mac_obj,
+			       uint8_t cl_id, uint32_t cid, uint8_t func_id,
+			       void *rdata,
+			       ecore_dma_addr_t rdata_mapping, int state,
+			       unsigned long *pstate, ecore_obj_type type,
+			       struct ecore_credit_pool_obj *macs_pool,
+			       struct ecore_credit_pool_obj *vlans_pool);
+
+int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
+					struct ecore_vlan_mac_obj *o);
 void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
 				  struct ecore_vlan_mac_obj *o);
 int ecore_vlan_mac_h_write_lock(struct bnx2x_softc *sc,
@@ -1719,7 +1907,7 @@ void ecore_init_mcast_obj(struct bnx2x_softc *sc,
 /**
  * ecore_config_mcast - Configure multicast MACs list.
  *
- * @cmd: command to execute: BNX2X_MCAST_CMD_X
+ * @cmd: command to execute: ECORE_MCAST_CMD_X
  *
  * May configure a new list
  * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up
@@ -1747,9 +1935,12 @@ void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
 				 struct ecore_credit_pool_obj *p, uint8_t func_id,
 				 uint8_t func_num);
+void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
+			    int base, int credit);
 
 /****************** RSS CONFIGURATION ****************/
-void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
+void ecore_init_rss_config_obj(struct bnx2x_softc *sc,
+			       struct ecore_rss_config_obj *rss_obj,
 			       uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
 			       void *rdata, ecore_dma_addr_t rdata_mapping,
 			       int state, unsigned long *pstate,
@@ -1763,5 +1954,24 @@ void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
 int ecore_config_rss(struct bnx2x_softc *sc,
 		     struct ecore_config_rss_params *p);
 
+/**
+ * ecore_get_rss_ind_table - Return the current ind_table configuration.
+ *
+ * @ind_table: buffer to fill with the current indirection
+ *                  table content. Should be at least
+ *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
+ */
+void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
+			     uint8_t *ind_table);
+
+#define PF_MAC_CREDIT_E2(sc, func_num)					\
+	((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(sc) * VF_MAC_CREDIT_CNT) / \
+	 (func_num) + GET_NUM_VFS_PER_PF(sc) * VF_MAC_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(sc, func_num)					 \
+	((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(sc) * VF_VLAN_CREDIT_CNT) / \
+	 (func_num) + GET_NUM_VFS_PER_PF(sc) * VF_VLAN_CREDIT_CNT)
+
+#define ECORE_PF_VLAN_CREDIT_VLAN_FILTERING				256
 
 #endif /* ECORE_SP_H */
-- 
2.18.0


Thread overview: 29+ messages
2019-09-06  7:25 [dpdk-dev] [PATCH 0/5] net/bnx2x: update to latest FW Rasesh Mody
2019-09-06  7:25 ` [dpdk-dev] [PATCH 1/5] net/bnx2x: update and reorganize HW registers Rasesh Mody
2019-09-12 12:17   ` Jerin Jacob
2019-09-19 21:28     ` [dpdk-dev] [EXT] " Rasesh Mody
2019-09-23 10:47   ` [dpdk-dev] " Jerin Jacob
2019-09-06  7:25 ` [dpdk-dev] [PATCH 2/5] net/bnx2x: update HSI code Rasesh Mody
2019-09-06  7:25 ` [dpdk-dev] [PATCH 3/5] net/bnx2x: update to latest FW 7.13.11 Rasesh Mody
2019-09-12 12:25   ` Jerin Jacob
2019-09-06  7:25 ` [dpdk-dev] [PATCH 4/5] doc: cleanup SPDX license id usage in bnx2x guide Rasesh Mody
2019-09-06  7:25 ` [dpdk-dev] [PATCH 5/5] net/bnx2x: change PMD version to 1.1.0.1 Rasesh Mody
2019-09-12 12:11 ` [dpdk-dev] [PATCH 0/5] net/bnx2x: update to latest FW Jerin Jacob
2019-09-12 22:00   ` [dpdk-dev] [EXT] " Rasesh Mody
2019-09-19 21:11 ` [dpdk-dev] [PATCH v2 0/4] " Rasesh Mody
2019-09-23 16:33   ` Ferruh Yigit
2019-09-24 15:39     ` Jerin Jacob
2019-09-24 15:57       ` Ferruh Yigit
2019-09-24 16:30         ` Jerin Jacob
2019-09-24 16:51           ` Rasesh Mody
2019-09-19 21:11 ` [dpdk-dev] [PATCH v2 1/4] net/bnx2x: update and reorganize HW registers Rasesh Mody
2019-09-19 21:11 ` [dpdk-dev] [PATCH v2 2/4] net/bnx2x: update HSI code Rasesh Mody
2019-09-19 21:11 ` Rasesh Mody [this message]
2019-09-19 21:11 ` [dpdk-dev] [PATCH v2 4/4] doc: cleanup SPDX license id usage in bnx2x guide Rasesh Mody
2019-10-02 19:14 ` [dpdk-dev] [PATCH v3 0/3] net/bnx2x: update to latest FW Rasesh Mody
2019-10-03  5:57   ` Jerin Jacob
2019-10-03  6:48     ` Rasesh Mody
2019-10-04  9:32       ` Jerin Jacob
2019-10-02 19:14 ` [dpdk-dev] [PATCH v3 1/3] net/bnx2x: update and reorganize HW registers Rasesh Mody
2019-10-02 19:14 ` [dpdk-dev] [PATCH v3 2/3] net/bnx2x: update HSI code Rasesh Mody
2019-10-02 19:14 ` [dpdk-dev] [PATCH v3 3/3] net/bnx2x: update to latest FW 7.13.11 Rasesh Mody
