From: Rasesh Mody <rasesh.mody@cavium.com>
To: dev@dpdk.org, ferruh.yigit@intel.com
Cc: Rasesh Mody <rasesh.mody@cavium.com>, Dept-EngDPDKDev@cavium.com
Subject: [PATCH 07/53] net/qede/base: interchangeably use SB between PF and VF
Date: Mon, 18 Sep 2017 18:29:47 -0700
Message-ID: <1505784633-1171-8-git-send-email-rasesh.mody@cavium.com>
In-Reply-To: <1505784633-1171-1-git-send-email-rasesh.mody@cavium.com>

Status Block reallocation - allow a PF and its child VF to exchange SBs
between them using new base driver APIs.
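
As a rough illustration, a client could hand one of the PF's free fastpath
vectors to a child VF via the new ecore_int_igu_relocate_sb() API. This is
a minimal sketch only; the donate_sb_to_vf() wrapper is hypothetical and
not part of this patch:

  static enum _ecore_status_t donate_sb_to_vf(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt,
                                              u16 sb_id)
  {
          enum _ecore_status_t rc;

          /* b_to_vf == true moves the free PF vector to the VF side */
          rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, sb_id, true);
          if (rc != ECORE_SUCCESS)
                  DP_NOTICE(p_hwfn, false,
                            "Failed to relocate SB 0x%04x to a VF\n", sb_id);

          return rc;
  }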

The changes within the base driver flows are:

New APIs ecore_int_igu_reset_cam() and ecore_int_igu_reset_cam_default()
are added to reset the IGU CAM:
 a. During hw_prepare(), the driver re-initializes the IGU CAM.
 b. During hw_stop(), the driver restores the IGU CAM to its default.
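
In code, the pairing looks roughly as follows (condensed from the
ecore_hw_get_resc() and ecore_hw_stop() hunks below; error handling
abbreviated):

  /* hw_prepare() path - this also learns the SB count from the MFW */
  if (ecore_int_igu_reset_cam(p_hwfn, p_hwfn->p_main_ptt))
          return ECORE_INVAL;

  /* hw_stop() path - return the IGU CAM to its default layout */
  rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
  if (rc != ECORE_SUCCESS)
          DP_NOTICE(p_hwfn, true, "Failed to return IGU CAM to default\n");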

Use igu_sb_id instead of sb_idx [protocol index] to allow setting the
timer-resolution in the CAU [coalescing algorithm unit] for all SBs;
using sb_idx would limit timer-resolution changes to SBs 0-11 only.
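
With the queue cid now caching its IGU index, the coalescing helpers can
address any SB directly; condensed from the ecore_set_rxq_coalesce() hunk
below:

  /* Any IGU SB may be targeted, not just protocol indices 0-11 */
  rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
                               p_cid->sb_igu_id, false /* Rx */);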

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore.h         |   13 +-
 drivers/net/qede/base/ecore_dev.c     |   79 ++--
 drivers/net/qede/base/ecore_int.c     |  757 +++++++++++++++++++++++----------
 drivers/net/qede/base/ecore_int.h     |   71 +++-
 drivers/net/qede/base/ecore_int_api.h |   41 +-
 drivers/net/qede/base/ecore_l2.c      |   24 +-
 drivers/net/qede/base/ecore_l2.h      |   26 +-
 drivers/net/qede/base/ecore_l2_api.h  |    4 +-
 drivers/net/qede/base/ecore_sriov.c   |  134 +++---
 drivers/net/qede/base/ecore_vf.c      |   35 +-
 drivers/net/qede/base/ecore_vf.h      |   17 +
 drivers/net/qede/qede_rxtx.c          |    4 +-
 12 files changed, 808 insertions(+), 397 deletions(-)

diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 10fb16a..64a3416 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -279,7 +279,6 @@ struct ecore_qm_iids {
  * is received from MFW.
  */
 enum ecore_resources {
-	ECORE_SB,
 	ECORE_L2_QUEUE,
 	ECORE_VPORT,
 	ECORE_RSS_ENG,
@@ -293,7 +292,13 @@ enum ecore_resources {
 	ECORE_CMDQS_CQS,
 	ECORE_RDMA_STATS_QUEUE,
 	ECORE_BDQ,
-	ECORE_MAX_RESC,			/* must be last */
+
+	/* This is needed only internally for matching against the IGU.
+	 * In case of a legacy MFW, this would be set to `0'.
+	 */
+	ECORE_SB,
+
+	ECORE_MAX_RESC,
 };
 
 /* Features that require resources, given as input to the resource management
@@ -556,10 +561,6 @@ struct ecore_hwfn {
 	bool				b_rdma_enabled_in_prs;
 	u32				rdma_prs_search_reg;
 
-	/* Array of sb_info of all status blocks */
-	struct ecore_sb_info		*sbs_info[MAX_SB_PER_PF_MIMD];
-	u16				num_sbs;
-
 	struct ecore_cxt_mngr		*p_cxt_mngr;
 
 	/* Flag indicating whether interrupts are enabled or not*/
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 4a31d67..40b544b 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1232,7 +1232,7 @@ static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
 static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
 {
 	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
-	int i, sb_id;
+	int i, igu_sb_id;
 
 	for_each_hwfn(p_dev, i) {
 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -1242,16 +1242,18 @@ static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
 
 		p_igu_info = p_hwfn->hw_info.p_igu_info;
 
-		for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
-		     sb_id++) {
-			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+		for (igu_sb_id = 0;
+		     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
+		     igu_sb_id++) {
+			p_block = &p_igu_info->entry[igu_sb_id];
 
 			if (!p_block->is_pf)
 				continue;
 
 			ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
 						p_block->function_id, 0, 0);
-			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
+			STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
+					 sb_entry);
 		}
 	}
 }
@@ -2255,6 +2257,13 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
 		ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
 		ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
 		ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+		rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
+		if (rc != ECORE_SUCCESS) {
+			DP_NOTICE(p_hwfn, true,
+				  "Failed to return IGU CAM to default\n");
+			rc2 = ECORE_UNKNOWN_ERROR;
+		}
+
 		/* Need to wait 1ms to guarantee SBs are cleared */
 		OSAL_MSLEEP(1);
 
@@ -2423,31 +2432,32 @@ static void get_function_id(struct ecore_hwfn *p_hwfn)
 static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
 {
 	u32 *feat_num = p_hwfn->hw_info.feat_num;
+	struct ecore_sb_cnt_info sb_cnt;
 	u32 non_l2_sbs = 0;
 
+	OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
+	ecore_int_get_num_sbs(p_hwfn, &sb_cnt);
+
 	/* L2 Queues require each: 1 status block. 1 L2 queue */
 	if (ECORE_IS_L2_PERSONALITY(p_hwfn)) {
-		struct ecore_sb_cnt_info sb_cnt_info;
-
-		OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
-		ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
-
 		/* Start by allocating VF queues, then PF's */
 		feat_num[ECORE_VF_L2_QUE] =
 			OSAL_MIN_T(u32,
 				   RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
-				   sb_cnt_info.sb_iov_cnt);
+				   sb_cnt.iov_cnt);
 		feat_num[ECORE_PF_L2_QUE] =
 			OSAL_MIN_T(u32,
-				   RESC_NUM(p_hwfn, ECORE_SB) - non_l2_sbs,
+				   sb_cnt.cnt - non_l2_sbs,
 				   RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
 				   FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
 	}
 
-	feat_num[ECORE_FCOE_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
-					     RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
-	feat_num[ECORE_ISCSI_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
-					     RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+	feat_num[ECORE_FCOE_CQ] = OSAL_MIN_T(u32, sb_cnt.cnt,
+					     RESC_NUM(p_hwfn,
+						      ECORE_CMDQS_CQS));
+	feat_num[ECORE_ISCSI_CQ] = OSAL_MIN_T(u32, sb_cnt.cnt,
+					      RESC_NUM(p_hwfn,
+						       ECORE_CMDQS_CQS));
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
 		   "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
@@ -2456,14 +2466,12 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
 		   (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
 		   (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
 		   (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
-		   RESC_NUM(p_hwfn, ECORE_SB));
+		   (int)sb_cnt.cnt);
 }
 
 const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
 {
 	switch (res_id) {
-	case ECORE_SB:
-		return "SB";
 	case ECORE_L2_QUEUE:
 		return "L2_QUEUE";
 	case ECORE_VPORT:
@@ -2490,6 +2498,8 @@ const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
 		return "RDMA_STATS_QUEUE";
 	case ECORE_BDQ:
 		return "BDQ";
+	case ECORE_SB:
+		return "SB";
 	default:
 		return "UNKNOWN_RESOURCE";
 	}
@@ -2565,14 +2575,8 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
 {
 	u8 num_funcs = p_hwfn->num_funcs_on_engine;
 	bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
-	struct ecore_sb_cnt_info sb_cnt_info;
 
 	switch (res_id) {
-	case ECORE_SB:
-		OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
-		ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
-		*p_resc_num = sb_cnt_info.sb_cnt;
-		break;
 	case ECORE_L2_QUEUE:
 		*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
 				 MAX_NUM_L2_QUEUES_BB) / num_funcs;
@@ -2629,6 +2633,12 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
 		if (!*p_resc_num)
 			*p_resc_start = 0;
 		break;
+	case ECORE_SB:
+		/* Since we want its value to reflect whether MFW supports
+		 * the new scheme, have a default of 0.
+		 */
+		*p_resc_num = 0;
+		break;
 	default:
 		*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
 		break;
@@ -2693,14 +2703,9 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
 		goto out;
 	}
 
-	/* TBD - remove this when revising the handling of the SB resource */
-	if (res_id == ECORE_SB) {
-		/* Excluding the slowpath SB */
-		*p_resc_num -= 1;
-		*p_resc_start -= p_hwfn->enabled_func_idx;
-	}
-
-	if (*p_resc_num != dflt_resc_num || *p_resc_start != dflt_resc_start) {
+	if ((*p_resc_num != dflt_resc_num ||
+	     *p_resc_start != dflt_resc_start) &&
+	    res_id != ECORE_SB) {
 		DP_INFO(p_hwfn,
 			"MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
 			res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
@@ -2850,6 +2855,10 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 		return ECORE_INVAL;
 	}
 
+	/* This will also learn the number of SBs from MFW */
+	if (ecore_int_igu_reset_cam(p_hwfn, p_hwfn->p_main_ptt))
+		return ECORE_INVAL;
+
 	ecore_hw_set_feat(p_hwfn);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
@@ -4540,7 +4549,7 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
 	timeset = (u8)(coalesce >> timer_res);
 
 	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
-				     p_cid->abs.sb_idx, false);
+				     p_cid->sb_igu_id, false);
 	if (rc != ECORE_SUCCESS)
 		goto out;
 
@@ -4581,7 +4590,7 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
 	timeset = (u8)(coalesce >> timer_res);
 
 	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
-				     p_cid->abs.sb_idx, true);
+				     p_cid->sb_igu_id, true);
 	if (rc != ECORE_SUCCESS)
 		goto out;
 
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index b57c510..f8b104a 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -1369,6 +1369,49 @@ void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
 	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
 }
 
+static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+				   struct ecore_ptt *p_ptt,
+				   u16 igu_sb_id, u32 pi_index,
+				   enum ecore_coalescing_fsm coalescing_fsm,
+				   u8 timeset)
+{
+	struct cau_pi_entry pi_entry;
+	u32 sb_offset, pi_offset;
+
+	if (IS_VF(p_hwfn->p_dev))
+		return;/* @@@TBD MichalK- VF CAU... */
+
+	sb_offset = igu_sb_id * PIS_PER_SB;
+	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
+		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+	else
+		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+	pi_offset = sb_offset + pi_index;
+	if (p_hwfn->hw_init_done) {
+		ecore_wr(p_hwfn, p_ptt,
+			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+			 *((u32 *)&(pi_entry)));
+	} else {
+		STORE_RT_REG(p_hwfn,
+			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+			     *((u32 *)&(pi_entry)));
+	}
+}
+
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+			   struct ecore_ptt *p_ptt,
+			   struct ecore_sb_info *p_sb, u32 pi_index,
+			   enum ecore_coalescing_fsm coalescing_fsm,
+			   u8 timeset)
+{
+	_ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
+			       pi_index, coalescing_fsm, timeset);
+}
+
 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
 			   struct ecore_ptt *p_ptt,
 			   dma_addr_t sb_phys, u16 igu_sb_id,
@@ -1420,8 +1463,9 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
 		else
 			timer_res = 2;
 		timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
-		ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
-				      ECORE_COAL_RX_STATE_MACHINE, timeset);
+		_ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+				       ECORE_COAL_RX_STATE_MACHINE,
+				       timeset);
 
 		if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
 			timer_res = 0;
@@ -1431,46 +1475,14 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
 			timer_res = 2;
 		timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
 		for (i = 0; i < num_tc; i++) {
-			ecore_int_cau_conf_pi(p_hwfn, p_ptt,
-					      igu_sb_id, TX_PI(i),
-					      ECORE_COAL_TX_STATE_MACHINE,
-					      timeset);
+			_ecore_int_cau_conf_pi(p_hwfn, p_ptt,
+					       igu_sb_id, TX_PI(i),
+					       ECORE_COAL_TX_STATE_MACHINE,
+					       timeset);
 		}
 	}
 }
 
-void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
-			   struct ecore_ptt *p_ptt,
-			   u16 igu_sb_id, u32 pi_index,
-			   enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
-{
-	struct cau_pi_entry pi_entry;
-	u32 sb_offset, pi_offset;
-
-	if (IS_VF(p_hwfn->p_dev))
-		return;		/* @@@TBD MichalK- VF CAU... */
-
-	sb_offset = igu_sb_id * PIS_PER_SB;
-	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
-
-	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
-	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
-		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
-	else
-		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
-
-	pi_offset = sb_offset + pi_index;
-	if (p_hwfn->hw_init_done) {
-		ecore_wr(p_hwfn, p_ptt,
-			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
-			 *((u32 *)&(pi_entry)));
-	} else {
-		STORE_RT_REG(p_hwfn,
-			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
-			     *((u32 *)&(pi_entry)));
-	}
-}
-
 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
 			struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
 {
@@ -1483,16 +1495,50 @@ void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
 				      sb_info->igu_sb_id, 0, 0);
 }
 
-/**
- * @brief ecore_get_igu_sb_id - given a sw sb_id return the
- *        igu_sb_id
- *
- * @param p_hwfn
- * @param sb_id
- *
- * @return u16
- */
-static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+struct ecore_igu_block *
+ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
+{
+	struct ecore_igu_block *p_block;
+	u16 igu_id;
+
+	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+	     igu_id++) {
+		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+		    !(p_block->status & ECORE_IGU_STATUS_FREE))
+			continue;
+
+		if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
+		    b_is_pf)
+			return p_block;
+	}
+
+	return OSAL_NULL;
+}
+
+static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
+				  u16 vector_id)
+{
+	struct ecore_igu_block *p_block;
+	u16 igu_id;
+
+	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+	     igu_id++) {
+		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+		    !p_block->is_pf ||
+		    p_block->vector_number != vector_id)
+			continue;
+
+		return igu_id;
+	}
+
+	return ECORE_SB_INVALID_IDX;
+}
+
+u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
 {
 	u16 igu_sb_id;
 
@@ -1500,11 +1546,15 @@ static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
 	if (sb_id == ECORE_SP_SB_ID)
 		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
 	else if (IS_PF(p_hwfn->p_dev))
-		igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
 	else
 		igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
 
-	if (sb_id == ECORE_SP_SB_ID)
+	if (igu_sb_id == ECORE_SB_INVALID_IDX)
+		DP_NOTICE(p_hwfn, true,
+			  "Slowpath SB vector %04x doesn't exist\n",
+			  sb_id);
+	else if (sb_id == ECORE_SP_SB_ID)
 		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
 			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
 	else
@@ -1525,9 +1575,24 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
 
 	sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
 
+	if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
+		return ECORE_INVAL;
+
+	/* Let the igu info reference the client's SB info */
 	if (sb_id != ECORE_SP_SB_ID) {
-		p_hwfn->sbs_info[sb_id] = sb_info;
-		p_hwfn->num_sbs++;
+		if (IS_PF(p_hwfn->p_dev)) {
+			struct ecore_igu_info *p_info;
+			struct ecore_igu_block *p_block;
+
+			p_info = p_hwfn->hw_info.p_igu_info;
+			p_block = &p_info->entry[sb_info->igu_sb_id];
+
+			p_block->sb_info = sb_info;
+			p_block->status &= ~ECORE_IGU_STATUS_FREE;
+			p_info->usage.free_cnt--;
+		} else {
+			ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
+		}
 	}
 #ifdef ECORE_CONFIG_DIRECT_HWFN
 	sb_info->p_hwfn = p_hwfn;
@@ -1559,20 +1624,35 @@ enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
 					  struct ecore_sb_info *sb_info,
 					  u16 sb_id)
 {
-	if (sb_id == ECORE_SP_SB_ID) {
-		DP_ERR(p_hwfn, "Do Not free sp sb using this function");
-		return ECORE_INVAL;
-	}
+	struct ecore_igu_info *p_info;
+	struct ecore_igu_block *p_block;
+
+	if (sb_info == OSAL_NULL)
+		return ECORE_SUCCESS;
 
 	/* zero status block and ack counter */
 	sb_info->sb_ack = 0;
 	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
 
-	if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
-		p_hwfn->sbs_info[sb_id] = OSAL_NULL;
-		p_hwfn->num_sbs--;
+	if (IS_VF(p_hwfn->p_dev)) {
+		ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
+		return ECORE_SUCCESS;
 	}
 
+	p_info = p_hwfn->hw_info.p_igu_info;
+	p_block = &p_info->entry[sb_info->igu_sb_id];
+
+	/* Vector 0 is reserved to Default SB */
+	if (p_block->vector_number == 0) {
+		DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+		return ECORE_INVAL;
+	}
+
+	/* Lose reference to client's SB info, and fix counters */
+	p_block->sb_info = OSAL_NULL;
+	p_block->status |= ECORE_IGU_STATUS_FREE;
+	p_info->usage.free_cnt++;
+
 	return ECORE_SUCCESS;
 }
 
@@ -1778,11 +1858,13 @@ void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
 
 #define IGU_CLEANUP_SLEEP_LENGTH		(1000)
 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
-			      struct ecore_ptt *p_ptt,
-			      u32 sb_id, bool cleanup_set, u16 opaque_fid)
+				     struct ecore_ptt *p_ptt,
+				     u32 igu_sb_id,
+				     bool cleanup_set,
+				     u16 opaque_fid)
 {
 	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
-	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
 	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
 	u8 type = 0;		/* FIXME MichalS type??? */
 
@@ -1813,8 +1895,8 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
 	OSAL_MMIOWB(p_hwfn->p_dev);
 
 	/* calculate where to read the status bit from */
-	sb_bit = 1 << (sb_id % 32);
-	sb_bit_addr = sb_id / 32 * sizeof(u32);
+	sb_bit = 1 << (igu_sb_id % 32);
+	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
 
 	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
 
@@ -1829,21 +1911,28 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
 	if (!sleep_cnt)
 		DP_NOTICE(p_hwfn, true,
 			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
-			  val, sb_id);
+			  val, igu_sb_id);
 }
 
 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
 				       struct ecore_ptt *p_ptt,
-				       u32 sb_id, u16 opaque, bool b_set)
+				       u16 igu_sb_id, u16 opaque, bool b_set)
 {
+	struct ecore_igu_block *p_block;
 	int pi, i;
 
+	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
+	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+		   "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
+		   igu_sb_id, p_block->function_id, p_block->is_pf,
+		   p_block->vector_number);
+
 	/* Set */
 	if (b_set)
-		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
 
 	/* Clear */
-	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
 
 	/* Wait for the IGU SB to cleanup */
 	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
@@ -1851,8 +1940,8 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
 
 		val = ecore_rd(p_hwfn, p_ptt,
 			       IGU_REG_WRITE_DONE_PENDING +
-			       ((sb_id / 32) * 4));
-		if (val & (1 << (sb_id % 32)))
+			       ((igu_sb_id / 32) * 4));
+		if (val & (1 << (igu_sb_id % 32)))
 			OSAL_UDELAY(10);
 		else
 			break;
@@ -1860,21 +1949,22 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
 	if (i == IGU_CLEANUP_SLEEP_LENGTH)
 		DP_NOTICE(p_hwfn, true,
 			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
-			  sb_id);
+			  igu_sb_id);
 
 	/* Clear the CAU for the SB */
 	for (pi = 0; pi < 12; pi++)
 		ecore_wr(p_hwfn, p_ptt,
-			 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+			 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
 }
 
 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
 				struct ecore_ptt *p_ptt,
 				bool b_set, bool b_slowpath)
 {
-	u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
-	u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
-	u32 sb_id = 0, val = 0;
+	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+	struct ecore_igu_block *p_block;
+	u16 igu_sb_id = 0;
+	u32 val = 0;
 
 	/* @@@TBD MichalK temporary... should be moved to init-tool... */
 	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
@@ -1883,53 +1973,204 @@ void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
 	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
 	/* end temporary */
 
-	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
-		   "IGU cleaning SBs [%d,...,%d]\n",
-		   igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+	for (igu_sb_id = 0;
+	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+	     igu_sb_id++) {
+		p_block = &p_info->entry[igu_sb_id];
 
-	for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
-		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+		    !p_block->is_pf ||
+		    (p_block->status & ECORE_IGU_STATUS_DSB))
+			continue;
+
+		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
 						  p_hwfn->hw_info.opaque_fid,
 						  b_set);
+	}
 
-	if (!b_slowpath)
-		return;
+	if (b_slowpath)
+		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+						  p_info->igu_dsb_id,
+						  p_hwfn->hw_info.opaque_fid,
+						  b_set);
+}
 
-	sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
-	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
-		   "IGU cleaning slowpath SB [%d]\n", sb_id);
-	ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
-					  p_hwfn->hw_info.opaque_fid, b_set);
+int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
+			    struct ecore_ptt *p_ptt)
+{
+	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+	struct ecore_igu_block *p_block;
+	int pf_sbs, vf_sbs;
+	u16 igu_sb_id;
+	u32 val, rval;
+
+	if (!RESC_NUM(p_hwfn, ECORE_SB)) {
+		/* We're using an old MFW - have to prevent any switching
+		 * of SBs between PF and VFs as later driver wouldn't be
+		 * able to tell which belongs to which.
+		 */
+		p_info->b_allow_pf_vf_change = false;
+	} else {
+		/* Use the numbers the MFW have provided -
+		 * don't forget MFW accounts for the default SB as well.
+		 */
+		p_info->b_allow_pf_vf_change = true;
+
+		if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
+			DP_INFO(p_hwfn,
+				"MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
+				RESC_NUM(p_hwfn, ECORE_SB) - 1,
+				p_info->usage.cnt);
+			p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
+		}
+
+		/* TODO - how do we learn about VF SBs from MFW? */
+		if (IS_PF_SRIOV(p_hwfn)) {
+			u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+			if (vfs != p_info->usage.iov_cnt)
+				DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
+					   p_info->usage.iov_cnt, vfs);
+
+			/* At this point we know how many SBs we have totally
+			 * in IGU + number of PF SBs. So we can validate that
+			 * we'd have sufficient for VF.
+			 */
+			if (vfs > p_info->usage.free_cnt +
+				  p_info->usage.free_cnt_iov -
+				  p_info->usage.cnt) {
+				DP_NOTICE(p_hwfn, true,
+					  "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
+					  p_info->usage.free_cnt +
+					  p_info->usage.free_cnt_iov,
+					  p_info->usage.cnt, vfs);
+				return ECORE_INVAL;
+			}
+		}
+	}
+
+	/* Cap the number of VF SBs by the number of VFs */
+	if (IS_PF_SRIOV(p_hwfn))
+		p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+	/* Mark all SBs as free, now in the right PF/VFs division */
+	p_info->usage.free_cnt = p_info->usage.cnt;
+	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
+	p_info->usage.orig = p_info->usage.cnt;
+	p_info->usage.iov_orig = p_info->usage.iov_cnt;
+
+	/* We now proceed to re-configure the IGU CAM to reflect the initial
+	 * configuration. We can start with the Default SB.
+	 */
+	pf_sbs = p_info->usage.cnt;
+	vf_sbs = p_info->usage.iov_cnt;
+
+	for (igu_sb_id = p_info->igu_dsb_id;
+	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+	     igu_sb_id++) {
+		p_block = &p_info->entry[igu_sb_id];
+		val = 0;
+
+		if (!(p_block->status & ECORE_IGU_STATUS_VALID))
+			continue;
+
+		if (p_block->status & ECORE_IGU_STATUS_DSB) {
+			p_block->function_id = p_hwfn->rel_pf_id;
+			p_block->is_pf = 1;
+			p_block->vector_number = 0;
+			p_block->status = ECORE_IGU_STATUS_VALID |
+					  ECORE_IGU_STATUS_PF |
+					  ECORE_IGU_STATUS_DSB;
+		} else if (pf_sbs) {
+			pf_sbs--;
+			p_block->function_id = p_hwfn->rel_pf_id;
+			p_block->is_pf = 1;
+			p_block->vector_number = p_info->usage.cnt - pf_sbs;
+			p_block->status = ECORE_IGU_STATUS_VALID |
+					  ECORE_IGU_STATUS_PF |
+					  ECORE_IGU_STATUS_FREE;
+		} else if (vf_sbs) {
+			p_block->function_id =
+				p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
+				p_info->usage.iov_cnt - vf_sbs;
+			p_block->is_pf = 0;
+			p_block->vector_number = 0;
+			p_block->status = ECORE_IGU_STATUS_VALID |
+					  ECORE_IGU_STATUS_FREE;
+			vf_sbs--;
+		} else {
+			p_block->function_id = 0;
+			p_block->is_pf = 0;
+			p_block->vector_number = 0;
+		}
+
+		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+			  p_block->function_id);
+		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+			  p_block->vector_number);
+
+		/* VF entries would be enabled when the VF is initialized */
+		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+
+		rval = ecore_rd(p_hwfn, p_ptt,
+				IGU_REG_MAPPING_MEMORY +
+				sizeof(u32) * igu_sb_id);
+
+		if (rval != val) {
+			ecore_wr(p_hwfn, p_ptt,
+				 IGU_REG_MAPPING_MEMORY +
+				 sizeof(u32) * igu_sb_id,
+				 val);
+
+			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
+				   igu_sb_id, p_block->function_id,
+				   p_block->is_pf, p_block->vector_number,
+				   rval, val);
+		}
+	}
+
+	return 0;
+}
+
+int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
+				    struct ecore_ptt *p_ptt)
+{
+	struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;
+
+	/* Return all the usage indications to default prior to the reset;
+	 * The reset expects the !orig to reflect the initial status of the
+	 * SBs, and would re-calculate the originals based on those.
+	 */
+	p_cnt->cnt = p_cnt->orig;
+	p_cnt->free_cnt = p_cnt->orig;
+	p_cnt->iov_cnt = p_cnt->iov_orig;
+	p_cnt->free_cnt_iov = p_cnt->iov_orig;
+	p_cnt->orig = 0;
+	p_cnt->iov_orig = 0;
+
+	/* TODO - we probably need to re-configure the CAU as well... */
+	return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
 }
 
-static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
-					struct ecore_ptt *p_ptt, u16 sb_id)
+static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
+					 struct ecore_ptt *p_ptt,
+					 u16 igu_sb_id)
 {
 	u32 val = ecore_rd(p_hwfn, p_ptt,
-			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
 	struct ecore_igu_block *p_block;
 
-	p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
-
-	/* stop scanning when hit first invalid PF entry */
-	if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
-	    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
-		goto out;
+	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
 
 	/* Fill the block information */
-	p_block->status = ECORE_IGU_STATUS_VALID;
 	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
 	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
 	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
 
-	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
-		   "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
-		   " is_pf = %d vector_num = 0x%x\n",
-		   sb_id, val, p_block->function_id, p_block->is_pf,
-		   p_block->vector_number);
-
-out:
-	return val;
+	p_block->igu_sb_id = igu_sb_id;
 }
 
 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
@@ -1937,140 +2178,217 @@ enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
 {
 	struct ecore_igu_info *p_igu_info;
 	struct ecore_igu_block *p_block;
-	u32 min_vf = 0, max_vf = 0, val;
-	u16 sb_id, last_iov_sb_id = 0;
-	u16 prev_sb_id = 0xFF;
+	u32 min_vf = 0, max_vf = 0;
+	u16 igu_sb_id;
 
-	p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
-						GFP_KERNEL,
-						sizeof(*p_igu_info));
+	p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
+						 GFP_KERNEL,
+						 sizeof(*p_igu_info));
 	if (!p_hwfn->hw_info.p_igu_info)
 		return ECORE_NOMEM;
-
-	OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
-
 	p_igu_info = p_hwfn->hw_info.p_igu_info;
 
-	/* Initialize base sb / sb cnt for PFs and VFs */
-	p_igu_info->igu_base_sb = 0xffff;
-	p_igu_info->igu_sb_cnt = 0;
-	p_igu_info->igu_dsb_id = 0xffff;
-	p_igu_info->igu_base_sb_iov = 0xffff;
+	/* Distinguish between existent and non-existent default SB */
+	p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;
 
+	/* Find the range of VF ids whose SB belong to this PF */
 	if (p_hwfn->p_dev->p_iov_info) {
 		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
 
 		min_vf = p_iov->first_vf_in_pf;
 		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
 	}
-	for (sb_id = 0;
-	     sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
-	     sb_id++) {
-		p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
-		val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
-		if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
-		    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
-			break;
 
-		if (p_block->is_pf) {
-			if (p_block->function_id == p_hwfn->rel_pf_id) {
-				p_block->status |= ECORE_IGU_STATUS_PF;
-
-				if (p_block->vector_number == 0) {
-					if (p_igu_info->igu_dsb_id == 0xffff)
-						p_igu_info->igu_dsb_id = sb_id;
-				} else {
-					if (p_igu_info->igu_base_sb == 0xffff) {
-						p_igu_info->igu_base_sb = sb_id;
-					} else if (prev_sb_id != sb_id - 1) {
-						DP_NOTICE(p_hwfn->p_dev, false,
-							  "consecutive igu"
-							  " vectors for HWFN"
-							  " %x broken",
-							  p_hwfn->rel_pf_id);
-						break;
-					}
-					prev_sb_id = sb_id;
-					/* we don't count the default */
-					(p_igu_info->igu_sb_cnt)++;
-				}
-			}
-		} else {
-			if ((p_block->function_id >= min_vf) &&
-			    (p_block->function_id < max_vf)) {
-				/* Available for VFs of this PF */
-				if (p_igu_info->igu_base_sb_iov == 0xffff) {
-					p_igu_info->igu_base_sb_iov = sb_id;
-				} else if (last_iov_sb_id != sb_id - 1) {
-					if (!val)
-						DP_VERBOSE(p_hwfn->p_dev,
-							   ECORE_MSG_INTR,
-							   "First uninited IGU"
-							   " CAM entry at"
-							   " index 0x%04x\n",
-							   sb_id);
-					else
-						DP_NOTICE(p_hwfn->p_dev, false,
-							  "Consecutive igu"
-							  " vectors for HWFN"
-							  " %x vfs is broken"
-							  " [jumps from %04x"
-							  " to %04x]\n",
-							  p_hwfn->rel_pf_id,
-							  last_iov_sb_id,
-							  sb_id);
-					break;
-				}
-				p_block->status |= ECORE_IGU_STATUS_FREE;
-				p_hwfn->hw_info.p_igu_info->free_blks++;
-				last_iov_sb_id = sb_id;
-			}
+	for (igu_sb_id = 0;
+	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+	     igu_sb_id++) {
+		/* Read current entry; Notice it might not belong to this PF */
+		ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
+		p_block = &p_igu_info->entry[igu_sb_id];
+
+		if ((p_block->is_pf) &&
+		    (p_block->function_id == p_hwfn->rel_pf_id)) {
+			p_block->status = ECORE_IGU_STATUS_PF |
+					  ECORE_IGU_STATUS_VALID |
+					  ECORE_IGU_STATUS_FREE;
+
+			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
+				p_igu_info->usage.cnt++;
+		} else if (!(p_block->is_pf) &&
+			   (p_block->function_id >= min_vf) &&
+			   (p_block->function_id < max_vf)) {
+			/* Available for VFs of this PF */
+			p_block->status = ECORE_IGU_STATUS_VALID |
+					  ECORE_IGU_STATUS_FREE;
+
+			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
+				p_igu_info->usage.iov_cnt++;
+		}
+
+		/* Mark the first entry belonging to the PF or its VFs
+		 * as the default SB [we'll reset IGU prior to first usage].
+		 */
+		if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
+		    (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
+			p_igu_info->igu_dsb_id = igu_sb_id;
+			p_block->status |= ECORE_IGU_STATUS_DSB;
 		}
+
+		/* While this isn't suitable for all clients, limit the number
+		 * of prints by having each PF print only its own entries, with
+		 * the exception of PF0 which would print everything.
+		 */
+		if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
+		    (p_hwfn->abs_pf_id == 0))
+			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+				   igu_sb_id, p_block->function_id,
+				   p_block->is_pf, p_block->vector_number);
+	}
+
+	if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
+		DP_NOTICE(p_hwfn, true,
+			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
+			  p_igu_info->igu_dsb_id);
+		return ECORE_INVAL;
+	}
+
+	/* All non default SB are considered free at this point */
+	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
+	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
+		   p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
+		   p_igu_info->usage.iov_cnt);
+
+	return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			  u16 sb_id, bool b_to_vf)
+{
+	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+	struct ecore_igu_block *p_block = OSAL_NULL;
+	u16 igu_sb_id = 0, vf_num = 0;
+	u32 val = 0;
+
+	if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
+		return ECORE_INVAL;
+
+	if (sb_id == ECORE_SP_SB_ID)
+		return ECORE_INVAL;
+
+	if (!p_info->b_allow_pf_vf_change) {
+		DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
+		return ECORE_INVAL;
 	}
 
-	/* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
-	 * the number of VF SBs [especially for first VF on engine, as we can't
-	 * diffrentiate between empty entries and its entries].
-	 * Since we don't really support more SBs than VFs today, prevent any
-	 * such configuration by sanitizing the number of SBs to equal the
-	 * number of VFs.
+	/* If we're moving a SB from PF to VF, the client has to specify
+	 * which vector it wants to move.
 	 */
-	if (IS_PF_SRIOV(p_hwfn)) {
-		u16 total_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
-
-		if (total_vfs < p_igu_info->free_blks) {
-			DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
-				   "Limiting number of SBs for IOV - %04x --> %04x\n",
-				   p_igu_info->free_blks,
-				   p_hwfn->p_dev->p_iov_info->total_vfs);
-			p_igu_info->free_blks = total_vfs;
-		} else if (total_vfs > p_igu_info->free_blks) {
-			DP_NOTICE(p_hwfn, true,
-				  "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
-				  p_igu_info->free_blks, total_vfs);
+	if (b_to_vf) {
+		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
+		if (igu_sb_id == ECORE_SB_INVALID_IDX)
 			return ECORE_INVAL;
-		}
 	}
 
-	p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+	/* If we're moving a SB from VF to PF, need to validate there isn't
+	 * already a line configured for that vector.
+	 */
+	if (!b_to_vf) {
+		if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
+		    ECORE_SB_INVALID_IDX)
+			return ECORE_INVAL;
+	}
 
-	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
-		   "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
-		   "igu_dsb_id=0x%x\n",
-		   p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
-		   p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
-		   p_igu_info->igu_dsb_id);
-
-	if (p_igu_info->igu_base_sb == 0xffff ||
-	    p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
-		DP_NOTICE(p_hwfn, true,
-			  "IGU CAM returned invalid values igu_base_sb=0x%x "
-			  "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
-			  p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
-			  p_igu_info->igu_dsb_id);
+	/* We need to validate that the SB can actually be relocated.
+	 * This would also handle the previous case where we've explicitly
+	 * stated which IGU SB needs to move.
+	 */
+	for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+	     igu_sb_id++) {
+		p_block = &p_info->entry[igu_sb_id];
+
+		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+		    !(p_block->status & ECORE_IGU_STATUS_FREE) ||
+		    (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
+			if (b_to_vf)
+				return ECORE_INVAL;
+			else
+				continue;
+		}
+
+		break;
+	}
+
+	if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
+		DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
+			   "Failed to find a free SB to move\n");
 		return ECORE_INVAL;
 	}
 
+	/* At this point, p_block points to the SB we want to relocate */
+	if (b_to_vf) {
+		p_block->status &= ~ECORE_IGU_STATUS_PF;
+
+		/* It doesn't matter which VF number we choose, since we're
+		 * going to disable the line; but let's keep it in range.
+		 */
+		vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
+
+		p_block->function_id = (u8)vf_num;
+		p_block->is_pf = 0;
+		p_block->vector_number = 0;
+
+		p_info->usage.cnt--;
+		p_info->usage.free_cnt--;
+		p_info->usage.iov_cnt++;
+		p_info->usage.free_cnt_iov++;
+
+		/* TODO - if SBs aren't really the limiting factor,
+		 * then it might not be accurate [in the sense that
+		 * we might not need to decrement the feature].
+		 */
+		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
+		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
+	} else {
+		p_block->status |= ECORE_IGU_STATUS_PF;
+		p_block->function_id = p_hwfn->rel_pf_id;
+		p_block->is_pf = 1;
+		p_block->vector_number = sb_id + 1;
+
+		p_info->usage.cnt++;
+		p_info->usage.free_cnt++;
+		p_info->usage.iov_cnt--;
+		p_info->usage.free_cnt_iov--;
+
+		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
+		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
+	}
+
+	/* Update the IGU and CAU with the new configuration */
+	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+		  p_block->function_id);
+	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+	SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+	SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+		  p_block->vector_number);
+
+	ecore_wr(p_hwfn, p_ptt,
+		 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
+		 val);
+
+	ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
+			      igu_sb_id, vf_num,
+			      p_block->is_pf ? 0 : 1);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+		   "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+		   igu_sb_id, p_block->function_id,
+		   p_block->is_pf, p_block->vector_number);
+
 	return ECORE_SUCCESS;
 }
 
@@ -2170,14 +2488,13 @@ void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
 			   struct ecore_sb_cnt_info *p_sb_cnt_info)
 {
-	struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
+	struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;
 
-	if (!info || !p_sb_cnt_info)
+	if (!p_igu_info || !p_sb_cnt_info)
 		return;
 
-	p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
-	p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
-	p_sb_cnt_info->sb_free_blk = info->free_blks;
+	OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
+		    sizeof(*p_sb_cnt_info));
 }
 
 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 067ed60..b655685 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -21,31 +21,76 @@
 #define SB_ALIGNED_SIZE(p_hwfn)					\
 	ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
 
+#define ECORE_SB_INVALID_IDX	0xffff
+
 struct ecore_igu_block {
 	u8 status;
 #define ECORE_IGU_STATUS_FREE	0x01
 #define ECORE_IGU_STATUS_VALID	0x02
 #define ECORE_IGU_STATUS_PF	0x04
+#define ECORE_IGU_STATUS_DSB	0x08
 
 	u8 vector_number;
 	u8 function_id;
 	u8 is_pf;
-};
 
-struct ecore_igu_map {
-	struct ecore_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+	/* Index inside IGU [meant for back reference] */
+	u16 igu_sb_id;
+
+	struct ecore_sb_info *sb_info;
 };
 
 struct ecore_igu_info {
-	struct ecore_igu_map igu_map;
+	struct ecore_igu_block entry[MAX_TOT_SB_PER_PATH];
 	u16 igu_dsb_id;
-	u16 igu_base_sb;
-	u16 igu_base_sb_iov;
-	u16 igu_sb_cnt;
-	u16 igu_sb_cnt_iov;
-	u16 free_blks;
+
+	/* The numbers can shift when using APIs to switch SBs between PF and
+	 * VF.
+	 */
+	struct ecore_sb_cnt_info usage;
+
+	/* Determine whether we can shift SBs between VFs and PFs */
+	bool b_allow_pf_vf_change;
 };
 
+/**
+ * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
+			    struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Make sure IGU CAM reflects the default resources once again,
+ *          starting with a 'dirty' SW database.
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
+				    struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ *
+ * @param p_hwfn
+ * @param sb_id - user provided sb_id
+ *
+ * @return an index inside IGU CAM where the SB resides
+ */
+u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief return a pointer to an unused valid SB
+ *
+ * @param p_hwfn
+ * @param b_is_pf - true iff we want a SB belonging to a PF
+ *
+ * @return pointer to an igu_block, OSAL_NULL if none is available
+ */
+struct ecore_igu_block *
+ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf);
 /* TODO Names of function may change... */
 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
 				struct ecore_ptt *p_ptt,
@@ -125,9 +170,11 @@ enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
  * @param opaque	- opaque fid of the sb owner.
  * @param cleanup_set	- set(1) / clear(0)
  */
-void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
-				       struct ecore_ptt *p_ptt,
-				       u32 sb_id, u16 opaque, bool b_set);
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn	*p_hwfn,
+				       struct ecore_ptt		*p_ptt,
+				       u16			sb_id,
+				       u16			opaque,
+				       bool			b_set);
 
 /**
  * @brief ecore_int_cau_conf - configure cau for a given status
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index 799fbe8..49d0fac 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -48,9 +48,15 @@ struct ecore_sb_info_dbg {
 };
 
 struct ecore_sb_cnt_info {
-	int sb_cnt;
-	int sb_iov_cnt;
-	int sb_free_blk;
+	/* Original, current, and free SBs for PF */
+	int orig;
+	int cnt;
+	int free_cnt;
+
+	/* Original, current, and free SBs for child VFs */
+	int iov_orig;
+	int iov_cnt;
+	int free_cnt_iov;
 };
 
 static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
@@ -173,17 +179,17 @@ enum ecore_coalescing_fsm {
  *
  * @param p_hwfn
  * @param p_ptt
- * @param igu_sb_id
+ * @param p_sb
  * @param pi_index
  * @param state
  * @param timeset
  */
-void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
-			   struct ecore_ptt *p_ptt,
-			   u16 igu_sb_id,
-			   u32 pi_index,
-			   enum ecore_coalescing_fsm coalescing_fsm,
-			   u8 timeset);
+void ecore_int_cau_conf_pi(struct ecore_hwfn		*p_hwfn,
+			   struct ecore_ptt		*p_ptt,
+			   struct ecore_sb_info		*p_sb,
+			   u32				pi_index,
+			   enum ecore_coalescing_fsm	coalescing_fsm,
+			   u8				timeset);
 
 /**
  *
@@ -219,6 +225,7 @@ void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
 
 #define ECORE_SP_SB_ID 0xffff
+
 /**
  * @brief ecore_int_sb_init - Initializes the sb_info structure.
  *
@@ -324,4 +331,18 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
 					  struct ecore_sb_info *p_sb,
 					  struct ecore_sb_info_dbg *p_info);
 
+/**
+ * @brief - Move a free Status block between PF and child VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - The PF fastpath vector to be moved [re-assigned if claiming
+ *                from VF, given-up if moving to VF]
+ * @param b_to_vf - PF->VF == true, VF->PF == false
+ *
+ * @return ECORE_SUCCESS if SB successfully moved.
+ */
+enum _ecore_status_t
+ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			  u16 sb_id, bool b_to_vf);
 #endif
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 839bd46..3140fdd 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -207,9 +207,15 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 
 	p_cid->opaque_fid = opaque_fid;
 	p_cid->cid = cid;
-	p_cid->rel = *p_params;
 	p_cid->p_owner = p_hwfn;
 
+	/* Fill in parameters */
+	p_cid->rel.vport_id = p_params->vport_id;
+	p_cid->rel.queue_id = p_params->queue_id;
+	p_cid->rel.stats_id = p_params->stats_id;
+	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
+	p_cid->sb_idx = p_params->sb_idx;
+
 	/* Fill-in bits related to VFs' queues if information was provided */
 	if (p_vf_params != OSAL_NULL) {
 		p_cid->vfid = p_vf_params->vfid;
@@ -251,10 +257,6 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 		p_cid->abs.stats_id = p_cid->rel.stats_id;
 	}
 
-	/* SBs relevant information was already provided as absolute */
-	p_cid->abs.sb = p_cid->rel.sb;
-	p_cid->abs.sb_idx = p_cid->rel.sb_idx;
-
 out:
 	/* VF-images have provided the qid_usage_idx on their own.
 	 * Otherwise, we need to allocate a unique one.
@@ -273,7 +275,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 		   p_cid->rel.queue_id,	p_cid->qid_usage_idx,
 		   p_cid->abs.queue_id,
 		   p_cid->rel.stats_id, p_cid->abs.stats_id,
-		   p_cid->abs.sb, p_cid->abs.sb_idx);
+		   p_cid->sb_igu_id, p_cid->sb_idx);
 
 	return p_cid;
 
@@ -901,7 +903,7 @@ enum _ecore_status_t
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
 		   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
-		   p_cid->abs.vport_id, p_cid->abs.sb);
+		   p_cid->abs.vport_id, p_cid->sb_igu_id);
 
 	/* Get SPQ entry */
 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -917,8 +919,8 @@ enum _ecore_status_t
 
 	p_ramrod = &p_ent->ramrod.rx_queue_start;
 
-	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
-	p_ramrod->sb_index = p_cid->abs.sb_idx;
+	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
+	p_ramrod->sb_index = p_cid->sb_idx;
 	p_ramrod->vport_id = p_cid->abs.vport_id;
 	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
 	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
@@ -1153,8 +1155,8 @@ enum _ecore_status_t
 	p_ramrod = &p_ent->ramrod.tx_queue_start;
 	p_ramrod->vport_id = p_cid->abs.vport_id;
 
-	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
-	p_ramrod->sb_index = p_cid->abs.sb_idx;
+	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
+	p_ramrod->sb_index = p_cid->sb_idx;
 	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
 
 	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index 33f1fad..02aa5e8 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -18,7 +18,16 @@
 #define MAX_QUEUES_PER_QZONE	(sizeof(unsigned long) * 8)
 #define ECORE_QUEUE_CID_PF	(0xff)
 
-/* Additional parameters required for initialization of the queue_cid
+/* Almost identical to the ecore_queue_start_common_params,
+ * but here we maintain the SB index in IGU CAM.
+ */
+struct ecore_queue_cid_params {
+	u8 vport_id;
+	u16 queue_id;
+	u8 stats_id;
+};
+
+ /* Additional parameters required for initialization of the queue_cid
  * and are relevant only for a PF initializing one for its VFs.
  */
 struct ecore_queue_cid_vf_params {
@@ -44,13 +53,14 @@ struct ecore_queue_cid_vf_params {
 };
 
 struct ecore_queue_cid {
-	/* 'Relative' is a relative term ;-). Usually the indices [not counting
-	 * SBs] would be PF-relative, but there are some cases where that isn't
-	 * the case - specifically for a PF configuring its VF indices it's
-	 * possible some fields [E.g., stats-id] in 'rel' would already be abs.
-	 */
-	struct ecore_queue_start_common_params rel;
-	struct ecore_queue_start_common_params abs;
+	/* For stats-id, the `rel' is actually absolute as well */
+	struct ecore_queue_cid_params rel;
+	struct ecore_queue_cid_params abs;
+
+	/* These have no 'relative' meaning */
+	u16 sb_igu_id;
+	u8 sb_idx;
+
 	u32 cid;
 	u16 opaque_fid;
 
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index d09f3c4..a6740d5 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -11,6 +11,7 @@
 
 #include "ecore_status.h"
 #include "ecore_sp_api.h"
+#include "ecore_int_api.h"
 
 #ifndef __EXTRACT__LINUX__
 enum ecore_rss_caps {
@@ -35,8 +36,7 @@ struct ecore_queue_start_common_params {
 	/* Relative, but relevant only for PFs */
 	u8 stats_id;
 
-	/* These are always absolute */
-	u16 sb;
+	struct ecore_sb_info *p_sb;
 	u8 sb_idx;
 };
 
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 0886560..1ec6451 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -446,33 +446,6 @@ static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
 	return ECORE_SUCCESS;
 }
 
-static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
-					  struct ecore_ptt *p_ptt)
-{
-	struct ecore_igu_block *p_sb;
-	u16 sb_id;
-	u32 val;
-
-	if (!p_hwfn->hw_info.p_igu_info) {
-		DP_ERR(p_hwfn,
-		       "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
-		return;
-	}
-
-	for (sb_id = 0;
-	     sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
-		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
-		if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
-		    !(p_sb->status & ECORE_IGU_STATUS_PF)) {
-			val = ecore_rd(p_hwfn, p_ptt,
-				       IGU_REG_MAPPING_MEMORY + sb_id * 4);
-			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
-			ecore_wr(p_hwfn, p_ptt,
-				 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
-		}
-	}
-}
-
 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
@@ -634,7 +607,6 @@ void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 		return;
 
 	ecore_iov_setup_vfdb(p_hwfn);
-	ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
 }
 
 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
@@ -938,46 +910,38 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
 				     struct ecore_vf_info *vf,
 				     u16 num_rx_queues)
 {
-	struct ecore_igu_block *igu_blocks;
-	int qid = 0, igu_id = 0;
+	struct ecore_igu_block *p_block;
+	struct cau_sb_entry sb_entry;
+	int qid = 0;
 	u32 val = 0;
 
-	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
-
-	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
-		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
-
-	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+		num_rx_queues =
+		(u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
 
 	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
 	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
 	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
 
-	while ((qid < num_rx_queues) &&
-	       (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
-		if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
-			struct cau_sb_entry sb_entry;
-
-			vf->igu_sbs[qid] = (u16)igu_id;
-			igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
-
-			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
-
-			ecore_wr(p_hwfn, p_ptt,
-				 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
-				 val);
-
-			/* Configure igu sb in CAU which were marked valid */
-			ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
-						p_hwfn->rel_pf_id,
-						vf->abs_vf_id, 1);
-			ecore_dmae_host2grc(p_hwfn, p_ptt,
-					    (u64)(osal_uintptr_t)&sb_entry,
-					    CAU_REG_SB_VAR_MEMORY +
-					    igu_id * sizeof(u64), 2, 0);
-			qid++;
-		}
-		igu_id++;
+	for (qid = 0; qid < num_rx_queues; qid++) {
+		p_block = ecore_get_igu_free_sb(p_hwfn, false);
+		vf->igu_sbs[qid] = p_block->igu_sb_id;
+		p_block->status &= ~ECORE_IGU_STATUS_FREE;
+		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+		ecore_wr(p_hwfn, p_ptt,
+			 IGU_REG_MAPPING_MEMORY +
+			 sizeof(u32) * p_block->igu_sb_id, val);
+
+		/* Configure in CAU the IGU SB which was marked valid */
+		ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+					p_hwfn->rel_pf_id,
+					vf->abs_vf_id, 1);
+		ecore_dmae_host2grc(p_hwfn, p_ptt,
+				    (u64)(osal_uintptr_t)&sb_entry,
+				    CAU_REG_SB_VAR_MEMORY +
+				    p_block->igu_sb_id * sizeof(u64), 2, 0);
 	}
 
 	vf->num_sbs = (u8)num_rx_queues;
@@ -1013,10 +977,8 @@ static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
 		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
 		ecore_wr(p_hwfn, p_ptt, addr, val);
 
-		p_info->igu_map.igu_blocks[igu_id].status |=
-		    ECORE_IGU_STATUS_FREE;
-
-		p_hwfn->hw_info.p_igu_info->free_blks++;
+		p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
+		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
 	}
 
 	vf->num_sbs = 0;
@@ -1114,34 +1076,28 @@ enum _ecore_status_t
 	vf->vport_id = p_params->vport_id;
 	vf->rss_eng_id = p_params->rss_eng_id;
 
-	/* Perform sanity checking on the requested queue_id */
+	/* Since it's possible to relocate SBs, it's a bit difficult to check
+	 * things here. Simply check whether the index falls in the range
+	 * belonging to the PF.
+	 */
 	for (i = 0; i < p_params->num_queues; i++) {
-		u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
-		u16 max_vf_qzone = min_vf_qzone +
-				   FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
-
 		qid = p_params->req_rx_queue[i];
-		if (qid < min_vf_qzone || qid > max_vf_qzone) {
+		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
 			DP_NOTICE(p_hwfn, true,
-				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
 				  qid, p_params->rel_vf_id,
-				  min_vf_qzone, max_vf_qzone);
+				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
 			return ECORE_INVAL;
 		}
 
 		qid = p_params->req_tx_queue[i];
-		if (qid > max_vf_qzone) {
+		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
 			DP_NOTICE(p_hwfn, true,
-				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
-				  qid, p_params->rel_vf_id, max_vf_qzone);
+				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
+				  qid, p_params->rel_vf_id,
+				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
 			return ECORE_INVAL;
 		}
-
-		/* If client *really* wants, Tx qid can be shared with PF */
-		if (qid < min_vf_qzone)
-			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
-				   p_params->rel_vf_id, qid, i);
 	}
 
 	/* Limit number of queues according to number of CIDs */
@@ -2233,6 +2189,7 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	struct ecore_vf_queue *p_queue;
 	struct vfpf_start_rxq_tlv *req;
 	struct ecore_queue_cid *p_cid;
+	struct ecore_sb_info sb_dummy;
 	enum _ecore_status_t rc;
 
 	req = &mbx->req_virt->start_rxq;
@@ -2257,7 +2214,11 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	params.queue_id = (u8)p_queue->fw_rx_qid;
 	params.vport_id = vf->vport_id;
 	params.stats_id = vf->abs_vf_id + 0x10;
-	params.sb = req->hw_sb;
+
+	/* Since IGU index is passed via sb_info, construct a dummy one */
+	OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+	sb_dummy.igu_sb_id = req->hw_sb;
+	params.p_sb = &sb_dummy;
 	params.sb_idx = req->sb_index;
 
 	OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
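The dummy sb_info above exists because the queue-start API now takes a
struct ecore_sb_info pointer, while the VF's mailbox TLV still carries
only a raw IGU index. Both ends of the exchange, side by side (a sketch,
with field names as used elsewhere in this series):

    /* VF side (ecore_vf.c, later in this patch) -- the queue cid now
     * caches the IGU index, so the TLV is filled from it:
     *
     *     req->hw_sb    = p_cid->sb_igu_id;
     *     req->sb_index = p_cid->sb_idx;
     *
     * PF side (above) -- only the raw index arrives, so a throwaway
     * ecore_sb_info is built around it; the callee reads just igu_sb_id
     * before the stack variable goes out of scope.
     */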
@@ -2500,6 +2461,7 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	struct ecore_vf_queue *p_queue;
 	struct vfpf_start_txq_tlv *req;
 	struct ecore_queue_cid *p_cid;
+	struct ecore_sb_info sb_dummy;
 	u8 qid_usage_idx, vf_legacy;
 	u32 cid = 0;
 	enum _ecore_status_t rc;
@@ -2527,7 +2489,11 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	params.queue_id = p_queue->fw_tx_qid;
 	params.vport_id = vf->vport_id;
 	params.stats_id = vf->abs_vf_id + 0x10;
-	params.sb = req->hw_sb;
+
+	/* Since the IGU index is passed via sb_info, construct a dummy one */
+	OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+	sb_dummy.igu_sb_id = req->hw_sb;
+	params.p_sb = &sb_dummy;
 	params.sb_idx = req->sb_index;
 
 	OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index e4e2517..0a26141 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -652,8 +652,8 @@ enum _ecore_status_t
 	req->cqe_pbl_addr = cqe_pbl_addr;
 	req->cqe_pbl_size = cqe_pbl_size;
 	req->rxq_addr = bd_chain_phys_addr;
-	req->hw_sb = p_cid->rel.sb;
-	req->sb_index = p_cid->rel.sb_idx;
+	req->hw_sb = p_cid->sb_igu_id;
+	req->sb_index = p_cid->sb_idx;
 	req->bd_max_bytes = bd_max_bytes;
 	req->stat_id = -1; /* Keep initialized, for future compatibility */
 
@@ -774,8 +774,8 @@ enum _ecore_status_t
 	/* Tx */
 	req->pbl_addr = pbl_addr;
 	req->pbl_size = pbl_size;
-	req->hw_sb = p_cid->rel.sb;
-	req->sb_index = p_cid->rel.sb_idx;
+	req->hw_sb = p_cid->sb_igu_id;
+	req->sb_index = p_cid->sb_idx;
 
 	ecore_vf_pf_add_qid(p_hwfn, p_cid);
 
@@ -930,9 +930,12 @@ enum _ecore_status_t
 	req->only_untagged = only_untagged;
 
 	/* status blocks */
-	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
-		if (p_hwfn->sbs_info[i])
-			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
+		struct ecore_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
+
+		if (p_sb)
+			req->sb_addr[i] = p_sb->sb_phys;
+	}
 
 	/* add list termination tlv */
 	ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -1501,6 +1504,24 @@ u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
 	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
 }
 
+void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
+			  u16 sb_id, struct ecore_sb_info *p_sb)
+{
+	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+	if (!p_iov) {
+		DP_NOTICE(p_hwfn, true, "vf_iov_info isn't initialized\n");
+		return;
+	}
+
+	if (sb_id >= PFVF_MAX_SBS_PER_VF) {
+		DP_NOTICE(p_hwfn, true, "Can't configure SB %04x\n", sb_id);
+		return;
+	}
+
+	p_iov->sbs_info[sb_id] = p_sb;
+}
+
 enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
 					    u8 *p_change)
 {
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index 4096d5d..d9ee96b 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -41,6 +41,14 @@ struct ecore_vf_iov {
 	 * this has to be propagated as it affects the fastpath.
 	 */
 	bool b_pre_fp_hsi;
+
+	/* VFs currently pass the SB physical addresses on vport start,
+	 * and since they lack an IGU mapping they need to store the
+	 * addresses of previously registered SBs.
+	 * Even if the configuration flow were changed, backward
+	 * compatibility [with older PFs] would still require storing these.
+	 */
+	struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
 };
 
 
@@ -205,6 +213,15 @@ enum _ecore_status_t
 u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
 			   u16               sb_id);
 
+/**
+ * @brief Stores [or removes] a configured sb_info.
+ *
+ * @param p_hwfn
+ * @param sb_id - zero-based SB index [for fastpath]
+ * @param p_sb - may be OSAL_NULL [during removal].
+ */
+void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
+			  u16 sb_id, struct ecore_sb_info *p_sb);
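A hypothetical caller, to illustrate the intended pairing (the exact
hook points in the SB init/release flow are assumptions based on this
series' description):

    /* Registration when a VF initializes a status block ... */
    if (IS_VF(p_hwfn->p_dev))
            ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);

    /* ... and removal on release, passing OSAL_NULL */
    if (IS_VF(p_hwfn->p_dev))
            ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);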
 
 /**
  * @brief ecore_vf_pf_vport_start - perform vport start for VF.
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 5c3613c..8ce89e5 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -555,7 +555,7 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 		params.queue_id = rx_queue_id / edev->num_hwfns;
 		params.vport_id = 0;
 		params.stats_id = params.vport_id;
-		params.sb = fp->sb_info->igu_sb_id;
+		params.p_sb = fp->sb_info;
 		DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
 				fp->rxq->queue_id, fp->sb_info->igu_sb_id);
 		params.sb_idx = RX_PI;
@@ -614,7 +614,7 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 		params.queue_id = tx_queue_id / edev->num_hwfns;
 		params.vport_id = 0;
 		params.stats_id = params.vport_id;
-		params.sb = fp->sb_info->igu_sb_id;
+		params.p_sb = fp->sb_info;
 		DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
 				fp->txq->queue_id, fp->sb_info->igu_sb_id);
 		params.sb_idx = TX_PI(0); /* tc = 0 */
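With this change the PMD hands over the whole sb_info rather than
extracting the IGU index itself; the complete parameter block for an Rx
queue start now reads as below (a sketch assembled from the hunks
above):

    struct ecore_queue_start_common_params params;

    memset(&params, 0, sizeof(params));
    params.queue_id = rx_queue_id / edev->num_hwfns;
    params.vport_id = 0;
    params.stats_id = params.vport_id;
    params.p_sb = fp->sb_info; /* was: params.sb = fp->sb_info->igu_sb_id */
    params.sb_idx = RX_PI;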
-- 
1.7.10.3
