* [PATCH 30/53] net/qede/base: read per queue coalescing from HW
@ 2017-09-19  1:51 Rasesh Mody
  2017-09-19  1:51 ` [PATCH 31/53] net/qede/base: refactor device's number of ports logic Rasesh Mody
                   ` (22 more replies)
  0 siblings, 23 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Add base driver API to read per queue coalescing from hardware.
Move ecore_set_rxq|txq_coalesce() declarations to ecore_l2.h.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_dev_api.h |   14 ++++
 drivers/net/qede/base/ecore_l2.c      |  118 +++++++++++++++++++++++++++++++--
 drivers/net/qede/base/ecore_l2.h      |   24 +++++++
 drivers/net/qede/base/ecore_sriov.c   |   82 +++++++++++++++++++++--
 drivers/net/qede/base/ecore_vf.c      |   33 +++++++++
 drivers/net/qede/base/ecore_vf.h      |   30 +++++----
 drivers/net/qede/base/ecore_vfpf_if.h |   16 +++++
 7 files changed, 295 insertions(+), 22 deletions(-)

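For readers skimming the diff, the following is a minimal standalone sketch of
the decode step the new ecore_get_rxq_coalesce()/ecore_get_txq_coalesce()
helpers perform: the CAU status-block entry supplies a timer resolution, the
queue-zone register supplies a validity flag plus a timeset, and the coalescing
value in microseconds is the timeset shifted left by the timer resolution. The
field masks and names below are illustrative stand-ins, not the driver's
GET_FIELD()/COALESCING_TIMESET_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration only: one validity bit and a timeset. */
#define COAL_VALID_BIT   0x80u
#define COAL_TIMESET_MSK 0x7fu

/* Mirrors the decode done by the new read helpers: fail if the timeset was
 * never programmed, otherwise scale it by the CAU timer resolution.
 */
static int decode_coalesce(uint32_t qzone_reg, uint8_t timer_res,
			   uint16_t *coal_us)
{
	if (!(qzone_reg & COAL_VALID_BIT))
		return -1;

	*coal_us = (uint16_t)((qzone_reg & COAL_TIMESET_MSK) << timer_res);
	return 0;
}

int main(void)
{
	uint16_t coal;

	/* timeset 24 with timer resolution 1 -> 48 usec */
	if (!decode_coalesce(COAL_VALID_BIT | 24u, 1, &coal))
		printf("coalescing = %u usec\n", (unsigned int)coal);
	return 0;
}
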
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 8b28af9..fd453f5 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -635,6 +635,20 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn	*p_hwfn,
 					 struct ecore_ptt	*p_ptt,
 					 u16			id,
 					 bool			is_vf);
+
+/**
+ * @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
+ *
+ * @param p_hwfn
+ * @param p_coal - store coalesce value read from the hardware.
+ * @param p_handle
+ *
+ * @return enum _ecore_status_t
+ **/
+enum _ecore_status_t
+ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal,
+			 void *handle);
+
 /**
  * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
  *    Tx queue. The fact that we can configure coalescing to up to 511, but on
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 3071b46..7c2299a 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -196,6 +196,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
 			u16 opaque_fid, u32 cid,
 			struct ecore_queue_start_common_params *p_params,
+			bool b_is_rx,
 			struct ecore_queue_cid_vf_params *p_vf_params)
 {
 	struct ecore_queue_cid *p_cid;
@@ -214,6 +215,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 	p_cid->rel.queue_id = p_params->queue_id;
 	p_cid->rel.stats_id = p_params->stats_id;
 	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
+	p_cid->b_is_rx = b_is_rx;
 	p_cid->sb_idx = p_params->sb_idx;
 
 	/* Fill-in bits related to VFs' queues if information was provided */
@@ -287,6 +289,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 struct ecore_queue_cid *
 ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
 		       struct ecore_queue_start_common_params *p_params,
+		       bool b_is_rx,
 		       struct ecore_queue_cid_vf_params *p_vf_params)
 {
 	struct ecore_queue_cid *p_cid;
@@ -321,7 +324,7 @@ struct ecore_queue_cid *
 	}
 
 	p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
-					p_params, p_vf_params);
+					p_params, b_is_rx, p_vf_params);
 	if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
 		_ecore_cxt_release_cid(p_hwfn, cid, vfid);
 
@@ -330,9 +333,11 @@ struct ecore_queue_cid *
 
 static struct ecore_queue_cid *
 ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+			  bool b_is_rx,
 			  struct ecore_queue_start_common_params *p_params)
 {
-	return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
+	return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
+				      OSAL_NULL);
 }
 
 enum _ecore_status_t
@@ -984,7 +989,7 @@ enum _ecore_status_t
 	enum _ecore_status_t rc;
 
 	/* Allocate a CID for the queue */
-	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
 	if (p_cid == OSAL_NULL)
 		return ECORE_NOMEM;
 
@@ -1200,7 +1205,7 @@ enum _ecore_status_t
 	struct ecore_queue_cid *p_cid;
 	enum _ecore_status_t rc;
 
-	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
 	if (p_cid == OSAL_NULL)
 		return ECORE_INVAL;
 
@@ -2137,3 +2142,108 @@ enum _ecore_status_t
 
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
+
+int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+			   struct ecore_ptt *p_ptt,
+			   struct ecore_queue_cid *p_cid,
+			   u16 *p_rx_coal)
+{
+	u32 coalesce, address, is_valid;
+	struct cau_sb_entry sb_entry;
+	u8 timer_res;
+	enum _ecore_status_t rc;
+
+	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+				 p_cid->sb_igu_id * sizeof(u64),
+				 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+	if (rc != ECORE_SUCCESS) {
+		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+		return rc;
+	}
+
+	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
+
+	address = BAR0_MAP_REG_USDM_RAM +
+		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+	coalesce = ecore_rd(p_hwfn, p_ptt, address);
+
+	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+	if (!is_valid)
+		return ECORE_INVAL;
+
+	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+	*p_rx_coal = (u16)(coalesce << timer_res);
+
+	return ECORE_SUCCESS;
+}
+
+int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+			   struct ecore_ptt *p_ptt,
+			   struct ecore_queue_cid *p_cid,
+			   u16 *p_tx_coal)
+{
+	u32 coalesce, address, is_valid;
+	struct cau_sb_entry sb_entry;
+	u8 timer_res;
+	enum _ecore_status_t rc;
+
+	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+				 p_cid->sb_igu_id * sizeof(u64),
+				 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+	if (rc != ECORE_SUCCESS) {
+		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+		return rc;
+	}
+
+	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
+
+	address = BAR0_MAP_REG_XSDM_RAM +
+		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+	coalesce = ecore_rd(p_hwfn, p_ptt, address);
+
+	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+	if (!is_valid)
+		return ECORE_INVAL;
+
+	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+	*p_tx_coal = (u16)(coalesce << timer_res);
+
+	return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
+			 void *handle)
+{
+	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	struct ecore_ptt *p_ptt;
+
+	if (IS_VF(p_hwfn->p_dev)) {
+		rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
+		if (rc != ECORE_SUCCESS)
+			DP_NOTICE(p_hwfn, false,
+				  "Unable to read queue coalescing\n");
+
+		return rc;
+	}
+
+	p_ptt = ecore_ptt_acquire(p_hwfn);
+	if (!p_ptt)
+		return ECORE_AGAIN;
+
+	if (p_cid->b_is_rx) {
+		rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+		if (rc != ECORE_SUCCESS)
+			goto out;
+	} else {
+		rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+		if (rc != ECORE_SUCCESS)
+			goto out;
+	}
+
+out:
+	ecore_ptt_release(p_hwfn, p_ptt);
+
+	return rc;
+}
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index 3618ae6..f4212cf 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -64,6 +64,8 @@ struct ecore_queue_cid {
 	u32 cid;
 	u16 opaque_fid;
 
+	bool b_is_rx;
+
 	/* VFs queues are mapped differently, so we need to know the
 	 * relative queue associated with them [0-based].
 	 * Notice this is relevant on the *PF* queue-cid of its VF's queues,
@@ -96,6 +98,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 struct ecore_queue_cid *
 ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
 		       struct ecore_queue_start_common_params *p_params,
+		       bool b_is_rx,
 		       struct ecore_queue_cid_vf_params *p_vf_params);
 
 enum _ecore_status_t
@@ -140,4 +143,25 @@ enum _ecore_status_t
 			   u16 pq_id);
 
 u8 ecore_mcast_bin_from_mac(u8 *mac);
+
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+					    struct ecore_ptt *p_ptt,
+					    u16 coalesce,
+					    struct ecore_queue_cid *p_cid);
+
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+					    struct ecore_ptt *p_ptt,
+					    u16 coalesce,
+					    struct ecore_queue_cid *p_cid);
+
+enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+					    struct ecore_ptt *p_ptt,
+					    struct ecore_queue_cid *p_cid,
+					    u16 *p_hw_coal);
+
+enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+					    struct ecore_ptt *p_ptt,
+					    struct ecore_queue_cid *p_cid,
+					    u16 *p_hw_coal);
+
 #endif
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 82ba198..53d6b36 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -54,6 +54,7 @@
 	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
 	"CHANNEL_TLV_COALESCE_UPDATE",
 	"CHANNEL_TLV_QID",
+	"CHANNEL_TLV_COALESCE_READ",
 	"CHANNEL_TLV_MAX"
 };
 
@@ -1392,6 +1393,8 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
 	REG_WR(p_hwfn,
 	       GTT_BAR0_MAP_REG_USDM_RAM +
 	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
+	OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
 }
 
 static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
@@ -1476,8 +1479,6 @@ static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
 		      sizeof(struct channel_list_end_tlv));
 
 	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
-
-	OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
 }
 
 struct ecore_public_vf_info
@@ -2258,7 +2259,7 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	vf_params.qid_usage_idx = qid_usage_idx;
 
 	p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
-				       &params, &vf_params);
+				       &params, true, &vf_params);
 	if (p_cid == OSAL_NULL)
 		goto out;
 
@@ -2532,7 +2533,7 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	vf_params.qid_usage_idx = qid_usage_idx;
 
 	p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
-				       &params, &vf_params);
+				       &params, false, &vf_params);
 	if (p_cid == OSAL_NULL)
 		goto out;
 
@@ -3452,6 +3453,76 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
 			       length, status);
 }
 
+static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+					 struct ecore_ptt *p_ptt,
+					 struct ecore_vf_info *p_vf)
+{
+	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+	struct pfvf_read_coal_resp_tlv *p_resp;
+	struct vfpf_read_coal_req_tlv *req;
+	u8 status = PFVF_STATUS_FAILURE;
+	struct ecore_vf_queue *p_queue;
+	struct ecore_queue_cid *p_cid;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	u16 coal = 0, qid, i;
+	bool b_is_rx;
+
+	mbx->offset = (u8 *)mbx->reply_virt;
+	req = &mbx->req_virt->read_coal_req;
+
+	qid = req->qid;
+	b_is_rx = req->is_rx ? true : false;
+
+	if (b_is_rx) {
+		if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
+					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
+			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+				   "VF[%d]: Invalid Rx queue_id = %d\n",
+				   p_vf->abs_vf_id, qid);
+			goto send_resp;
+		}
+
+		p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
+		rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+		if (rc != ECORE_SUCCESS)
+			goto send_resp;
+	} else {
+		if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
+					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
+			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+				   "VF[%d]: Invalid Tx queue_id = %d\n",
+				   p_vf->abs_vf_id, qid);
+			goto send_resp;
+		}
+		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+			p_queue = &p_vf->vf_queues[qid];
+			if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
+			    (!p_queue->cids[i].b_is_tx))
+				continue;
+
+			p_cid = p_queue->cids[i].p_cid;
+
+			rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
+						    p_cid, &coal);
+			if (rc != ECORE_SUCCESS)
+				goto send_resp;
+			break;
+		}
+	}
+
+	status = PFVF_STATUS_SUCCESS;
+
+send_resp:
+	p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
+			       sizeof(*p_resp));
+	p_resp->coal = coal;
+
+	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
 					 struct ecore_ptt *p_ptt,
 					 struct ecore_vf_info *vf)
@@ -3986,6 +4057,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
 		case CHANNEL_TLV_COALESCE_UPDATE:
 			ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
 			break;
+		case CHANNEL_TLV_COALESCE_READ:
+			ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
+			break;
 		}
 	} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
 		/* If we've received a message from a VF we consider malicious
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index fb5d0a7..97ed0b7 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -1454,6 +1454,39 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
 	return rc;
 }
 
+enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+					      u16 *p_coal,
+					      struct ecore_queue_cid *p_cid)
+{
+	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_read_coal_resp_tlv *resp;
+	struct vfpf_read_coal_req_tlv *req;
+	enum _ecore_status_t rc;
+
+	/* clear mailbox and prep header tlv */
+	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ,
+			       sizeof(*req));
+	req->qid = p_cid->rel.queue_id;
+	req->is_rx = p_cid->b_is_rx ? 1 : 0;
+
+	ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+	resp = &p_iov->pf2vf_reply->read_coal_resp;
+
+	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+	if (rc != ECORE_SUCCESS)
+		goto exit;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		goto exit;
+
+	*p_coal = resp->coal;
+exit:
+	ecore_vf_pf_req_end(p_hwfn, rc);
+
+	return rc;
+}
+
 enum _ecore_status_t
 ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
 			 struct ecore_queue_cid     *p_cid)
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index 0945522..8c44d37 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -51,23 +51,25 @@ struct ecore_vf_iov {
 	struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
 };
 
-
-enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
-					    struct ecore_ptt *p_ptt,
-					    u16 coalesce,
-					    struct ecore_queue_cid *p_cid);
-enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
-					    struct ecore_ptt *p_ptt,
-					    u16 coalesce,
-					    struct ecore_queue_cid *p_cid);
+/**
+ * @brief VF - Get coalesce per VF's relative queue.
+ *
+ * @param p_hwfn
+ * @param p_coal - coalesce value in micro second for VF queues.
+ * @param p_cid  - queue cid
+ *
+ **/
+enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+					      u16 *p_coal,
+					      struct ecore_queue_cid *p_cid);
 /**
  * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
- *	Coalesce value '0' will omit the configuration.
+ *             Coalesce value '0' will omit the configuration.
  *
- *	@param p_hwfn
- *	@param rx_coal - coalesce value in micro second for rx queue
- *	@param tx_coal - coalesce value in micro second for tx queue
- *	@param queue_cid
+ * @param p_hwfn
+ * @param rx_coal - coalesce value in micro second for rx queue
+ * @param tx_coal - coalesce value in micro second for tx queue
+ * @param p_cid   - queue cid
  *
  **/
 enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 4df5619..d632423 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -503,6 +503,19 @@ struct vfpf_update_coalesce {
 	u8 padding[2];
 };
 
+struct vfpf_read_coal_req_tlv {
+	struct vfpf_first_tlv first_tlv;
+	u16 qid;
+	u8 is_rx;
+	u8 padding[5];
+};
+
+struct pfvf_read_coal_resp_tlv {
+	struct pfvf_tlv hdr;
+	u16 coal;
+	u8 padding[6];
+};
+
 union vfpf_tlvs {
 	struct vfpf_first_tlv			first_tlv;
 	struct vfpf_acquire_tlv			acquire;
@@ -516,6 +529,7 @@ struct vfpf_update_coalesce {
 	struct vfpf_ucast_filter_tlv		ucast_filter;
 	struct vfpf_update_tunn_param_tlv	tunn_param_update;
 	struct vfpf_update_coalesce		update_coalesce;
+	struct vfpf_read_coal_req_tlv		read_coal_req;
 	struct tlv_buffer_size			tlv_buf_size;
 };
 
@@ -525,6 +539,7 @@ struct vfpf_update_coalesce {
 	struct tlv_buffer_size			tlv_buf_size;
 	struct pfvf_start_queue_resp_tlv	queue_start;
 	struct pfvf_update_tunn_param_tlv	tunn_param_resp;
+	struct pfvf_read_coal_resp_tlv		read_coal_resp;
 };
 
 /* This is a structure which is allocated in the VF, which the PF may update
@@ -644,6 +659,7 @@ enum {
 	CHANNEL_TLV_UPDATE_TUNN_PARAM,
 	CHANNEL_TLV_COALESCE_UPDATE,
 	CHANNEL_TLV_QID,
+	CHANNEL_TLV_COALESCE_READ,
 	CHANNEL_TLV_MAX,
 
 	/* Required for iterating over vport-update tlvs.
-- 
1.7.10.3

* [PATCH 31/53] net/qede/base: refactor device's number of ports logic
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 32/53] net/qede/base: use proper units for rate limiting Rasesh Mody
                   ` (21 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

 - Avoid having num_of_ports as 0 [to prevent a division by 0 via
   MFW_PORT()]. Also fix the MFW_PORT() macro for CMT.
 - Read the device's number of ports from shmem.
 - Rename num_ports_in_engines to num_ports_in_engine.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore.h     |    3 +-
 drivers/net/qede/base/ecore_dev.c |   95 ++++++++++++++++++++++---------------
 drivers/net/qede/base/ecore_l2.c  |    4 +-
 drivers/net/qede/base/ecore_mcp.h |    7 ++-
 4 files changed, 66 insertions(+), 43 deletions(-)

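Before the diff, a minimal sketch of the MFW_PORT() hazard this patch removes:
the macro reduces the absolute PF id modulo the device's port count, so the
port count must never be left at 0. The helper below is a simplified stand-in
assuming only that modulo-with-guard behaviour, not the driver's actual macro
or the shmem read.

#include <stdio.h>

/* Illustrative stand-in for the reworked MFW_PORT() arithmetic. */
static unsigned int mfw_port(unsigned int abs_pf_id, unsigned int num_ports)
{
	/* mirror of the "default num_ports to something" guard */
	if (!num_ports)
		num_ports = 1;

	return abs_pf_id % num_ports;
}

int main(void)
{
	/* CMT: a single port, so every PF maps to MFW port 0 */
	printf("pf 1 -> mfw port %u\n", mfw_port(1, 1));
	/* 4-port AH device: PF 5 maps to MFW port 1 */
	printf("pf 5 -> mfw port %u\n", mfw_port(5, 4));
	return 0;
}
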
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 95cc01d..a1748f4 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -721,7 +721,8 @@ struct ecore_dev {
 #define CHIP_BOND_ID_SHIFT		0
 
 	u8				num_engines;
-	u8				num_ports_in_engines;
+	u8				num_ports;
+	u8				num_ports_in_engine;
 	u8				num_funcs_in_port;
 
 	u8				path_id;
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index e2698ea..67d8dd8 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -624,7 +624,7 @@ static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
 	qm_info->vport_wfq_en = 1;
 
 	/* TC config is different for AH 4 port */
-	four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+	four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;
 
 	/* in AH 4 port we have fewer TCs per port */
 	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
@@ -653,7 +653,7 @@ static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
 static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
 {
 	/* Initialize qm port parameters */
-	u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines;
+	u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;
 
 	/* indicate how ooo and high pri traffic is dealt with */
 	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -1007,7 +1007,7 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
 		   qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
 
 	/* port table */
-	for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) {
+	for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
 		port = &qm_info->qm_port_params[i];
 		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
 			   "port idx %d, active %d, active_phys_tcs %d,"
@@ -1136,7 +1136,7 @@ static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
 
 	qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
 				      sizeof(struct init_qm_port_params) *
-				      p_hwfn->p_dev->num_ports_in_engines);
+				      p_hwfn->p_dev->num_ports_in_engine);
 	if (!qm_info->qm_port_params)
 		goto alloc_err;
 
@@ -1438,7 +1438,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
 	}
 
 	/* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
-	switch (p_hwfn->p_dev->num_ports_in_engines) {
+	switch (p_hwfn->p_dev->num_ports_in_engine) {
 	case 1:
 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
 		break;
@@ -1451,7 +1451,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
 	default:
 		DP_NOTICE(p_hwfn, true,
 			  "num_ports_in_engine = %d not supported\n",
-			  p_hwfn->p_dev->num_ports_in_engines);
+			  p_hwfn->p_dev->num_ports_in_engine);
 		return ECORE_INVAL;
 	}
 
@@ -1525,10 +1525,10 @@ static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
 		if (ECORE_IS_AH(p_dev)) {
 			/* 2 for 4-port, 1 for 2-port, 0 for 1-port */
 			ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
-				 (p_dev->num_ports_in_engines >> 1));
+				 (p_dev->num_ports_in_engine >> 1));
 
 			ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
-				 p_dev->num_ports_in_engines == 4 ? 0 : 3);
+				 p_dev->num_ports_in_engine == 4 ? 0 : 3);
 		}
 	}
 
@@ -1667,7 +1667,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
 	}
 
 	ecore_qm_common_rt_init(p_hwfn,
-				p_dev->num_ports_in_engines,
+				p_dev->num_ports_in_engine,
 				qm_info->max_phys_tcs_per_port,
 				qm_info->pf_rl_en, qm_info->pf_wfq_en,
 				qm_info->vport_rl_en, qm_info->vport_wfq_en,
@@ -3610,14 +3610,14 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
 				      struct ecore_ptt *p_ptt)
 {
+	struct ecore_dev *p_dev = p_hwfn->p_dev;
 	u32 port_mode;
 
 #ifndef ASIC_ONLY
 	/* Read the port mode */
-	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+	if (CHIP_REV_IS_FPGA(p_dev))
 		port_mode = 4;
-	else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
-		 (p_hwfn->p_dev->num_hwfns > 1))
+	else if (CHIP_REV_IS_EMUL(p_dev) && p_dev->num_hwfns > 1)
 		/* In CMT on emulation, assume 1 port */
 		port_mode = 1;
 	else
@@ -3625,38 +3625,39 @@ static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
 	port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
 
 	if (port_mode < 3) {
-		p_hwfn->p_dev->num_ports_in_engines = 1;
+		p_dev->num_ports_in_engine = 1;
 	} else if (port_mode <= 5) {
-		p_hwfn->p_dev->num_ports_in_engines = 2;
+		p_dev->num_ports_in_engine = 2;
 	} else {
 		DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
-			  p_hwfn->p_dev->num_ports_in_engines);
+			  p_dev->num_ports_in_engine);
 
-		/* Default num_ports_in_engines to something */
-		p_hwfn->p_dev->num_ports_in_engines = 1;
+		/* Default num_ports_in_engine to something */
+		p_dev->num_ports_in_engine = 1;
 	}
 }
 
 static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
 					 struct ecore_ptt *p_ptt)
 {
+	struct ecore_dev *p_dev = p_hwfn->p_dev;
 	u32 port;
 	int i;
 
-	p_hwfn->p_dev->num_ports_in_engines = 0;
+	p_dev->num_ports_in_engine = 0;
 
 #ifndef ASIC_ONLY
-	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+	if (CHIP_REV_IS_EMUL(p_dev)) {
 		port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
 		switch ((port & 0xf000) >> 12) {
 		case 1:
-			p_hwfn->p_dev->num_ports_in_engines = 1;
+			p_dev->num_ports_in_engine = 1;
 			break;
 		case 3:
-			p_hwfn->p_dev->num_ports_in_engines = 2;
+			p_dev->num_ports_in_engine = 2;
 			break;
 		case 0xf:
-			p_hwfn->p_dev->num_ports_in_engines = 4;
+			p_dev->num_ports_in_engine = 4;
 			break;
 		default:
 			DP_NOTICE(p_hwfn, false,
@@ -3670,17 +3671,47 @@ static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
 					CNIG_REG_NIG_PORT0_CONF_K2_E5 +
 					(i * 4));
 			if (port & 1)
-				p_hwfn->p_dev->num_ports_in_engines++;
+				p_dev->num_ports_in_engine++;
 		}
+
+	if (!p_dev->num_ports_in_engine) {
+		DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");
+
+		/* Default num_ports_in_engine to something */
+		p_dev->num_ports_in_engine = 1;
+	}
 }
 
 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
 				   struct ecore_ptt *p_ptt)
 {
-	if (ECORE_IS_BB(p_hwfn->p_dev))
+	struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+	/* Determine the number of ports per engine */
+	if (ECORE_IS_BB(p_dev))
 		ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
 	else
 		ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
+
+	/* Get the total number of ports of the device */
+	if (p_dev->num_hwfns > 1) {
+		/* In CMT there is always only one port */
+		p_dev->num_ports = 1;
+#ifndef ASIC_ONLY
+	} else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {
+		p_dev->num_ports = p_dev->num_ports_in_engine *
+				   ecore_device_num_engines(p_dev);
+#endif
+	} else {
+		u32 addr, global_offsize, global_addr;
+
+		addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					    PUBLIC_GLOBAL);
+		global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+		global_addr = SECTION_ADDR(global_offsize, 0);
+		addr = global_addr + OFFSETOF(struct public_global, max_ports);
+		p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
+	}
 }
 
 static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
@@ -3724,14 +3755,8 @@ static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
 		}
 	}
 
-	/* TODO In get_hw_info, amoungst others:
-	 * Get MCP FW revision and determine according to it the supported
-	 * featrues (e.g. DCB)
-	 * Get boot mode
-	 * ecore_get_pcie_width_speed, WOL capability.
-	 * Number of global CQ-s (for storage
-	 */
-	ecore_hw_info_port_num(p_hwfn, p_ptt);
+	if (IS_LEAD_HWFN(p_hwfn))
+		ecore_hw_info_port_num(p_hwfn, p_ptt);
 
 	ecore_mcp_get_capabilities(p_hwfn, p_ptt);
 
@@ -5501,11 +5526,7 @@ int ecore_device_num_engines(struct ecore_dev *p_dev)
 
 int ecore_device_num_ports(struct ecore_dev *p_dev)
 {
-	/* in CMT always only one port */
-	if (p_dev->num_hwfns > 1)
-		return 1;
-
-	return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
+	return p_dev->num_ports;
 }
 
 void ecore_set_fw_mac_addr(__le16 *fw_msb,
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 7c2299a..f94cb49 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1973,6 +1973,7 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
 		    ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+		bool b_get_port_stats;
 
 		if (IS_PF(p_dev)) {
 			/* The main vport index is relative first */
@@ -1987,8 +1988,9 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
 			continue;
 		}
 
+		b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
 		__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
-					IS_PF(p_dev) ? true : false);
+					b_get_port_stats);
 
 out:
 		if (IS_PF(p_dev) && p_ptt)
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 9f3fd70..569c064 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -25,11 +25,10 @@
 					    ((rel_pfid) | \
 					     ((p_hwfn)->abs_pf_id & 1) << 3) : \
 					     rel_pfid)
-#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+#define MCP_PF_ID(p_hwfn)	MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
 
 #define MFW_PORT(_p_hwfn)	((_p_hwfn)->abs_pf_id % \
-				 ((_p_hwfn)->p_dev->num_ports_in_engines * \
-				  ecore_device_num_engines((_p_hwfn)->p_dev)))
+				 ecore_device_num_ports((_p_hwfn)->p_dev))
 
 struct ecore_mcp_info {
 	/* List for mailbox commands which were sent and wait for a response */
@@ -112,7 +111,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
  *
  * @param p_hwfn
  * @param p_ptt
- * Can only be called after `num_ports_in_engines' is set
+ * Can only be called after `num_ports_in_engine' is set
  */
 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
 			     struct ecore_ptt *p_ptt);
-- 
1.7.10.3

* [PATCH 32/53] net/qede/base: use proper units for rate limiting
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
  2017-09-19  1:51 ` [PATCH 31/53] net/qede/base: refactor device's number of ports logic Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 33/53] net/qede/base: use available macro Rasesh Mody
                   ` (20 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Change module parameter semantics to standard units (rate limiting is used
for the DCQCN feature).

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_sp_commands.c |   43 +++++++++++++++++++++++++----
 drivers/net/qede/base/ecore_sp_commands.h |    8 +++---
 2 files changed, 41 insertions(+), 10 deletions(-)

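For reference, the sketch below mirrors the two conversion helpers this patch
introduces, ecore_sp_rl_mb_to_qm() and ecore_sp_rl_gd_denom(): a rate given in
Mbps is scaled into the QM's 1.6 Mbps units, and the DCQCN alpha gain is turned
into the 1/64K denominator the firmware expects, both clamped to the 16-bit
ramrod fields. Plain C clamping stands in for OSAL_MIN_T() here.

#include <stdint.h>
#include <stdio.h>

/* Same resolutions as the patch: QM counts in 1.6 Mbps units, FW expresses
 * the DCQCN gain as a 1/64K fraction.
 */
#define QM_RL_RESOLUTION(mb_val)	((mb_val) * 10 / 16)
#define FW_GD_RESOLUTION(gd)		(64 * 1024 / (gd))

static uint16_t rl_mb_to_qm(uint32_t mb_val)
{
	uint32_t qm = QM_RL_RESOLUTION(mb_val);

	return (uint16_t)(qm > 0xffff ? 0xffff : qm);
}

static uint16_t rl_gd_denom(uint32_t gd)
{
	uint32_t denom;

	if (!gd)
		return 0;

	denom = FW_GD_RESOLUTION(gd);
	return (uint16_t)(denom > 0xffff ? 0xffff : denom);
}

int main(void)
{
	/* 10000 Mbps -> 6250 QM units; gd of 256 -> 256 in 1/64K units */
	printf("%u %u\n", (unsigned int)rl_mb_to_qm(10000),
	       (unsigned int)rl_gd_denom(256));
	return 0;
}
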
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index d67805c..2f5d453 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -422,6 +422,22 @@ enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
+/* QM rate limiter resolution is 1.6Mbps */
+#define QM_RL_RESOLUTION(mb_val)	((mb_val) * 10 / 16)
+
+/* FW uses 1/64k to express gd */
+#define FW_GD_RESOLUTION(gd)		(64 * 1024 / (gd))
+
+u16 ecore_sp_rl_mb_to_qm(u32 mb_val)
+{
+	return (u16)OSAL_MIN_T(u32, (u16)(~0U), QM_RL_RESOLUTION(mb_val));
+}
+
+u16 ecore_sp_rl_gd_denom(u32 gd)
+{
+	return gd ? (u16)OSAL_MIN_T(u32, (u16)(~0U), FW_GD_RESOLUTION(gd)) : 0;
+}
+
 enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
 					struct ecore_rl_update_params *params)
 {
@@ -453,15 +469,30 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
 	rl_update->rl_id_last = params->rl_id_last;
 	rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
 	rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
-	rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
-	rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
-	rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
-	rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
+	rl_update->rl_max_rate =
+		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
+	rl_update->rl_r_ai =
+		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
+	rl_update->rl_r_hai =
+		OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
+	rl_update->dcqcn_g =
+		OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
 	rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
-	rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
-		params->dcqcn_timeuot_us);
+	rl_update->dcqcn_timeuot_us =
+		OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
 	rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
 
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
+		   rl_update->qcn_update_param_flg,
+		   rl_update->dcqcn_update_param_flg,
+		   rl_update->rl_init_flg, rl_update->rl_start_flg,
+		   rl_update->rl_stop_flg, rl_update->rl_id_first,
+		   rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
+		   rl_update->rl_bc_rate, rl_update->rl_max_rate,
+		   rl_update->rl_r_ai, rl_update->rl_r_hai,
+		   rl_update->dcqcn_g, rl_update->dcqcn_k_us,
+		   rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);
+
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index 34d5a76..74f6a34 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -125,10 +125,10 @@ struct ecore_rl_update_params {
 	u8 rl_id_last;
 	u8 rl_dc_qcn_flg; /* If set, RL will used for DCQCN */
 	u32 rl_bc_rate; /* Byte Counter Limit */
-	u16 rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
-	u16 rl_r_ai; /* Active increase rate */
-	u16 rl_r_hai; /* Hyper active increase rate */
-	u16 dcqcn_g; /* DCQCN Alpha update gain in 1/64K resolution */
+	u32 rl_max_rate; /* Maximum rate in Mbps resolution */
+	u32 rl_r_ai; /* Active increase rate */
+	u32 rl_r_hai; /* Hyper active increase rate */
+	u32 dcqcn_gd; /* DCQCN Alpha update gain */
 	u32 dcqcn_k_us; /* DCQCN Alpha update interval */
 	u32 dcqcn_timeuot_us;
 	u32 qcn_timeuot_us;
-- 
1.7.10.3

* [PATCH 33/53] net/qede/base: use available macro
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
  2017-09-19  1:51 ` [PATCH 31/53] net/qede/base: refactor device's number of ports logic Rasesh Mody
  2017-09-19  1:51 ` [PATCH 32/53] net/qede/base: use proper units for rate limiting Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 34/53] net/qede/base: use function pointers for spq async callback Rasesh Mody
                   ` (19 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

 - Use OSAL_PAGE_SIZE instead of assuming a 4096-byte page size.
 - While at it, make sure the minimum number of doorbells that can be issued
   is 4.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore.h     |    1 +
 drivers/net/qede/base/ecore_dev.c |   16 ++++++++--------
 2 files changed, 9 insertions(+), 8 deletions(-)

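A standalone sketch of the reworked sizing in ecore_hw_init_dpi_size(): each
DPI holds at least ECORE_MIN_WIDS (4) WIDs, the WID count is rounded up to a
power of two, and the resulting size is aligned up to the OS page size.
PAGE_SZ below is a 4 kB stand-in for OSAL_PAGE_SIZE, which this patch stops
hard-coding in the driver.

#include <stdio.h>

#define WID_SIZE	1024u	/* ECORE_WID_SIZE */
#define MIN_WIDS	4u	/* new ECORE_MIN_WIDS */
#define PAGE_SZ		4096u	/* stand-in for OSAL_PAGE_SIZE */

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static unsigned int dpi_page_size(unsigned int n_cpus)
{
	unsigned int n_wids = n_cpus > MIN_WIDS ? n_cpus : MIN_WIDS;
	unsigned int sz = WID_SIZE * roundup_pow2(n_wids);

	/* align up to the page size, as the patch does */
	return (sz + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
}

int main(void)
{
	/* 1 CPU still gets a 4-WID (4 kB) DPI; 6 CPUs round up to 8 kB */
	printf("%u %u\n", dpi_page_size(1), dpi_page_size(6));
	return 0;
}
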
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index a1748f4..823e8f8 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -50,6 +50,7 @@
 #define FCOE_BDQ_ID(_port_id) (_port_id + 2)
 /* Constants */
 #define ECORE_WID_SIZE		(1024)
+#define ECORE_MIN_WIDS		(4)
 
 /* Configurable */
 #define ECORE_PF_DEMS_SIZE	(4)
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 67d8dd8..0cd6f22 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1892,9 +1892,9 @@ static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
 ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
 		       struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
 {
-	u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
-	u32 dpi_bit_shift, dpi_count;
+	u32 dpi_bit_shift, dpi_count, dpi_page_size;
 	u32 min_dpis;
+	u32 n_wids;
 
 	/* Calculate DPI size
 	 * ------------------
@@ -1917,12 +1917,11 @@ static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
 	 * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096
 	 * containing 4 WIDs.
 	 */
-	dpi_page_size_1 = ECORE_WID_SIZE * n_cpus;
-	dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE);
-	dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2);
-	dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size);
+	n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus);
+	dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids);
+	dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) &
+			~(OSAL_PAGE_SIZE - 1);
 	dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
-
 	dpi_count = pwm_region_size / dpi_page_size;
 
 	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
@@ -1981,7 +1980,8 @@ enum ECORE_ROCE_EDPM_MODE {
 	    ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
 					  OSAL_NULL) +
 	    ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
-	norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
+	norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn,
+			       OSAL_PAGE_SIZE);
 	min_addr_reg1 = norm_regsize / 4096;
 	pwm_regsize = db_bar_size - norm_regsize;
 
-- 
1.7.10.3

* [PATCH 34/53] net/qede/base: use function pointers for spq async callback
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (2 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 33/53] net/qede/base: use available macro Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 35/53] net/qede/base: fix API return types Rasesh Mody
                   ` (18 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Change the SPQ async callback handling to use function pointers instead of
a switch case on the protocol ID.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_spq.c   |   38 +++++++++++++++++++++++++++++------
 drivers/net/qede/base/ecore_spq.h   |   17 ++++++++++++++++
 drivers/net/qede/base/ecore_sriov.c |   20 ++++++++++++++----
 drivers/net/qede/base/ecore_sriov.h |   13 ------------
 4 files changed, 65 insertions(+), 23 deletions(-)

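To illustrate the new dispatch scheme, here is a reduced standalone sketch of
a per-protocol callback table like the one ecore_spq_register_async_cb() fills
in: registration stores a function pointer indexed by protocol id, and the EQ
completion path calls through the table instead of switching on the protocol.
The types and names are simplified stand-ins, not the driver's signatures.

#include <stdio.h>

#define MAX_PROTO 8	/* stand-in for MAX_PROTOCOL_TYPE */

typedef int (*async_cb)(unsigned int opcode, unsigned int echo);

static async_cb cb_table[MAX_PROTO];

static int register_cb(unsigned int proto, async_cb cb)
{
	if (proto >= MAX_PROTO)
		return -1;

	cb_table[proto] = cb;
	return 0;
}

/* Equivalent of ecore_async_event_completion() after this patch: look the
 * protocol up in the table and complain if nobody registered for it.
 */
static int dispatch(unsigned int proto, unsigned int op, unsigned int echo)
{
	if (proto >= MAX_PROTO || !cb_table[proto]) {
		printf("Unknown async completion for protocol %u\n", proto);
		return -1;
	}

	return cb_table[proto](op, echo);
}

static int sriov_eqe_event(unsigned int op, unsigned int echo)
{
	printf("sriov event: opcode %u echo %u\n", op, echo);
	return 0;
}

int main(void)
{
	register_cb(0, sriov_eqe_event);	/* e.g. the COMMON protocol slot */
	dispatch(0, 1, 42);			/* handled by the callback */
	dispatch(3, 1, 42);			/* no callback registered */
	return 0;
}
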
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index ee0f06c..a346166 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -271,12 +271,16 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
 			     struct event_ring_entry *p_eqe)
 {
-	switch (p_eqe->protocol_id) {
-	case PROTOCOLID_COMMON:
-		return ecore_sriov_eqe_event(p_hwfn,
-					     p_eqe->opcode,
-					     p_eqe->echo, &p_eqe->data);
-	default:
+	ecore_spq_async_comp_cb cb;
+
+	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+		return ECORE_INVAL;
+
+	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+	if (cb) {
+		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+			  &p_eqe->data, p_eqe->fw_return_code);
+	} else {
 		DP_NOTICE(p_hwfn,
 			  true, "Unknown Async completion for protocol: %d\n",
 			  p_eqe->protocol_id);
@@ -284,6 +288,28 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
 	}
 }
 
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+			    enum protocol_type protocol_id,
+			    ecore_spq_async_comp_cb cb)
+{
+	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+		return ECORE_INVAL;
+
+	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+	return ECORE_SUCCESS;
+}
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+			      enum protocol_type protocol_id)
+{
+	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+		return;
+
+	p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
+}
+
 /***************************************************************************
  * EQ API
  ***************************************************************************/
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index 31d8a3e..526cff0 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -86,6 +86,22 @@ struct ecore_consq {
 	struct ecore_chain	chain;
 };
 
+typedef enum _ecore_status_t
+(*ecore_spq_async_comp_cb)(struct ecore_hwfn *p_hwfn,
+			   u8 opcode,
+			   u16 echo,
+			   union event_ring_data *data,
+			   u8 fw_return_code);
+
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+			    enum protocol_type protocol_id,
+			    ecore_spq_async_comp_cb cb);
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+			      enum protocol_type protocol_id);
+
 struct ecore_spq {
 	osal_spinlock_t			lock;
 
@@ -127,6 +143,7 @@ struct ecore_spq {
 
 	u32				db_addr_offset;
 	struct core_db_data		db_data;
+	ecore_spq_async_comp_cb		async_comp_cb[MAX_PROTOCOL_TYPE];
 };
 
 struct ecore_port;
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 53d6b36..2b8e24c 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -27,6 +27,12 @@
 #include "ecore_init_fw_funcs.h"
 #include "ecore_sp_commands.h"
 
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+						  u8 opcode,
+						  __le16 echo,
+						  union event_ring_data *data,
+						  u8 fw_return_code);
+
 const char *ecore_channel_tlvs_string[] = {
 	"CHANNEL_TLV_NONE",	/* ends tlv sequence */
 	"CHANNEL_TLV_ACQUIRE",
@@ -591,6 +597,9 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
 
 	p_hwfn->pf_iov_info = p_sriov;
 
+	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+				    ecore_sriov_eqe_event);
+
 	return ecore_iov_allocate_vfdb(p_hwfn);
 }
 
@@ -604,6 +613,8 @@ void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
 
 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
 {
+	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
 	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
 		ecore_iov_free_vfdb(p_hwfn);
 		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
@@ -4195,10 +4206,11 @@ static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
 	OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
 }
 
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
-					   u8 opcode,
-					   __le16 echo,
-					   union event_ring_data *data)
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+						  u8 opcode,
+						  __le16 echo,
+						  union event_ring_data *data,
+						  u8 OSAL_UNUSED fw_return_code)
 {
 	switch (opcode) {
 	case COMMON_EVENT_VF_PF_CHANNEL:
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index effeb69..31bdee1 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -254,19 +254,6 @@ void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
 void ecore_iov_free_hw_info(struct ecore_dev *p_dev);
 
 /**
- * @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
- *
- * @param p_hwfn
- * @param opcode
- * @param echo
- * @param data
- */
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn	 *p_hwfn,
-					   u8			 opcode,
-					   __le16		 echo,
-					   union event_ring_data *data);
-
-/**
  * @brief Mark structs of vfs that have been FLR-ed.
  *
  * @param p_hwfn
-- 
1.7.10.3

* [PATCH 35/53] net/qede/base: fix API return types
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (3 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 34/53] net/qede/base: use function pointers for spq async callback Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 36/53] net/qede/base: semantic changes Rasesh Mody
                   ` (17 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev, stable

- Make ecore_ptt_get_hw_addr() static
- Convert ecore_dcbx_get_operational_params() to return void since its
  return value is never used

Fixes: 26ae839d06e9 ("qede: add DCBX support")
Cc: stable@dpdk.org

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_dcbx.c |    6 ++----
 drivers/net/qede/base/ecore_hw.c   |    2 +-
 drivers/net/qede/base/ecore_hw.h   |    9 ---------
 3 files changed, 3 insertions(+), 14 deletions(-)

diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 25ae21c..212b733 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -570,7 +570,7 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 	params->remote.valid = true;
 }
 
-static enum _ecore_status_t
+static void
 ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
 				  struct ecore_dcbx_get *params)
 {
@@ -593,7 +593,7 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 		p_operational->enabled = enabled;
 		p_operational->valid = false;
 		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n");
-		return ECORE_INVAL;
+		return;
 	}
 
 	p_feat = &p_hwfn->p_dcbx_info->operational.features;
@@ -626,8 +626,6 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 	p_operational->err = err;
 	p_operational->enabled = enabled;
 	p_operational->valid = true;
-
-	return ECORE_SUCCESS;
 }
 
 static void  ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 36457ac..d6217d8 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -136,7 +136,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 	OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
 }
 
-u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
+static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
 {
 	/* The HW is using DWORDS and we need to translate it to Bytes */
 	return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index 0f3e88b..392351a 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -100,15 +100,6 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
 void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
 
 /**
- * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
- *
- * @param p_ptt
- *
- * @return u32
- */
-u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt);
-
-/**
  * @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
  *
  * @param p_hwfn
-- 
1.7.10.3

* [PATCH 36/53] net/qede/base: semantic changes
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (4 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 35/53] net/qede/base: fix API return types Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 37/53] net/qede/base: handle the error condition properly Rasesh Mody
                   ` (16 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

- Make ecore_dcbx_set_local_params() and ecore_cm_init_pf() static
- Remove src_proto()
- Remove the reference to PROTOCOLID_TOE when determining whether a protocol
  requires SRC ILT memory
- Add auxiliary variables in various places
- All other changes are simple semantic corrections

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_cxt.c      |   55 +++++++++++++------------------
 drivers/net/qede/base/ecore_dcbx.c     |   15 +++------
 drivers/net/qede/base/ecore_dcbx.h     |    2 --
 drivers/net/qede/base/ecore_dev.c      |    2 +-
 drivers/net/qede/base/ecore_init_ops.c |    4 +--
 drivers/net/qede/base/ecore_l2.c       |   56 ++++++++++++++++++--------------
 drivers/net/qede/base/ecore_mcp.c      |    2 +-
 drivers/net/qede/base/ecore_mcp_api.h  |   20 ++++++++----
 drivers/net/qede/qede_main.c           |    2 +-
 9 files changed, 76 insertions(+), 82 deletions(-)

diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 24aeda9..46455ea 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -230,13 +230,6 @@ struct ecore_cxt_mngr {
 	/* TODO - VF arfs filters ? */
 };
 
-/* check if resources/configuration is required according to protocol type */
-static OSAL_INLINE bool src_proto(struct ecore_hwfn *p_hwfn,
-				  enum protocol_type type)
-{
-	return type == PROTOCOLID_TOE;
-}
-
 static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
 {
 	return type == PROTOCOLID_TOE;
@@ -270,16 +263,12 @@ struct ecore_src_iids {
 	u32 per_vf_cids;
 };
 
-static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_hwfn *p_hwfn,
-					   struct ecore_cxt_mngr *p_mngr,
-					   struct ecore_src_iids *iids)
+static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
+			       struct ecore_src_iids *iids)
 {
 	u32 i;
 
 	for (i = 0; i < MAX_CONN_TYPES; i++) {
-		if (!src_proto(p_hwfn, i))
-			continue;
-
 		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
 		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
 	}
@@ -397,6 +386,20 @@ static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
 	return OSAL_NULL;
 }
 
+static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
+{
+	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+	p_mgr->srq_count = num_srqs;
+}
+
+u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+	return p_mgr->srq_count;
+}
+
 /* set the iids (cid/tid) count per protocol */
 static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
 				   enum protocol_type type,
@@ -706,7 +709,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 
 	/* SRC */
 	p_cli = &p_mngr->clients[ILT_CLI_SRC];
-	ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+	ecore_cxt_src_iids(p_mngr, &src_iids);
 
 	/* Both the PF and VFs searcher connections are stored in the per PF
 	 * database. Thus sum the PF searcher cids and all the VFs searcher
@@ -820,7 +823,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
 	if (!p_src->active)
 		return ECORE_SUCCESS;
 
-	ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+	ecore_cxt_src_iids(p_mngr, &src_iids);
 	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
 	total_size = conn_num * sizeof(struct src_ent);
 
@@ -1447,7 +1450,7 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 }
 
 /* CM PF */
-void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+static void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
 {
 	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
 		     ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
@@ -1642,7 +1645,7 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
 	struct ecore_src_iids src_iids;
 
 	OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
-	ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+	ecore_cxt_src_iids(p_mngr, &src_iids);
 	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
 	if (!conn_num)
 		return;
@@ -1769,9 +1772,11 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
 static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	struct ecore_conn_type_cfg *p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+	struct ecore_conn_type_cfg *p_fcoe;
 	struct ecore_tid_seg *p_tid;
 
+	p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+
 	/* If FCoE is active set the MAX OX_ID (tid) in the Parser */
 	if (!p_fcoe->cid_count)
 		return;
@@ -1972,20 +1977,6 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
 	return ECORE_SUCCESS;
 }
 
-static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
-{
-	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
-
-	p_mgr->srq_count = num_srqs;
-}
-
-u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
-{
-	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
-
-	return p_mgr->srq_count;
-}
-
 enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
 {
 	/* Set the number of required CORE connections */
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 212b733..889d91a 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -955,11 +955,8 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
 		return ECORE_INVAL;
 
 	p_ptt = ecore_ptt_acquire(p_hwfn);
-	if (!p_ptt) {
-		rc = ECORE_TIMEOUT;
-		DP_ERR(p_hwfn, "rc = %d\n", rc);
-		return rc;
-	}
+	if (!p_ptt)
+		return ECORE_TIMEOUT;
 
 	rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
 	if (rc != ECORE_SUCCESS)
@@ -1125,7 +1122,7 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
 	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
 }
 
-static enum _ecore_status_t
+static void
 ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
 			    struct dcbx_local_params *local_admin,
 			    struct ecore_dcbx_set *params)
@@ -1155,8 +1152,6 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
 	if (params->override_flags & ECORE_DCBX_OVERRIDE_APP_CFG)
 		ecore_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
 					&params->config.params, ieee);
-
-	return ECORE_SUCCESS;
 }
 
 static enum _ecore_status_t
@@ -1255,10 +1250,8 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
 
 	dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
 			       sizeof(*dcbx_info));
-	if (!dcbx_info) {
-		DP_ERR(p_hwfn, "Failed to allocate struct ecore_dcbx_info\n");
+	if (!dcbx_info)
 		return ECORE_NOMEM;
-	}
 
 	OSAL_MEMSET(dcbx_info, 0, sizeof(*dcbx_info));
 	rc = ecore_dcbx_query_params(p_hwfn, dcbx_info,
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index eba2d91..a42ebb4 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -47,8 +47,6 @@ enum _ecore_status_t
 ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
 			    enum ecore_mib_read_type);
 
-enum _ecore_status_t ecore_dcbx_read_lldp_params(struct ecore_hwfn *,
-						 struct ecore_ptt *);
 enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
 void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
 void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 0cd6f22..9c362a9 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -361,7 +361,7 @@ void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
 					 */
 
 /* Derived */
-#define ECORE_MIN_PWM_REGION	((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
+#define ECORE_MIN_PWM_REGION	(ECORE_WID_SIZE * ECORE_MIN_DPIS)
 
 enum BAR_ID {
 	BAR_ID_0,		/* used for GRC */
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index 1a2d2f4..4491a14 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -450,12 +450,12 @@ static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
 				u32 phase, u32 phase_id)
 {
 	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
+	u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);
 
 	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
 	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
 	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
-		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
-				 INIT_IF_PHASE_OP_CMD_OFFSET);
+		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
 	else
 		return 0;
 }
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index f94cb49..8449215 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -235,7 +235,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 	}
 
 	/* Calculate the engine-absolute indices of the resources.
-	 * The would guarantee they're valid later on.
+	 * This would guarantee they're valid later on.
 	 * In some cases [SBs] we already have the right values.
 	 */
 	rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
@@ -347,6 +347,7 @@ enum _ecore_status_t
 	struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
+	struct eth_vport_tpa_param *p_tpa;
 	u16 rx_mode = 0, tx_err = 0;
 	u8 abs_vport_id = 0;
 	enum _ecore_status_t rc = ECORE_NOTIMPL;
@@ -371,8 +372,8 @@ enum _ecore_status_t
 	p_ramrod->vport_id = abs_vport_id;
 
 	p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
-	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
 	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
+	p_ramrod->inner_vlan_removal_en	= p_params->remove_inner_vlan;
 	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
 	p_ramrod->untagged = p_params->only_untagged;
 	p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
@@ -407,22 +408,22 @@ enum _ecore_status_t
 	p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
 
 	/* TPA related fields */
-	OSAL_MEMSET(&p_ramrod->tpa_param, 0,
-		    sizeof(struct eth_vport_tpa_param));
-	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+	p_tpa = &p_ramrod->tpa_param;
+	OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
+	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
 
 	switch (p_params->tpa_mode) {
 	case ECORE_TPA_MODE_GRO:
-		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
-		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
-		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
-		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
-		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
-		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
-		p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
-		p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
-		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
-		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+		p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+		p_tpa->tpa_max_size = (u16)-1;
+		p_tpa->tpa_min_size_to_cont = p_params->mtu / 2;
+		p_tpa->tpa_min_size_to_start = p_params->mtu / 2;
+		p_tpa->tpa_ipv4_en_flg = 1;
+		p_tpa->tpa_ipv6_en_flg = 1;
+		p_tpa->tpa_ipv4_tunn_en_flg = 1;
+		p_tpa->tpa_ipv6_tunn_en_flg = 1;
+		p_tpa->tpa_pkt_split_flg = 1;
+		p_tpa->tpa_gro_consistent_flg = 1;
 		break;
 	default:
 		break;
@@ -464,6 +465,7 @@ enum _ecore_status_t
 			  struct ecore_rss_params *p_rss)
 {
 	struct eth_vport_rss_config *p_config;
+	u16 capabilities = 0;
 	int i, table_size;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 
@@ -490,26 +492,26 @@ enum _ecore_status_t
 
 	p_config->capabilities = 0;
 
-	SET_FIELD(p_config->capabilities,
+	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
 		  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
-	SET_FIELD(p_config->capabilities,
+	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
 		  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
-	SET_FIELD(p_config->capabilities,
+	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
 		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
-	SET_FIELD(p_config->capabilities,
+	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
 		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
-	SET_FIELD(p_config->capabilities,
+	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
 		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
-	SET_FIELD(p_config->capabilities,
+	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
 		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
 	p_config->tbl_size = p_rss->rss_table_size_log;
-	p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);
+	p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
 		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
@@ -641,6 +643,7 @@ enum _ecore_status_t
 			      struct ecore_sge_tpa_params *p_params)
 {
 	struct eth_vport_tpa_param *p_tpa;
+	u16 val;
 
 	if (!p_params) {
 		p_ramrod->common.update_tpa_param_flg = 0;
@@ -662,9 +665,12 @@ enum _ecore_status_t
 	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
 	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
 	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
-	p_tpa->tpa_max_size = p_params->tpa_max_size;
-	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
-	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+	val = p_params->tpa_max_size;
+	p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
+	val = p_params->tpa_min_size_to_start;
+	p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
+	val = p_params->tpa_min_size_to_cont;
+	p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
 }
 
 static void
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 5aa3210..6c99e94 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1441,7 +1441,7 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
 	 */
 	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
 
-	return rc;
+	return ECORE_SUCCESS;
 }
 
 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index cc5a43e..be3e91f 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -59,9 +59,15 @@ struct ecore_mcp_link_capabilities {
 struct ecore_mcp_link_state {
 	bool link_up;
 
-	u32 line_speed; /* In Mb/s */
 	u32 min_pf_rate; /* In Mb/s */
-	u32 speed; /* In Mb/s */
+
+	/* Actual link speed in Mb/s */
+	u32 line_speed;
+
+	/* PF max speed in MB/s, deduced from line_speed
+	 * according to PF max bandwidth configuration.
+	 */
+	u32 speed;
 	bool full_duplex;
 
 	bool an;
@@ -594,9 +600,9 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
  * @param p_hwfn      - hw function
  * @param p_ptt       - PTT required for register access
  * @param cmd         - command to be sent to the MCP
- * @param param       - optional param
- * @param o_mcp_resp  - the MCP response code (exclude sequence)
- * @param o_mcp_param - optional parameter provided by the MCP response
+ * @param param       - Optional param
+ * @param o_mcp_resp  - The MCP response code (exclude sequence)
+ * @param o_mcp_param - Optional parameter provided by the MCP response
  *
  * @return enum _ecore_status_t -
  *      ECORE_SUCCESS - operation was successful
@@ -849,7 +855,7 @@ enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
  *  @param p_dev
  *  @param addr - nvm offset
  *  @param cmd - nvm command
- *  @param p_buf - nvm write buffer
+ *  @param p_buf - nvm read buffer
  *  @param len - buffer len
  *
  * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
@@ -862,7 +868,7 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
  *
  *  @param p_dev
  *  @param addr - nvm offset
- *  @param p_buf - nvm write buffer
+ *  @param p_buf - nvm read buffer
  *  @param len - buffer len
  *
  * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index e6d2351..2ca4206 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -423,7 +423,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 			info->num_queues +=
 			FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);
 
-		if (edev->p_iov_info)
+		if (IS_ECORE_SRIOV(edev))
 			max_vf_vlan_filters = edev->p_iov_info->total_vfs *
 					      ECORE_ETH_VF_NUM_VLAN_FILTERS;
 		info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) -
-- 
1.7.10.3

* [PATCH 37/53] net/qede/base: handle the error condition properly
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (5 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 36/53] net/qede/base: semantic changes Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 38/53] net/qede/base: add new macro for CMT mode Rasesh Mody
                   ` (15 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

If sending a message from the VF to the PF times out, do not bail out right
away; take the proper cleanup action instead. The goto statement routes the
error path through ecore_vf_pf_req_end(), which unlocks the previously held
mutex.
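
Below is a minimal, self-contained sketch of that error-path pattern, with
hypothetical names and a pthread mutex standing in for the channel lock and
ecore_vf_pf_req_end(); it is an illustration, not the driver code itself:

  /* Every failure after the lock is taken must leave through a single
   * exit label that releases it.
   */
  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;

  /* Stands in for ecore_send_msg2pf(); non-zero mimics a PF timeout. */
  static int send_request(int fail)
  {
          return fail ? -1 : 0;
  }

  static int acquire(int fail)
  {
          int rc;

          pthread_mutex_lock(&chan_lock);       /* taken before the request */

          rc = send_request(fail);
          if (rc != 0)
                  goto exit;                    /* not "return rc;" */

          /* ... process the response while still holding the lock ... */

  exit:
          pthread_mutex_unlock(&chan_lock);     /* ecore_vf_pf_req_end() analogue */
          return rc;
  }

  int main(void)
  {
          printf("ok path: %d, timeout path: %d\n", acquire(0), acquire(1));
          return 0;
  }

Returning directly from inside the locked region on a timeout, as the old
code did, would leave the lock held.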

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_vf.c |    6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 97ed0b7..e84f97a 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -237,10 +237,8 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 		/* send acquire request */
 		rc = ecore_send_msg2pf(p_hwfn,
 				       &resp->hdr.status, sizeof(*resp));
-
-		/* PF timeout */
-		if (rc)
-			return rc;
+		if (rc != ECORE_SUCCESS)
+			goto exit;
 
 		/* copy acquire response from buffer to p_hwfn */
 		OSAL_MEMCPY(&p_iov->acquire_resp,
-- 
1.7.10.3

* [PATCH 38/53] net/qede/base: add new macro for CMT mode
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (6 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 37/53] net/qede/base: handle the error condition properly Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 39/53] net/qede/base: change verbosity Rasesh Mody
                   ` (14 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

- Add an ECORE_IS_CMT macro (CMT: couple mode teaming) and use it in all the
places that check whether the number of HW functions per device is greater
than 1.
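
A small, hypothetical sketch of the idea (standalone types, not the ecore
structures) -- wrapping the repeated "num_hwfns > 1" comparison in a macro
that names the intent:

  #include <stdio.h>

  struct dev {
          unsigned char num_hwfns;      /* hypothetical stand-in for ecore_dev */
  };

  /* Mirrors the intent of ECORE_IS_CMT(): "is this a 100G / CMT device?" */
  #define IS_CMT(d)     ((d)->num_hwfns > 1)

  int main(void)
  {
          struct dev single = { .num_hwfns = 1 };
          struct dev cmt    = { .num_hwfns = 2 };

          printf("single-engine device CMT? %d\n", IS_CMT(&single));
          printf("dual-engine device CMT?   %d\n", IS_CMT(&cmt));
          return 0;
  }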

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore.h       |    5 +++--
 drivers/net/qede/base/ecore_dev.c   |   25 ++++++++++++-------------
 drivers/net/qede/base/ecore_mcp.c   |    2 +-
 drivers/net/qede/base/ecore_sriov.c |    4 ++--
 drivers/net/qede/qede_ethdev.c      |    8 ++++----
 drivers/net/qede/qede_fdir.c        |    6 +++---
 drivers/net/qede/qede_main.c        |    2 +-
 7 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 823e8f8..576af50 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -513,8 +513,8 @@ struct ecore_hwfn {
 #define IS_LEAD_HWFN(edev)		(!((edev)->my_id))
 	u8				rel_pf_id;	/* Relative to engine*/
 	u8				abs_pf_id;
-	#define ECORE_PATH_ID(_p_hwfn) \
-		(ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
+#define ECORE_PATH_ID(_p_hwfn) \
+	(ECORE_IS_BB((_p_hwfn)->p_dev) ? ((_p_hwfn)->abs_pf_id & 1) : 0)
 	u8				port_id;
 	bool				b_active;
 
@@ -765,6 +765,7 @@ struct ecore_dev {
 	/* HW functions */
 	u8				num_hwfns;
 	struct ecore_hwfn		hwfns[MAX_HWFNS_PER_DEVICE];
+#define ECORE_IS_CMT(dev)		((dev)->num_hwfns > 1)
 
 	/* SRIOV */
 	struct ecore_hw_sriov_info	*p_iov_info;
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 9c362a9..4db9d5b 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -103,7 +103,7 @@ struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
 	/* In CMT doorbell bar is split down the middle between engine 0 and
 	 * enigne 1
 	 */
-	if (p_dev->num_hwfns > 1)
+	if (ECORE_IS_CMT(p_dev))
 		p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
 			&p_dev->hwfns[0] : &p_dev->hwfns[1];
 	else
@@ -392,7 +392,7 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
 	 * they were found to be useful MFW started updating them from 8.7.7.0.
 	 * In older MFW versions they are set to 0 which means disabled.
 	 */
-	if (p_hwfn->p_dev->num_hwfns > 1) {
+	if (ECORE_IS_CMT(p_hwfn->p_dev)) {
 		DP_INFO(p_hwfn,
 			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
 		val = BAR_ID_0 ? 256 * 1024 : 512 * 1024;
@@ -1483,7 +1483,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
 #endif
 		hw_mode |= 1 << MODE_ASIC;
 
-	if (p_hwfn->p_dev->num_hwfns > 1)
+	if (ECORE_IS_CMT(p_hwfn->p_dev))
 		hw_mode |= 1 << MODE_100G;
 
 	p_hwfn->hw_info.hw_mode = hw_mode;
@@ -1959,7 +1959,7 @@ enum ECORE_ROCE_EDPM_MODE {
 	u8 cond;
 
 	db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
-	if (p_hwfn->p_dev->num_hwfns > 1)
+	if (ECORE_IS_CMT(p_hwfn->p_dev))
 		db_bar_size /= 2;
 
 	/* Calculate doorbell regions
@@ -2078,7 +2078,7 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
 		else if (ECORE_IS_BB(p_hwfn->p_dev))
 			ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
 	} else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
-		if (p_hwfn->p_dev->num_hwfns > 1) {
+		if (ECORE_IS_CMT(p_hwfn->p_dev)) {
 			/* Activate OPTE in CMT */
 			u32 val;
 
@@ -2337,8 +2337,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 	int i;
 
-	if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
-	    (p_dev->num_hwfns > 1)) {
+	if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
 		DP_NOTICE(p_dev, false,
 			  "MSI mode is not supported for CMT devices\n");
 		return ECORE_INVAL;
@@ -3560,7 +3559,7 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
 
 	if (reg_function_hide & 0x1) {
 		if (ECORE_IS_BB(p_dev)) {
-			if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) {
+			if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
 				num_funcs = 0;
 				eng_mask = 0xaaaa;
 			} else {
@@ -3617,7 +3616,7 @@ static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
 	/* Read the port mode */
 	if (CHIP_REV_IS_FPGA(p_dev))
 		port_mode = 4;
-	else if (CHIP_REV_IS_EMUL(p_dev) && p_dev->num_hwfns > 1)
+	else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
 		/* In CMT on emulation, assume 1 port */
 		port_mode = 1;
 	else
@@ -3694,7 +3693,7 @@ static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
 		ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
 
 	/* Get the total number of ports of the device */
-	if (p_dev->num_hwfns > 1) {
+	if (ECORE_IS_CMT(p_dev)) {
 		/* In CMT there is always only one port */
 		p_dev->num_ports = 1;
 #ifndef ASIC_ONLY
@@ -4133,7 +4132,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
 	p_params->personality = p_hwfn->hw_info.personality;
 
 	/* initilalize 2nd hwfn if necessary */
-	if (p_dev->num_hwfns > 1) {
+	if (ECORE_IS_CMT(p_dev)) {
 		void OSAL_IOMEM *p_regview, *p_doorbell;
 		u8 OSAL_IOMEM *addr;
 
@@ -5323,7 +5322,7 @@ int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
 	int i, rc = ECORE_INVAL;
 
 	/* TBD - for multiple hardware functions - that is 100 gig */
-	if (p_dev->num_hwfns > 1) {
+	if (ECORE_IS_CMT(p_dev)) {
 		DP_NOTICE(p_dev, false,
 			  "WFQ configuration is not supported for this device\n");
 		return rc;
@@ -5358,7 +5357,7 @@ void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
 	int i;
 
 	/* TBD - for multiple hardware functions - that is 100 gig */
-	if (p_dev->num_hwfns > 1) {
+	if (ECORE_IS_CMT(p_dev)) {
 		DP_VERBOSE(p_dev, ECORE_MSG_LINK,
 			   "WFQ configuration is not supported for this device\n");
 		return;
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 6c99e94..e6980e6 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -700,7 +700,7 @@ static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
 		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
 
 	/* On CMT, always tell that it's engine */
-	if (p_hwfn->p_dev->num_hwfns > 1)
+	if (ECORE_IS_CMT(p_hwfn->p_dev))
 		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
 
 	*p_load_code = load_phase;
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 2b8e24c..6d3fc4e 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -1698,7 +1698,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 	}
 
 	/* On 100g PFs, prevent old VFs from loading */
-	if ((p_hwfn->p_dev->num_hwfns > 1) &&
+	if (ECORE_IS_CMT(p_hwfn->p_dev) &&
 	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
 		DP_INFO(p_hwfn,
 			"VF[%d] is running an old driver that doesn't support"
@@ -1730,7 +1730,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 
 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
-	if (p_hwfn->p_dev->num_hwfns > 1)
+	if (ECORE_IS_CMT(p_hwfn->p_dev))
 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
 
 	/* Share our ability to use multiple queue-ids only with VFs
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 1af0427..fe130d4 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1161,7 +1161,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	PMD_INIT_FUNC_TRACE(edev);
 
 	/* Check requirements for 100G mode */
-	if (edev->num_hwfns > 1) {
+	if (ECORE_IS_CMT(edev)) {
 		if (eth_dev->data->nb_rx_queues < 2 ||
 				eth_dev->data->nb_tx_queues < 2) {
 			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
@@ -1456,7 +1456,7 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 	rte_intr_disable(&pci_dev->intr_handle);
 	rte_intr_callback_unregister(&pci_dev->intr_handle,
 				     qede_interrupt_handler, (void *)eth_dev);
-	if (edev->num_hwfns > 1)
+	if (ECORE_IS_CMT(edev))
 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
 }
 
@@ -2035,7 +2035,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 	params->update_rss_config = 1;
 
 	/* Fix up RETA for CMT mode device */
-	if (edev->num_hwfns > 1)
+	if (ECORE_IS_CMT(edev))
 		qdev->rss_enable = qede_update_rss_parm_cmt(edev,
 							    params);
 	vport_update_params.vport_id = 0;
@@ -2600,7 +2600,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	 * This is required since uio device uses only one MSI-x
 	 * interrupt vector but we need one for each engine.
 	 */
-	if (edev->num_hwfns > 1 && IS_PF(edev)) {
+	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
 		rc = rte_eal_alarm_set(timer_period * US_PER_S,
 				       qede_poll_sp_sb_cb,
 				       (void *)eth_dev);
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
index 7db7521..f8d60f5 100644
--- a/drivers/net/qede/qede_fdir.c
+++ b/drivers/net/qede/qede_fdir.c
@@ -53,7 +53,7 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
 		DP_INFO(edev, "flowdir is disabled\n");
 	break;
 	case RTE_FDIR_MODE_PERFECT:
-		if (edev->num_hwfns > 1) {
+		if (ECORE_IS_CMT(edev)) {
 			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
 			qdev->fdir_info.arfs.arfs_enable = false;
 			return -ENOTSUP;
@@ -386,7 +386,7 @@ void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
 	switch (filter_op) {
 	case RTE_ETH_FILTER_NOP:
 		/* Typically used to query flowdir support */
-		if (edev->num_hwfns > 1) {
+		if (ECORE_IS_CMT(edev)) {
 			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
 			return -ENOTSUP;
 		}
@@ -425,7 +425,7 @@ int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
 	switch (filter_op) {
 	case RTE_ETH_FILTER_NOP:
 		/* Typically used to query fdir support */
-		if (edev->num_hwfns > 1) {
+		if (ECORE_IS_CMT(edev)) {
 			DP_ERR(edev, "flowdir is not supported in 100G mode\n");
 			return -ENOTSUP;
 		}
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 2ca4206..be63f5d 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -434,7 +434,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 	} else {
 		ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
 				      &info->num_queues);
-		if (edev->num_hwfns > 1) {
+		if (ECORE_IS_CMT(edev)) {
 			ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
 			info->num_queues += queues;
 		}
-- 
1.7.10.3

* [PATCH 39/53] net/qede/base: change verbosity
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (7 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 38/53] net/qede/base: add new macro for CMT mode Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 40/53] net/qede/base: fix number of app table entries Rasesh Mody
                   ` (13 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Change the verbosity of events related to malicious VFs from DP_INFO to
DP_NOTICE, since these are not just informational events.
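
The sketch below (hypothetical logging helpers, not the ecore DP_* macros)
illustrates the resulting behaviour: the first event for a VF is reported
loudly, while repeats for an already-flagged VF stay informational:

  #include <stdbool.h>
  #include <stdio.h>

  struct vf {
          int id;
          bool malicious;               /* analogue of p_vf->b_malicious */
  };

  static void report_malicious(struct vf *vf, int err_id)
  {
          if (!vf->malicious) {
                  /* first occurrence: promote to a notice and flag the VF */
                  fprintf(stderr, "NOTICE: VF [%d] - malicious behavior [%02x]\n",
                          vf->id, err_id);
                  vf->malicious = true;
          } else {
                  /* already flagged: keep repeats informational */
                  printf("INFO: VF [%d] - malicious behavior [%02x]\n",
                         vf->id, err_id);
          }
  }

  int main(void)
  {
          struct vf vf = { .id = 3, .malicious = false };

          report_malicious(&vf, 0x1a);  /* reported as NOTICE */
          report_malicious(&vf, 0x1a);  /* later repeats as INFO */
          return 0;
  }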

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/common_hsi.h  |    4 ++--
 drivers/net/qede/base/ecore_sriov.c |   16 +++++++++++-----
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index bfe50e1..a1257fb 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -718,8 +718,8 @@ struct iscsi_eqe_data {
  * Event Ring malicious VF data
  */
 struct malicious_vf_eqe_data {
-	u8 vfId /* Malicious VF ID */;
-	u8 errId /* Malicious VF error */;
+	u8 vf_id /* Malicious VF ID */;
+	u8 err_id /* Malicious VF error */;
 	__le16 reserved[3];
 };
 
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 6d3fc4e..b1ab80b 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -4192,16 +4192,22 @@ static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
 {
 	struct ecore_vf_info *p_vf;
 
-	p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
+	p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
 
 	if (!p_vf)
 		return;
 
-	DP_INFO(p_hwfn,
-		"VF [%d] - Malicious behavior [%02x]\n",
-		p_vf->abs_vf_id, p_data->errId);
+	if (!p_vf->b_malicious) {
+		DP_NOTICE(p_hwfn, false,
+			  "VF [%d] - Malicious behavior [%02x]\n",
+			  p_vf->abs_vf_id, p_data->err_id);
 
-	p_vf->b_malicious = true;
+		p_vf->b_malicious = true;
+	} else {
+		DP_INFO(p_hwfn,
+			"VF [%d] - Malicious behavior [%02x]\n",
+			p_vf->abs_vf_id, p_data->err_id);
+	}
 
 	OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
 }
-- 
1.7.10.3

* [PATCH 40/53] net/qede/base: fix number of app table entries
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (8 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 39/53] net/qede/base: change verbosity Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 41/53] net/qede/base: update firmware to 8.30.12.0 Rasesh Mody
                   ` (12 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev, stable

Configure only the available/requested number of app entries rather than the
maximum (DCBX_MAX_APP_PROTOCOL) in ecore_dcbx_get_app_data(). Also fix a
minor issue where an incorrect size value was being passed to memcpy().
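
A minimal standalone example (hypothetical structures) of why taking sizeof()
on the destination member is the safer idiom for such a memcpy():

  #include <stdio.h>
  #include <string.h>

  struct params  { int prio[8]; };
  struct wrapper { struct params params; int valid; };

  int main(void)
  {
          struct params  src = { .prio = { 1, 2, 3 } };
          struct wrapper dst;

          memset(&dst, 0, sizeof(dst));

          /* sizeof on the destination member always matches what is copied
           * into; spelling out a struct type by hand (as the old code did)
           * silently goes stale if the member's type ever changes.
           */
          memcpy(&dst.params, &src, sizeof(dst.params));

          printf("copied %zu bytes, prio[1] = %d\n",
                 sizeof(dst.params), dst.params.prio[1]);
          return 0;
  }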

Fixes: 26ae839d06e9 ("qede: add DCBX support")
Cc: stable@dpdk.org

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_dcbx.c |    6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 889d91a..22525df 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -417,7 +417,7 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 	p_params->app_error = GET_MFW_FIELD(p_app->flags, DCBX_APP_ERROR);
 	p_params->num_app_entries = GET_MFW_FIELD(p_app->flags,
 						  DCBX_APP_NUM_ENTRIES);
-	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+	for (i = 0; i < p_params->num_app_entries; i++) {
 		entry = &p_params->app_entry[i];
 		if (ieee) {
 			u8 sf_ieee;
@@ -1071,7 +1071,7 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
 	p_app->flags |= (u32)p_params->num_app_entries <<
 			DCBX_APP_NUM_ENTRIES_OFFSET;
 
-	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+	for (i = 0; i < p_params->num_app_entries; i++) {
 		entry = &p_app->app_pri_tbl[i].entry;
 		*entry = 0;
 		if (ieee) {
@@ -1273,7 +1273,7 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
 	p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
 	OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params,
 		    &dcbx_info->operational.params,
-		    sizeof(struct ecore_dcbx_admin_params));
+		    sizeof(p_hwfn->p_dcbx_info->set.config.params));
 	p_hwfn->p_dcbx_info->set.config.valid = true;
 
 	OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
-- 
1.7.10.3

* [PATCH 41/53] net/qede/base: update firmware to 8.30.12.0
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (9 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 40/53] net/qede/base: fix number of app table entries Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 42/53] net/qede/base: add UFP support Rasesh Mody
                   ` (11 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Upgrade QEDE PMD FW to version 8.30.12.0.

The firmware upgrade includes the following changes:
 - Add support for steering by IP and UDP destination port.
 - Add source QP field for GSI offload.
 - Add UFP support.
 - Add support for outer IPv4 TX CSO with unknown tunnel type (in addition
   to inner header CSO).
 - Support flow ID in accelerated RFS flow.
 - Allow Doorbell on empty SPQ and LL2 TX queue (for doorbell recovery).
 - Enable PCI Relaxed Ordering for L2 RX data placement.
 - Additional enhancements and bug fixes.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/common_hsi.h            |  760 +++++++++++-----------
 drivers/net/qede/base/ecore.h                 |    2 +-
 drivers/net/qede/base/ecore_cxt.c             |   10 +-
 drivers/net/qede/base/ecore_dcbx.c            |    2 -
 drivers/net/qede/base/ecore_dev.c             |   17 +-
 drivers/net/qede/base/ecore_hsi_common.h      |  245 +++++--
 drivers/net/qede/base/ecore_hsi_debug_tools.h |    6 +-
 drivers/net/qede/base/ecore_hsi_eth.h         |   65 +-
 drivers/net/qede/base/ecore_init_fw_funcs.c   |  455 +++++++------
 drivers/net/qede/base/ecore_init_fw_funcs.h   |   56 +-
 drivers/net/qede/base/ecore_int.c             |    9 +-
 drivers/net/qede/base/ecore_int.h             |    2 +-
 drivers/net/qede/base/ecore_int_api.h         |    6 +-
 drivers/net/qede/base/ecore_iov_api.h         |    4 +-
 drivers/net/qede/base/ecore_iro.h             |    8 +
 drivers/net/qede/base/ecore_iro_values.h      |   44 +-
 drivers/net/qede/base/ecore_l2.c              |   25 +-
 drivers/net/qede/base/ecore_proto_if.h        |    1 +
 drivers/net/qede/base/ecore_rt_defs.h         |  858 ++++++++++++++-----------
 drivers/net/qede/base/ecore_sp_commands.c     |    4 +-
 drivers/net/qede/base/ecore_spq.c             |    2 +-
 drivers/net/qede/base/ecore_sriov.c           |   18 +-
 drivers/net/qede/base/ecore_sriov.h           |    4 +-
 drivers/net/qede/base/reg_addr.h              |    2 +
 drivers/net/qede/qede_main.c                  |    2 +-
 drivers/net/qede/qede_rxtx.c                  |    8 +-
 26 files changed, 1469 insertions(+), 1146 deletions(-)

diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index a1257fb..9a6059a 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -97,8 +97,8 @@
 
 
 #define FW_MAJOR_VERSION		8
-#define FW_MINOR_VERSION		20
-#define FW_REVISION_VERSION		0
+#define FW_MINOR_VERSION		30
+#define FW_REVISION_VERSION		12
 #define FW_ENGINEERING_VERSION	0
 
 /***********************/
@@ -106,73 +106,70 @@
 /***********************/
 
 /* PCI functions */
-#define MAX_NUM_PORTS_K2	(4)
-#define MAX_NUM_PORTS_BB	(2)
-#define MAX_NUM_PORTS		(MAX_NUM_PORTS_K2)
-
-#define MAX_NUM_PFS_K2	(16)
-#define MAX_NUM_PFS_BB	(8)
-#define MAX_NUM_PFS	(MAX_NUM_PFS_K2)
-#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
-
-#define MAX_NUM_VFS_BB	(120)
-#define MAX_NUM_VFS_K2	(192)
-#define E4_MAX_NUM_VFS	(MAX_NUM_VFS_K2)
-#define COMMON_MAX_NUM_VFS (240)
-
-#define MAX_NUM_FUNCTIONS_BB	(MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
-#define MAX_NUM_FUNCTIONS_K2	(MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
-#define MAX_NUM_FUNCTIONS	(MAX_NUM_PFS + E4_MAX_NUM_VFS)
+#define MAX_NUM_PORTS_BB        (2)
+#define MAX_NUM_PORTS_K2        (4)
+#define MAX_NUM_PORTS_E5        (4)
+#define MAX_NUM_PORTS           (MAX_NUM_PORTS_E5)
+
+#define MAX_NUM_PFS_BB          (8)
+#define MAX_NUM_PFS_K2          (16)
+#define MAX_NUM_PFS_E5          (16)
+#define MAX_NUM_PFS             (MAX_NUM_PFS_E5)
+#define MAX_NUM_OF_PFS_IN_CHIP  (16) /* On both engines */
+
+#define MAX_NUM_VFS_BB          (120)
+#define MAX_NUM_VFS_K2          (192)
+#define MAX_NUM_VFS_E4          (MAX_NUM_VFS_K2)
+#define MAX_NUM_VFS_E5          (240)
+#define COMMON_MAX_NUM_VFS      (MAX_NUM_VFS_E5)
+
+#define MAX_NUM_FUNCTIONS_BB    (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2    (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
+#define MAX_NUM_FUNCTIONS       (MAX_NUM_PFS + MAX_NUM_VFS_E4)
 
 /* in both BB and K2, the VF number starts from 16. so for arrays containing all
  * possible PFs and VFs - we need a constant for this size
  */
-#define MAX_FUNCTION_NUMBER_BB	(MAX_NUM_PFS + MAX_NUM_VFS_BB)
-#define MAX_FUNCTION_NUMBER_K2	(MAX_NUM_PFS + MAX_NUM_VFS_K2)
-#define MAX_FUNCTION_NUMBER	(MAX_NUM_PFS + E4_MAX_NUM_VFS)
-
-#define MAX_NUM_VPORTS_K2	(208)
-#define MAX_NUM_VPORTS_BB	(160)
-#define MAX_NUM_VPORTS		(MAX_NUM_VPORTS_K2)
+#define MAX_FUNCTION_NUMBER_BB      (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER_K2      (MAX_NUM_PFS + MAX_NUM_VFS_K2)
+#define MAX_FUNCTION_NUMBER_E4      (MAX_NUM_PFS + MAX_NUM_VFS_E4)
+#define MAX_FUNCTION_NUMBER_E5      (MAX_NUM_PFS + MAX_NUM_VFS_E5)
+#define COMMON_MAX_FUNCTION_NUMBER  (MAX_NUM_PFS + MAX_NUM_VFS_E5)
+
+#define MAX_NUM_VPORTS_K2       (208)
+#define MAX_NUM_VPORTS_BB       (160)
+#define MAX_NUM_VPORTS_E4       (MAX_NUM_VPORTS_K2)
+#define MAX_NUM_VPORTS_E5       (256)
+#define COMMON_MAX_NUM_VPORTS   (MAX_NUM_VPORTS_E5)
 
-#define MAX_NUM_L2_QUEUES_K2	(320)
 #define MAX_NUM_L2_QUEUES_BB	(256)
-#define MAX_NUM_L2_QUEUES	(MAX_NUM_L2_QUEUES_K2)
+#define MAX_NUM_L2_QUEUES_K2    (320)
+#define MAX_NUM_L2_QUEUES_E5    (320) /* TODO_E5_VITALY - fix to 512 */
+#define MAX_NUM_L2_QUEUES		(MAX_NUM_L2_QUEUES_E5)
 
 /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
-/* 4-Port K2. */
-#define NUM_PHYS_TCS_4PORT_K2	(4)
-#define NUM_OF_PHYS_TCS		(8)
-
-#define NUM_TCS_4PORT_K2	(NUM_PHYS_TCS_4PORT_K2 + 1)
-#define NUM_OF_TCS		(NUM_OF_PHYS_TCS + 1)
-
-#define LB_TC			(NUM_OF_PHYS_TCS)
-
-/* Num of possible traffic priority values */
-#define NUM_OF_PRIO		(8)
-
-#define MAX_NUM_VOQS_K2		(NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
-#define MAX_NUM_VOQS_BB		(NUM_OF_TCS * MAX_NUM_PORTS_BB)
-#define MAX_NUM_VOQS		(MAX_NUM_VOQS_K2)
-#define MAX_PHYS_VOQS		(NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+#define NUM_PHYS_TCS_4PORT_K2     4
+#define NUM_PHYS_TCS_4PORT_TX_E5  6
+#define NUM_PHYS_TCS_4PORT_RX_E5  4
+#define NUM_OF_PHYS_TCS           8
+#define PURE_LB_TC                NUM_OF_PHYS_TCS
+#define NUM_TCS_4PORT_K2          (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_TCS_4PORT_TX_E5       (NUM_PHYS_TCS_4PORT_TX_E5 + 1)
+#define NUM_TCS_4PORT_RX_E5       (NUM_PHYS_TCS_4PORT_RX_E5 + 1)
+#define NUM_OF_TCS                (NUM_OF_PHYS_TCS + 1)
 
 /* CIDs */
-#define E4_NUM_OF_CONNECTION_TYPES (8)
-#define NUM_OF_TASK_TYPES		(8)
-#define NUM_OF_LCIDS			(320)
-#define NUM_OF_LTIDS			(320)
-
-/* Clock values */
-#define MASTER_CLK_FREQ_E4		(375e6)
-#define STORM_CLK_FREQ_E4		(1000e6)
-#define CLK25M_CLK_FREQ_E4		(25e6)
+#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_CONNECTION_TYPES_E5 (16)
+#define NUM_OF_TASK_TYPES       (8)
+#define NUM_OF_LCIDS            (320)
+#define NUM_OF_LTIDS            (320)
 
 /* Global PXP windows (GTT) */
-#define NUM_OF_GTT			19
-#define GTT_DWORD_SIZE_BITS	10
-#define GTT_BYTE_SIZE_BITS	(GTT_DWORD_SIZE_BITS + 2)
-#define GTT_DWORD_SIZE		(1 << GTT_DWORD_SIZE_BITS)
+#define NUM_OF_GTT          19
+#define GTT_DWORD_SIZE_BITS 10
+#define GTT_BYTE_SIZE_BITS  (GTT_DWORD_SIZE_BITS + 2)
+#define GTT_DWORD_SIZE      (1 << GTT_DWORD_SIZE_BITS)
 
 /* Tools Version */
 #define TOOLS_VERSION 10
@@ -417,49 +414,51 @@
 #define CAU_FSM_ETH_TX  1
 
 /* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB    12
+#define PIS_PER_SB_E4    12
+#define PIS_PER_SB_E5    8
+#define MAX_PIS_PER_SB_E4	 OSAL_MAX_T(PIS_PER_SB_E4, PIS_PER_SB_E5)
 
 /* fsm is stopped or not valid for this sb */
-#define CAU_HC_STOPPED_STATE	3
+#define CAU_HC_STOPPED_STATE		3
 /* fsm is working without interrupt coalescing for this sb*/
-#define CAU_HC_DISABLE_STATE	4
+#define CAU_HC_DISABLE_STATE		4
 /* fsm is working with interrupt coalescing for this sb*/
-#define CAU_HC_ENABLE_STATE	0
+#define CAU_HC_ENABLE_STATE			0
 
 
 /*****************/
 /* IGU CONSTANTS */
 /*****************/
 
-#define MAX_SB_PER_PATH_K2	(368)
-#define MAX_SB_PER_PATH_BB	(288)
-#define MAX_TOT_SB_PER_PATH \
-	MAX_SB_PER_PATH_K2
+#define MAX_SB_PER_PATH_K2			(368)
+#define MAX_SB_PER_PATH_BB			(288)
+#define MAX_SB_PER_PATH_E5			(512)
+#define MAX_TOT_SB_PER_PATH			MAX_SB_PER_PATH_E5
 
-#define MAX_SB_PER_PF_MIMD	129
-#define MAX_SB_PER_PF_SIMD	64
-#define MAX_SB_PER_VF		64
+#define MAX_SB_PER_PF_MIMD			129
+#define MAX_SB_PER_PF_SIMD			64
+#define MAX_SB_PER_VF				64
 
 /* Memory addresses on the BAR for the IGU Sub Block */
-#define IGU_MEM_BASE			0x0000
+#define IGU_MEM_BASE				0x0000
 
-#define IGU_MEM_MSIX_BASE		0x0000
-#define IGU_MEM_MSIX_UPPER		0x0101
-#define IGU_MEM_MSIX_RESERVED_UPPER	0x01ff
+#define IGU_MEM_MSIX_BASE			0x0000
+#define IGU_MEM_MSIX_UPPER			0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER		0x01ff
 
-#define IGU_MEM_PBA_MSIX_BASE		0x0200
-#define IGU_MEM_PBA_MSIX_UPPER		0x0202
-#define IGU_MEM_PBA_MSIX_RESERVED_UPPER	0x03ff
+#define IGU_MEM_PBA_MSIX_BASE			0x0200
+#define IGU_MEM_PBA_MSIX_UPPER			0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER		0x03ff
 
-#define IGU_CMD_INT_ACK_BASE		0x0400
-#define IGU_CMD_INT_ACK_UPPER		(IGU_CMD_INT_ACK_BASE +	\
-					 MAX_TOT_SB_PER_PATH -	\
-					 1)
-#define IGU_CMD_INT_ACK_RESERVED_UPPER	0x05ff
+#define IGU_CMD_INT_ACK_BASE			0x0400
+#define IGU_CMD_INT_ACK_UPPER			(IGU_CMD_INT_ACK_BASE + \
+						 MAX_TOT_SB_PER_PATH - \
+						 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER		0x05ff
 
-#define IGU_CMD_ATTN_BIT_UPD_UPPER	0x05f0
-#define IGU_CMD_ATTN_BIT_SET_UPPER	0x05f1
-#define IGU_CMD_ATTN_BIT_CLR_UPPER	0x05f2
+#define IGU_CMD_ATTN_BIT_UPD_UPPER		0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER		0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER		0x05f2
 
 #define IGU_REG_SISR_MDPC_WMASK_UPPER		0x05f3
 #define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER	0x05f4
@@ -467,8 +466,8 @@
 #define IGU_REG_SISR_MDPC_WOMASK_UPPER		0x05f6
 
 #define IGU_CMD_PROD_UPD_BASE			0x0600
-#define IGU_CMD_PROD_UPD_UPPER			(IGU_CMD_PROD_UPD_BASE +\
-						 MAX_TOT_SB_PER_PATH - \
+#define IGU_CMD_PROD_UPD_UPPER			(IGU_CMD_PROD_UPD_BASE + \
+						 MAX_TOT_SB_PER_PATH  - \
 						 1)
 #define IGU_CMD_PROD_UPD_RESERVED_UPPER		0x07ff
 
@@ -491,16 +490,16 @@
 #define PXP_PER_PF_ENTRY_SIZE		8
 #define PXP_NUM_GLOBAL_WINDOWS		243
 #define PXP_GLOBAL_ENTRY_SIZE		4
-#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH	4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH		4
 #define PXP_PF_WINDOW_ADMIN_START	0
 #define PXP_PF_WINDOW_ADMIN_LENGTH	0x1000
 #define PXP_PF_WINDOW_ADMIN_END		(PXP_PF_WINDOW_ADMIN_START + \
-					 PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+				PXP_PF_WINDOW_ADMIN_LENGTH - 1)
 #define PXP_PF_WINDOW_ADMIN_PER_PF_START	0
 #define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH	(PXP_NUM_PF_WINDOWS * \
 						 PXP_PER_PF_ENTRY_SIZE)
-#define PXP_PF_WINDOW_ADMIN_PER_PF_END	(PXP_PF_WINDOW_ADMIN_PER_PF_START + \
-					 PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+					PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
 #define PXP_PF_WINDOW_ADMIN_GLOBAL_START	0x200
 #define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH	(PXP_NUM_GLOBAL_WINDOWS * \
 						 PXP_GLOBAL_ENTRY_SIZE)
@@ -575,19 +574,79 @@
 #define PXP_BAR0_FIRST_INVALID_ADDRESS          \
 	(PXP_BAR0_END_PSDM + 1)
 
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN	12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER	1024
-
-/* ILT Records */
+/* VF BAR */
+#define PXP_VF_BAR0                             0
+
+#define PXP_VF_BAR0_START_IGU                   0
+#define PXP_VF_BAR0_IGU_LENGTH                  0x3000
+#define PXP_VF_BAR0_END_IGU                     \
+	(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ                    0x3000
+#define PXP_VF_BAR0_DQ_LENGTH                   0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET            0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS           \
+	(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS         \
+	(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
+#define PXP_VF_BAR0_END_DQ                      \
+	(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B           0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B           0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B             \
+	(PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B           0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B             \
+	(PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B           0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B             \
+	(PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B           0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B             \
+	(PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B           0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B             \
+	(PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B           0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B             \
+	(PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_GRC                   0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH                  0x200
+#define PXP_VF_BAR0_END_GRC                     \
+	(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A            0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A              0x10000
+
+#define PXP_VF_BAR0_START_IGU2                   0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH                  0xD000
+#define PXP_VF_BAR0_END_IGU2                     \
+	(PXP_VF_BAR0_START_IGU2 + PXP_VF_BAR0_IGU2_LENGTH - 1)
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH           32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN          12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER         1024
+
+// ILT Records
 #define PXP_NUM_ILT_RECORDS_BB 7600
 #define PXP_NUM_ILT_RECORDS_K2 11000
-#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-
+#define MAX_NUM_ILT_RECORDS \
+	OSAL_MAX_T(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
 
-/* Host Interface */
-#define PXP_QUEUES_ZONE_MAX_NUM	320
+#define PXP_NUM_ILT_RECORDS_E5 13664
 
 
+// Host Interface
+#define PXP_QUEUES_ZONE_MAX_NUM_E4	320
+#define PXP_QUEUES_ZONE_MAX_NUM_E5	512
 
 
 /*****************/
@@ -635,7 +694,8 @@
 /******************/
 
 /* Number of PBF command queue lines. Each line is 32B. */
-#define PBF_MAX_CMD_LINES 3328
+#define PBF_MAX_CMD_LINES_E4 3328
+#define PBF_MAX_CMD_LINES_E5 5280
 
 /* Number of BTB blocks. Each block is 256B. */
 #define BTB_MAX_BLOCKS 1440
@@ -645,17 +705,6 @@
 /*****************/
 
 #define PRS_GFT_CAM_LINES_NO_MATCH  31
-/* Async data KCQ CQE */
-struct async_data {
-	/* Context ID of the connection */
-	__le32	cid;
-	/* Task Id of the task (for error that happened on a a task) */
-	__le16	itid;
-	/* error code - relevant only if the opcode indicates its an error */
-	u8	error_code;
-	/* internal fw debug parameter */
-	u8	fw_debug_param;
-};
 
 /*
  * Interrupt coalescing TimeSet
@@ -683,22 +732,29 @@ struct eth_rx_prod_data {
 	__le16 cqe_prod /* CQE producer. */;
 };
 
-struct regpair {
-	__le32 lo /* low word for reg-pair */;
-	__le32 hi /* high word for reg-pair */;
+
+struct tcp_ulp_connect_done_params {
+	__le16 mss;
+	u8 snd_wnd_scale;
+	u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK     0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT    0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK  0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
 };
 
-/*
- * Event Ring VF-PF Channel data
- */
-struct vf_pf_channel_eqe_data {
-	struct regpair msg_addr /* VF-PF message address */;
+struct iscsi_connect_done_results {
+	__le16 icid /* Context ID of the connection */;
+	__le16 conn_id /* Driver connection ID */;
+/* decided tcp params after connect done */
+	struct tcp_ulp_connect_done_params params;
 };
 
+
 struct iscsi_eqe_data {
-	__le32 cid /* Context ID of the connection */;
-	    /* Task Id of the task (for error that happened on a a task) */;
-	__le16 conn_id;
+	__le16 icid /* Context ID of the connection */;
+	__le16 conn_id /* Driver connection ID */;
+	__le16 reserved;
 /* error code - relevant only if the opcode indicates its an error */
 	u8 error_code;
 	u8 error_pdu_opcode_reserved;
@@ -714,52 +770,10 @@ struct iscsi_eqe_data {
 #define ISCSI_EQE_DATA_RESERVED0_SHIFT              7
 };
 
-/*
- * Event Ring malicious VF data
- */
-struct malicious_vf_eqe_data {
-	u8 vf_id /* Malicious VF ID */;
-	u8 err_id /* Malicious VF error */;
-	__le16 reserved[3];
-};
 
 /*
- * Event Ring initial cleanup data
+ * Multi function mode
  */
-struct initial_cleanup_eqe_data {
-	u8 vfId /* VF ID */;
-	u8 reserved[7];
-};
-
-/*
- * Event Data Union
- */
-union event_ring_data {
-	u8 bytes[8] /* Byte Array */;
-	struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
-	struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
-	struct regpair roceHandle /* Dedicated field for RDMA data */;
-	struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
-	struct initial_cleanup_eqe_data vf_init_cleanup
-	    /* VF Initial Cleanup data */;
-};
-/* Event Ring Entry */
-struct event_ring_entry {
-	u8 protocol_id /* Event Protocol ID */;
-	u8 opcode /* Event Opcode */;
-	__le16 reserved0 /* Reserved */;
-	__le16 echo /* Echo value from ramrod data on the host */;
-	u8 fw_return_code /* FW return code for SP ramrods */;
-	u8 flags;
-/* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
-#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
-#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
-#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
-#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
-	union event_ring_data	data;
-};
-
-/* Multi function mode */
 enum mf_mode {
 	ERROR_MODE /* Unsupported mode */,
 	MF_OVLAN /* Multi function based on outer VLAN */,
@@ -783,6 +797,12 @@ enum protocol_type {
 };
 
 
+struct regpair {
+	__le32 lo /* low word for reg-pair */;
+	__le32 hi /* high word for reg-pair */;
+};
+
+
 
 /*
  * Ustorm Queue Zone
@@ -852,6 +872,18 @@ struct cau_sb_entry {
 #define CAU_SB_ENTRY_TPH_SHIFT         31
 };
 
+
+/*
+ * Igu cleanup bit values to distinguish between clean or producer consumer
+ * update.
+ */
+enum command_type_bit {
+	IGU_COMMAND_TYPE_NOP = 0,
+	IGU_COMMAND_TYPE_SET = 1,
+	MAX_COMMAND_TYPE_BIT
+};
+
+
 /* core doorbell data */
 struct core_db_data {
 	u8 params;
@@ -1008,7 +1040,7 @@ struct db_rdma_dpm_params {
 #define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT     28
 #define DB_RDMA_DPM_PARAMS_S_FLG_MASK               0x1 /* RoCE S flag */
 #define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT              29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK           0x3
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK           0x1
 #define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT          30
 /* Connection type is iWARP */
 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK  0x1
@@ -1072,9 +1104,9 @@ enum igu_seg_access {
  * to the last-ethertype)
  */
 enum l3_type {
-	e_l3Type_unknown,
-	e_l3Type_ipv4,
-	e_l3Type_ipv6,
+	e_l3_type_unknown,
+	e_l3_type_ipv4,
+	e_l3_type_ipv6,
 	MAX_L3_TYPE
 };
 
@@ -1085,9 +1117,9 @@ enum l3_type {
  * first fragment, the protocol-type should be set to none.
  */
 enum l4_protocol {
-	e_l4Protocol_none,
-	e_l4Protocol_tcp,
-	e_l4Protocol_udp,
+	e_l4_protocol_none,
+	e_l4_protocol_tcp,
+	e_l4_protocol_udp,
 	MAX_L4_PROTOCOL
 };
 
@@ -1311,260 +1343,230 @@ struct pxp_vf_zone_a_permission {
  * Rdif context
  */
 struct rdif_task_context {
-	__le32 initialRefTag;
-	__le16 appTagValue;
-	__le16 appTagMask;
+	__le32 initial_ref_tag;
+	__le16 app_tag_value;
+	__le16 app_tag_mask;
 	u8 flags0;
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK            0x1
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT           0
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK      0x1
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT     1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK             0x1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT            0
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK      0x1
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT     1
 /* 0 = IP checksum, 1 = CRC */
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK           0x1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT          2
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK         0x1
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT        3
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK            0x1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT           2
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK         0x1
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT        3
 /* 1/2/3 - Protection Type */
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK          0x3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT         4
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK            0x3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT           4
 /* 0=0x0000, 1=0xffff */
-#define RDIF_TASK_CONTEXT_CRC_SEED_MASK                0x1
-#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT               6
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK                   0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT                  6
 /* Keep reference tag constant */
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK         0x1
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT        7
-	u8 partialDifData[7];
-	__le16 partialCrcValue;
-	__le16 partialChecksumValue;
-	__le32 offsetInIO;
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK         0x1
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT        7
+	u8 partial_dif_data[7];
+	__le16 partial_crc_value;
+	__le16 partial_checksum_value;
+	__le32 offset_in_io;
 	__le16 flags1;
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK           0x1
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT          0
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK          0x1
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT         1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK          0x1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT         2
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK            0x1
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT           3
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK           0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT          4
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK           0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT          5
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK             0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT            0
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK           0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT          1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK           0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT          2
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK              0x1
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT             3
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK            0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT           4
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK            0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT           5
 /* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK            0x7
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT           6
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK              0x7
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT             6
 /* 0=None, 1=DIF, 2=DIX */
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK           0x3
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT          9
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK             0x3
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT            9
 /* DIF tag right at the beginning of DIF interval */
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK           0x1
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT          11
-#define RDIF_TASK_CONTEXT_RESERVED0_MASK               0x1
-#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT              12
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK            0x1
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT           11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK                  0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT                 12
 /* 0=None, 1=DIF */
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK        0x1
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT       13
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK          0x1
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT         13
 /* Forward application tag with mask */
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK   0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT  14
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK  0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
 /* Forward reference tag with mask */
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK   0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT  15
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK  0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
 	__le16 state;
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK    0xF
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT   0
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK  0xF
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
-#define RDIF_TASK_CONTEXT_ERRORINIO_MASK               0x1
-#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT              8
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK        0x1
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT       9
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK    0xF
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT   0
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK  0xF
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK                0x1
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT               8
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK          0x1
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT         9
 /* mask for refernce tag handling */
-#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK              0xF
-#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT             10
-#define RDIF_TASK_CONTEXT_RESERVED1_MASK               0x3
-#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT              14
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK               0xF
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT              10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK                  0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT                 14
 	__le32 reserved2;
 };
 
-/* RSS hash type */
+/*
+ * RSS hash type
+ */
 enum rss_hash_type {
-	RSS_HASH_TYPE_DEFAULT	= 0,
-	RSS_HASH_TYPE_IPV4	= 1,
-	RSS_HASH_TYPE_TCP_IPV4	= 2,
-	RSS_HASH_TYPE_IPV6	= 3,
-	RSS_HASH_TYPE_TCP_IPV6	= 4,
-	RSS_HASH_TYPE_UDP_IPV4	= 5,
-	RSS_HASH_TYPE_UDP_IPV6	= 6,
+	RSS_HASH_TYPE_DEFAULT = 0,
+	RSS_HASH_TYPE_IPV4 = 1,
+	RSS_HASH_TYPE_TCP_IPV4 = 2,
+	RSS_HASH_TYPE_IPV6 = 3,
+	RSS_HASH_TYPE_TCP_IPV6 = 4,
+	RSS_HASH_TYPE_UDP_IPV4 = 5,
+	RSS_HASH_TYPE_UDP_IPV6 = 6,
 	MAX_RSS_HASH_TYPE
 };
 
-/* status block structure */
-struct status_block {
-	__le16	pi_array[PIS_PER_SB];
-	__le32	sb_num;
-#define STATUS_BLOCK_SB_NUM_MASK      0x1FF
-#define STATUS_BLOCK_SB_NUM_SHIFT     0
-#define STATUS_BLOCK_ZERO_PAD_MASK    0x7F
-#define STATUS_BLOCK_ZERO_PAD_SHIFT   9
-#define STATUS_BLOCK_ZERO_PAD2_MASK   0xFFFF
-#define STATUS_BLOCK_ZERO_PAD2_SHIFT  16
+/*
+ * status block structure
+ */
+struct status_block_e4 {
+	__le16 pi_array[PIS_PER_SB_E4];
+	__le32 sb_num;
+#define STATUS_BLOCK_E4_SB_NUM_MASK      0x1FF
+#define STATUS_BLOCK_E4_SB_NUM_SHIFT     0
+#define STATUS_BLOCK_E4_ZERO_PAD_MASK    0x7F
+#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT   9
+#define STATUS_BLOCK_E4_ZERO_PAD2_MASK   0xFFFF
+#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT  16
 	__le32 prod_index;
-#define STATUS_BLOCK_PROD_INDEX_MASK  0xFFFFFF
-#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD3_MASK   0xFF
-#define STATUS_BLOCK_ZERO_PAD3_SHIFT  24
+#define STATUS_BLOCK_E4_PROD_INDEX_MASK  0xFFFFFF
+#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD3_MASK   0xFF
+#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT  24
 };
 
 
-/* VF BAR */
-#define PXP_VF_BAR0 0
-
-#define PXP_VF_BAR0_START_GRC		0x3E00
-#define PXP_VF_BAR0_GRC_LENGTH		0x200
-#define PXP_VF_BAR0_END_GRC \
-(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_IGU		0
-#define PXP_VF_BAR0_IGU_LENGTH		0x3000
-#define PXP_VF_BAR0_END_IGU \
-(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_DQ		0x3000
-#define PXP_VF_BAR0_DQ_LENGTH		0x200
-#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET    0
-#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
-(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
-#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
-(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
-#define PXP_VF_BAR0_END_DQ \
-(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_TSDM_ZONE_B   0x3200
-#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B   0x200
-#define PXP_VF_BAR0_END_TSDM_ZONE_B \
-(PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_MSDM_ZONE_B   0x3400
-#define PXP_VF_BAR0_END_MSDM_ZONE_B \
-(PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_USDM_ZONE_B   0x3600
-#define PXP_VF_BAR0_END_USDM_ZONE_B \
-(PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_XSDM_ZONE_B   0x3800
-#define PXP_VF_BAR0_END_XSDM_ZONE_B \
-(PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_YSDM_ZONE_B   0x3a00
-#define PXP_VF_BAR0_END_YSDM_ZONE_B \
-(PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_PSDM_ZONE_B   0x3c00
-#define PXP_VF_BAR0_END_PSDM_ZONE_B \
-(PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_SDM_ZONE_A    0x4000
-#define PXP_VF_BAR0_END_SDM_ZONE_A      0x10000
-
-#define PXP_VF_BAR0_GRC_WINDOW_LENGTH   32
+/*
+ * status block structure
+ */
+struct status_block_e5 {
+	__le16 pi_array[PIS_PER_SB_E5];
+	__le32 sb_num;
+#define STATUS_BLOCK_E5_SB_NUM_MASK      0x1FF
+#define STATUS_BLOCK_E5_SB_NUM_SHIFT     0
+#define STATUS_BLOCK_E5_ZERO_PAD_MASK    0x7F
+#define STATUS_BLOCK_E5_ZERO_PAD_SHIFT   9
+#define STATUS_BLOCK_E5_ZERO_PAD2_MASK   0xFFFF
+#define STATUS_BLOCK_E5_ZERO_PAD2_SHIFT  16
+	__le32 prod_index;
+#define STATUS_BLOCK_E5_PROD_INDEX_MASK  0xFFFFFF
+#define STATUS_BLOCK_E5_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E5_ZERO_PAD3_MASK   0xFF
+#define STATUS_BLOCK_E5_ZERO_PAD3_SHIFT  24
+};
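
The E4 and E5 status blocks differ only in the size of pi_array; the sb_num and prod_index dwords pack their sub-fields through the MASK/SHIFT pairs above. A minimal standalone sketch of decoding those two dwords, using stdint types and a local helper rather than the driver's own accessors, and assuming the dwords were already converted to host endianness:

#include <stdint.h>
#include <stdio.h>

/* Local mirrors of the STATUS_BLOCK_E4_* mask/shift pairs above. */
#define SB_SB_NUM_MASK      0x1FF
#define SB_SB_NUM_SHIFT     0
#define SB_PROD_INDEX_MASK  0xFFFFFF
#define SB_PROD_INDEX_SHIFT 0

/* Generic extractor: shift the register down, then mask the field. */
static uint32_t sb_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg >> shift) & mask;
}

int main(void)
{
	uint32_t sb_num_dword = 0x00000123;     /* example raw value */
	uint32_t prod_index_dword = 0x05001234; /* example raw value */

	printf("sb_num = %u\n",
	       sb_get_field(sb_num_dword, SB_SB_NUM_MASK, SB_SB_NUM_SHIFT));
	printf("prod_index = %u\n",
	       sb_get_field(prod_index_dword, SB_PROD_INDEX_MASK,
			    SB_PROD_INDEX_SHIFT));
	return 0;
}
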
+
 
 /*
  * Tdif context
  */
 struct tdif_task_context {
-	__le32 initialRefTag;
-	__le16 appTagValue;
-	__le16 appTagMask;
-	__le16 partialCrcValueB;
-	__le16 partialChecksumValueB;
+	__le32 initial_ref_tag;
+	__le16 app_tag_value;
+	__le16 app_tag_mask;
+	__le16 partial_crc_value_b;
+	__le16 partial_checksum_value_b;
 	__le16 stateB;
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK    0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT   0
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK  0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
-#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK               0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT              8
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK         0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT        9
-#define TDIF_TASK_CONTEXT_RESERVED0_MASK                0x3F
-#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT               10
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK    0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT   0
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK  0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK                0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT               8
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK             0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT            9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK                    0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT                   10
 	u8 reserved1;
 	u8 flags0;
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK             0x1
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT            0
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK       0x1
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT      1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK               0x1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT              0
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK        0x1
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT       1
 /* 0 = IP checksum, 1 = CRC */
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK            0x1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT           2
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK          0x1
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT         3
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK              0x1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT             2
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK           0x1
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT          3
 /* 1/2/3 - Protection Type */
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK           0x3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT          4
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK              0x3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT             4
 /* 0=0x0000, 1=0xffff */
-#define TDIF_TASK_CONTEXT_CRC_SEED_MASK                 0x1
-#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT                6
-#define TDIF_TASK_CONTEXT_RESERVED2_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT               7
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK                     0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT                    6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK                    0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT                   7
 	__le32 flags1;
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK            0x1
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT           0
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK           0x1
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT          1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK           0x1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT          2
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK             0x1
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT            3
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK            0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT           4
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK            0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT           5
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK               0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT              0
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK             0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT            1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK             0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT            2
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK                0x1
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT               3
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK              0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT             4
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK              0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT             5
 /* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK             0x7
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT            6
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK                0x7
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT               6
 /* 0=None, 1=DIF, 2=DIX */
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK            0x3
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT           9
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK               0x3
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT              9
 /* DIF tag right at the beginning of DIF interval */
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK            0x1
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT           11
-/* reserved */
-#define TDIF_TASK_CONTEXT_RESERVED3_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT               12
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK              0x1
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT             11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK                    0x1 /* reserved */
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT                   12
 /* 0=None, 1=DIF */
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK         0x1
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT        13
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK    0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT   14
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK  0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
-#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK               0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT              22
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK        0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT       23
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK            0x1
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT           13
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK    0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT   14
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK  0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK                0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT               22
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK          0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT         23
 /* mask for reference tag handling */
-#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK               0xF
-#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT              24
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK                 0xF
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT                24
 /* Forward application tag with mask */
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK    0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT   28
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK    0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT   28
 /* Forward reference tag with mask */
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK    0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT   29
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK    0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT   29
 /* Keep reference tag constant */
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK          0x1
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT         30
-#define TDIF_TASK_CONTEXT_RESERVED4_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT               31
-	__le32 offsetInIOB;
-	__le16 partialCrcValueA;
-	__le16 partialChecksumValueA;
-	__le32 offsetInIOA;
-	u8 partialDifDataA[8];
-	u8 partialDifDataB[8];
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK           0x1
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT          30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK                    0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT                   31
+	__le32 offset_in_io_b;
+	__le16 partial_crc_value_a;
+	__le16 partial_checksum_value_a;
+	__le32 offset_in_io_a;
+	u8 partial_dif_data_a[8];
+	u8 partial_dif_data_b[8];
 };
 
 
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 576af50..0199608 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -846,8 +846,8 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid)
 	return sw_fid;
 }
 
-#define PURE_LB_TC 8
 #define PKT_LB_TC 9
+#define MAX_NUM_VOQS_E4 20
 
 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 46455ea..3ebeb12 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -59,8 +59,8 @@
 
 /* connection context union */
 union conn_context {
-	struct core_conn_context core_ctx;
-	struct eth_conn_context eth_ctx;
+	struct e4_core_conn_context core_ctx;
+	struct e4_eth_conn_context eth_ctx;
 };
 
 /* TYPE-0 task context - iSCSI, FCOE */
@@ -1432,11 +1432,14 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
 void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 {
 	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+	struct ecore_mcp_link_state *p_link;
 	struct ecore_qm_iids iids;
 
 	OSAL_MEM_ZERO(&iids, sizeof(iids));
 	ecore_cxt_qm_iids(p_hwfn, &iids);
 
+	p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
 	ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
 			    p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
 			    iids.cids, iids.vf_cids, iids.tids,
@@ -1445,7 +1448,8 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 			    qm_info->num_vf_pqs,
 			    qm_info->start_vport,
 			    qm_info->num_vports, qm_info->pf_wfq,
-			    qm_info->pf_rl, p_hwfn->qm_info.qm_pq_params,
+			    qm_info->pf_rl, p_link->speed,
+			    p_hwfn->qm_info.qm_pq_params,
 			    p_hwfn->qm_info.qm_vport_params);
 }
 
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 22525df..66f21fb 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -931,8 +931,6 @@ void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
 	struct protocol_dcb_data *p_dcb_data;
 	u8 update_flag;
 
-	p_dest->pf_id = p_src->pf_id;
-
 	update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
 	p_dest->update_eth_dcb_data_mode = update_flag;
 	update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update;
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 4db9d5b..edf2896 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -665,7 +665,7 @@ static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
 
 		p_qm_port->active = 1;
 		p_qm_port->active_phys_tcs = active_phys_tcs;
-		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports;
 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
 	}
 }
@@ -2059,7 +2059,21 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
 					       struct ecore_ptt *p_ptt,
 					       int hw_mode)
 {
+	u32 ppf_to_eng_sel[NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE];
+	u32 val;
 	enum _ecore_status_t rc	= ECORE_SUCCESS;
+	u8 i;
+
+	/* In CMT for non-RoCE packets - use connection based classification */
+	val = ECORE_IS_CMT(p_hwfn->p_dev) ? 0x8 : 0x0;
+	for (i = 0; i < NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE; i++)
+		ppf_to_eng_sel[i] = val;
+	STORE_RT_REG_AGG(p_hwfn, NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET,
+			 ppf_to_eng_sel);
+
+	/* In CMT the gate should be cleared by the 2nd hwfn */
+	if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
+		STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
 
 	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
 			    hw_mode);
@@ -3959,7 +3973,6 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
 			   "Mark hw/fw uninitialized\n");
 
 		p_hwfn->hw_init_done = false;
-		p_hwfn->first_on_engine = false;
 
 		ecore_ptt_invalidate(p_hwfn);
 	}
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 5c2a08f..d8abd60 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -618,7 +618,7 @@ struct ustorm_core_conn_st_ctx {
 /*
  * core connection context
  */
-struct core_conn_context {
+struct e4_core_conn_context {
 /* ystorm storm context */
 	struct ystorm_core_conn_st_ctx ystorm_st_context;
 	struct regpair ystorm_st_padding[2] /* padding */;
@@ -661,6 +661,7 @@ enum core_event_opcode {
 	CORE_EVENT_RX_QUEUE_START,
 	CORE_EVENT_RX_QUEUE_STOP,
 	CORE_EVENT_RX_QUEUE_FLUSH,
+	CORE_EVENT_TX_QUEUE_UPDATE,
 	MAX_CORE_EVENT_OPCODE
 };
 
@@ -745,6 +746,7 @@ enum core_ramrod_cmd_id {
 	CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
 	CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
 	CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
+	CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
 	MAX_CORE_RAMROD_CMD_ID
 };
 
@@ -858,7 +860,8 @@ struct core_rx_gsi_offload_cqe {
 	__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
 /* These are the lower 16 bit of QP id in RoCE BTH header */
 	__le16 qp_id;
-	__le32 gid_dst[4] /* Gid destination address */;
+	__le32 src_qp /* Source QP from DETH header */;
+	__le32 reserved[3];
 };
 
 /*
@@ -899,7 +902,10 @@ struct core_rx_start_ramrod_data {
 	u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
 	__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
 /* if set, 802.1q tags will be removed and copied to CQE */
-	u8 inner_vlan_removal_en;
+/* if set, 802.1q tags will be removed and copied to CQE */
+	u8 inner_vlan_stripping_en;
+/* if set, outer tag won't be stripped, valid only in MF OVLAN. */
+	u8 outer_vlan_stripping_dis;
 	u8 queue_id /* Light L2 RX Queue ID */;
 	u8 main_func_queue /* Is this the main queue for the PF */;
 /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
@@ -916,7 +922,7 @@ struct core_rx_start_ramrod_data {
 	struct core_rx_action_on_error action_on_error;
 /* set when in GSI offload mode on ROCE connection */
 	u8 gsi_offload_flag;
-	u8 reserved[7];
+	u8 reserved[6];
 };
 
 
@@ -938,48 +944,51 @@ struct core_rx_stop_ramrod_data {
 struct core_tx_bd_data {
 	__le16 as_bitfield;
 /* Do not allow additional VLAN manipulations on this packet (DCB) */
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK      0x1
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT     0
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK         0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT        0
 /* Insert VLAN into packet */
-#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK       0x1
-#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT      1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK          0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT         1
 /* This is the first BD of the packet (for debug) */
-#define CORE_TX_BD_DATA_START_BD_MASK             0x1
-#define CORE_TX_BD_DATA_START_BD_SHIFT            2
+#define CORE_TX_BD_DATA_START_BD_MASK                0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT               2
 /* Calculate the IP checksum for the packet */
-#define CORE_TX_BD_DATA_IP_CSUM_MASK              0x1
-#define CORE_TX_BD_DATA_IP_CSUM_SHIFT             3
+#define CORE_TX_BD_DATA_IP_CSUM_MASK                 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT                3
 /* Calculate the L4 checksum for the packet */
-#define CORE_TX_BD_DATA_L4_CSUM_MASK              0x1
-#define CORE_TX_BD_DATA_L4_CSUM_SHIFT             4
+#define CORE_TX_BD_DATA_L4_CSUM_MASK                 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT                4
 /* Packet is IPv6 with extensions */
-#define CORE_TX_BD_DATA_IPV6_EXT_MASK             0x1
-#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT            5
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK                0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT               5
 /* If IPv6+ext, and if l4_csum is 1, than this field indicates L4 protocol:
  * 0-TCP, 1-UDP
  */
-#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK          0x1
-#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT         6
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK             0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT            6
 /* The pseudo checksum mode to place in the L4 checksum field. Required only
  * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
  */
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK  0x1
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK     0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT    7
 /* Number of BDs that make up one packet - width wide enough to present
  * CORE_LL2_TX_MAX_BDS_PER_PACKET
  */
-#define CORE_TX_BD_DATA_NBDS_MASK                 0xF
-#define CORE_TX_BD_DATA_NBDS_SHIFT                8
+#define CORE_TX_BD_DATA_NBDS_MASK                    0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT                   8
 /* Use roce_flavor enum - Differentiate between Roce flavors is valid when
  * connType is ROCE (use enum core_roce_flavor_type)
  */
-#define CORE_TX_BD_DATA_ROCE_FLAV_MASK            0x1
-#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT           12
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK               0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT              12
 /* Calculate ip length */
-#define CORE_TX_BD_DATA_IP_LEN_MASK               0x1
-#define CORE_TX_BD_DATA_IP_LEN_SHIFT              13
-#define CORE_TX_BD_DATA_RESERVED0_MASK            0x3
-#define CORE_TX_BD_DATA_RESERVED0_SHIFT           14
+#define CORE_TX_BD_DATA_IP_LEN_MASK                  0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT                 13
+/* disables the STAG insertion, relevant only in MF OVLAN mode. */
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK  0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
+#define CORE_TX_BD_DATA_RESERVED0_MASK               0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT              15
 };
 
 /*
@@ -1046,6 +1055,17 @@ struct core_tx_stop_ramrod_data {
 
 
 /*
+ * Ramrod data for tx queue update ramrod
+ */
+struct core_tx_update_ramrod_data {
+	u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
+	u8 reserved0;
+	__le16 qm_pq_id /* Updated QM PQ ID */;
+	__le32 reserved1[1];
+};
+
+
+/*
  * Enum flag for what type of dcb data to update
  */
 enum dcb_dscp_update_mode {
@@ -1182,6 +1202,63 @@ struct eth_ustorm_per_queue_stat {
 
 
 /*
+ * Event Ring VF-PF Channel data
+ */
+struct vf_pf_channel_eqe_data {
+	struct regpair msg_addr /* VF-PF message address */;
+};
+
+/*
+ * Event Ring malicious VF data
+ */
+struct malicious_vf_eqe_data {
+	u8 vf_id /* Malicious VF ID */;
+	u8 err_id /* Malicious VF error (use enum malicious_vf_error_id) */;
+	__le16 reserved[3];
+};
+
+/*
+ * Event Ring initial cleanup data
+ */
+struct initial_cleanup_eqe_data {
+	u8 vf_id /* VF ID */;
+	u8 reserved[7];
+};
+
+/*
+ * Event Data Union
+ */
+union event_ring_data {
+	u8 bytes[8] /* Byte Array */;
+	struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
+	struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
+/* Dedicated fields to iscsi connect done results */
+	struct iscsi_connect_done_results iscsi_conn_done_info;
+	struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
+/* VF Initial Cleanup data */
+	struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+
+/*
+ * Event Ring Entry
+ */
+struct event_ring_entry {
+	u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
+	u8 opcode /* Event Opcode */;
+	__le16 reserved0 /* Reserved */;
+	__le16 echo /* Echo value from ramrod data on the host */;
+	u8 fw_return_code /* FW return code for SP ramrods */;
+	u8 flags;
+/* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
+#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
+#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+	union event_ring_data data;
+};
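
The flags byte separates synchronous EQEs (completion of a slowpath ramrod, matched by echo) from asynchronous ones (VF-PF channel, malicious VF, initial cleanup), and the 8-byte union is interpreted per protocol_id/opcode. A hedged, standalone sketch of that branch; the struct below is a simplified host-order view, not the wire layout, and the handling is reduced to prints:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EQE_ASYNC_MASK  0x1 /* mirrors EVENT_RING_ENTRY_ASYNC_MASK */
#define EQE_ASYNC_SHIFT 0   /* mirrors EVENT_RING_ENTRY_ASYNC_SHIFT */

/* Simplified host-order view of an event ring entry. */
struct eqe_view {
	uint8_t  protocol_id;
	uint8_t  opcode;
	uint16_t echo;
	uint8_t  fw_return_code;
	uint8_t  flags;
	uint8_t  data[8];
};

static bool eqe_is_async(const struct eqe_view *eqe)
{
	return ((eqe->flags >> EQE_ASYNC_SHIFT) & EQE_ASYNC_MASK) != 0;
}

static void eqe_dispatch(const struct eqe_view *eqe)
{
	if (eqe_is_async(eqe))
		/* e.g. VF-PF channel message or malicious VF report */
		printf("async event: proto %u opcode %u\n",
		       (unsigned int)eqe->protocol_id,
		       (unsigned int)eqe->opcode);
	else
		/* completion of the slowpath ramrod identified by echo */
		printf("ramrod done: echo %u rc %u\n",
		       (unsigned int)eqe->echo,
		       (unsigned int)eqe->fw_return_code);
}

int main(void)
{
	struct eqe_view e = { .opcode = 3, .echo = 42, .flags = 0 };

	eqe_dispatch(&e);
	return 0;
}
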
+
+/*
  * Event Ring Next Page Address
  */
 struct event_ring_next_addr {
@@ -1211,6 +1288,18 @@ enum fw_flow_ctrl_mode {
 
 
 /*
+ * GFT profile type.
+ */
+enum gft_profile_type {
+	GFT_PROFILE_TYPE_4_TUPLE /* 4 tuple, IP type and L4 type match. */,
+/* L4 destination port, IP type and L4 type match. */
+	GFT_PROFILE_TYPE_L4_DST_PORT,
+	GFT_PROFILE_TYPE_IP_DST_PORT /* IP destination port and IP type. */,
+	MAX_GFT_PROFILE_TYPE
+};
+
+
+/*
  * Major and Minor hsi Versions
  */
 struct hsi_fp_ver_struct {
@@ -1311,6 +1400,34 @@ struct mstorm_vf_zone {
 
 
 /*
+ * vlan header including TPID and TCI fields
+ */
+struct vlan_header {
+	__le16 tpid /* Tag Protocol Identifier */;
+	__le16 tci /* Tag Control Information */;
+};
+
+/*
+ * outer tag configurations
+ */
+struct outer_tag_config_struct {
+/* Enables the STAG Priority Change. Should be 1 for Bette Davis and UFP with
+ * Host Control mode, else - 0.
+ */
+	u8 enable_stag_pri_change;
+/* If inner_to_outer_pri_map is initialized then set pri_map_valid */
+	u8 pri_map_valid;
+	u8 reserved[2];
+/* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol
+ * identifier and outer tag control information
+ */
+	struct vlan_header outer_tag;
+/* Map from inner to outer priority. Set pri_map_valid when init map */
+	u8 inner_to_outer_pri_map[8];
+};
+
+
+/*
  * personality per PF
  */
 enum personality_type {
@@ -1361,7 +1478,6 @@ struct pf_start_ramrod_data {
 	struct regpair consolid_q_pbl_addr;
 /* tunnel configuration. */
 	struct pf_start_tunnel_config tunnel_config;
-	__le32 reserved;
 	__le16 event_ring_sb_id /* Status block ID */;
 /* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
 	u8 base_vf_id;
@@ -1381,16 +1497,11 @@ struct pf_start_ramrod_data {
 	u8 integ_phase /* Integration phase */;
 /* If set, inter-pf tx switching is allowed in Switch Independent func mode */
 	u8 allow_npar_tx_switching;
-/* Map from inner to outer priority. Set pri_map_valid when init map */
-	u8 inner_to_outer_pri_map[8];
-/* If inner_to_outer_pri_map is initialize then set pri_map_valid */
-	u8 pri_map_valid;
-/* In case mf_mode is MF_OVLAN, this field specifies the outer vlan
- * (lower 16 bits) and ethType to use (higher 16 bits)
- */
-	__le32 outer_tag;
+	u8 reserved0;
 /* FP HSI version to be used by FW */
 	struct hsi_fp_ver_struct hsi_fp_ver;
+/* Outer tag configurations */
+	struct outer_tag_config_struct outer_tag_config;
 };
 
 
@@ -1441,15 +1552,19 @@ struct pf_update_tunnel_config {
  * Data for port update ramrod
  */
 struct pf_update_ramrod_data {
-	u8 pf_id;
-	u8 update_eth_dcb_data_mode /* Update Eth DCB  data indication */;
-	u8 update_fcoe_dcb_data_mode /* Update FCOE DCB  data indication */;
-	u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB  data indication */;
+/* Update Eth DCB  data indication (use enum dcb_dscp_update_mode) */
+	u8 update_eth_dcb_data_mode;
+/* Update FCOE DCB  data indication (use enum dcb_dscp_update_mode) */
+	u8 update_fcoe_dcb_data_mode;
+/* Update iSCSI DCB  data indication (use enum dcb_dscp_update_mode) */
+	u8 update_iscsi_dcb_data_mode;
 	u8 update_roce_dcb_data_mode /* Update ROCE DCB  data indication */;
 /* Update RROCE (RoceV2) DCB  data indication */
 	u8 update_rroce_dcb_data_mode;
 	u8 update_iwarp_dcb_data_mode /* Update IWARP DCB  data indication */;
 	u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
+/* Update Enable STAG Priority Change indication */
+	u8 update_enable_stag_pri_change;
 	struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
 	struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
 /* core iscsi related fields */
@@ -1460,7 +1575,11 @@ struct pf_update_ramrod_data {
 /* core iwarp related fields */
 	struct protocol_dcb_data iwarp_dcb_data;
 	__le16 mf_vlan /* new outer vlan id value */;
-	__le16 reserved;
+/* enables the inner to outer TAG priority mapping. Should be 1 for Bette Davis
+ * and UFP with Host Control mode, else - 0.
+ */
+	u8 enable_stag_pri_change;
+	u8 reserved;
 /* tunnel configuration. */
 	struct pf_update_tunnel_config tunnel_config;
 };
@@ -1745,6 +1864,7 @@ enum vf_zone_size_mode {
 
 
 
+
 /*
  * Attentions status block
  */
@@ -1758,17 +1878,6 @@ struct atten_status_block {
 
 
 /*
- * Igu cleanup bit values to distinguish between clean or producer consumer
- * update.
- */
-enum command_type_bit {
-	IGU_COMMAND_TYPE_NOP = 0,
-	IGU_COMMAND_TYPE_SET = 1,
-	MAX_COMMAND_TYPE_BIT
-};
-
-
-/*
  * DMAE command
  */
 struct dmae_cmd {
@@ -2200,23 +2309,23 @@ struct qm_rf_opportunistic_mask {
 /*
  * QM hardware structure of QM map memory
  */
-struct qm_rf_pq_map {
+struct qm_rf_pq_map_e4 {
 	__le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK          0x1 /* PQ active */
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT         0
-#define QM_RF_PQ_MAP_RL_ID_MASK             0xFF /* RL ID */
-#define QM_RF_PQ_MAP_RL_ID_SHIFT            1
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK          0x1 /* PQ active */
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT         0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK             0xFF /* RL ID */
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT            1
 /* the first PQ associated with the VPORT and VOQ of this PQ */
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK          0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT         9
-#define QM_RF_PQ_MAP_VOQ_MASK               0x1F /* VOQ */
-#define QM_RF_PQ_MAP_VOQ_SHIFT              18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK          0x1 /* RL active */
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT         25
-#define QM_RF_PQ_MAP_RESERVED_MASK          0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT         26
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK          0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT         9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK               0x1F /* VOQ */
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT              18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK          0x1 /* RL active */
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT         25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK          0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT         26
 };
 
 
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index 7443ff9..ebb6648 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -1053,7 +1053,7 @@ enum dbg_status {
 	DBG_STATUS_MCP_TRACE_NO_META,
 	DBG_STATUS_MCP_COULD_NOT_HALT,
 	DBG_STATUS_MCP_COULD_NOT_RESUME,
-	DBG_STATUS_DMAE_FAILED,
+	DBG_STATUS_RESERVED2,
 	DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
 	DBG_STATUS_IGU_FIFO_BAD_DATA,
 	DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -1107,7 +1107,9 @@ struct dbg_tools_data {
 	u8 chip_id /* Chip ID (from enum chip_ids) */;
 	u8 platform_id /* Platform ID */;
 	u8 initialized /* Indicates if the data was initialized */;
-	u8 reserved;
+	u8 use_dmae /* Indicates if DMAE should be used */;
+/* Numbers of registers that were read since last log */
+	__le32 num_regs_read;
 };
 
 
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index 397c408..ffbf5c7 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -669,7 +669,7 @@ struct mstorm_eth_conn_st_ctx {
 /*
  * eth connection context
  */
-struct eth_conn_context {
+struct e4_eth_conn_context {
 /* tstorm storm context */
 	struct tstorm_eth_conn_st_ctx tstorm_st_context;
 	struct regpair tstorm_st_padding[2] /* padding */;
@@ -765,6 +765,7 @@ enum eth_event_opcode {
 	ETH_EVENT_RX_DELETE_UDP_FILTER,
 	ETH_EVENT_RX_CREATE_GFT_ACTION,
 	ETH_EVENT_RX_GFT_UPDATE_FILTER,
+	ETH_EVENT_TX_QUEUE_UPDATE,
 	MAX_ETH_EVENT_OPCODE
 };
 
@@ -882,6 +883,7 @@ enum eth_ramrod_cmd_id {
 	ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a Gft Action */,
 /* RX - Add/Delete a GFT Filter to the Searcher */
 	ETH_RAMROD_GFT_UPDATE_FILTER,
+	ETH_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
 	MAX_ETH_RAMROD_CMD_ID
 };
 
@@ -1092,7 +1094,7 @@ struct eth_vport_tx_mode {
 
 
 /*
- * Ramrod data for rx create gft action
+ * GFT filter update action type.
  */
 enum gft_filter_update_action {
 	GFT_ADD_FILTER,
@@ -1101,16 +1103,6 @@ enum gft_filter_update_action {
 };
 
 
-/*
- * Ramrod data for rx create gft action
- */
-enum gft_logic_filter_type {
-	GFT_FILTER_TYPE /* flow FW is GFT-logic as well */,
-	RFS_FILTER_TYPE /* flow FW is A-RFS-logic */,
-	MAX_GFT_LOGIC_FILTER_TYPE
-};
-
-
 
 
 /*
@@ -1166,7 +1158,7 @@ struct rx_create_openflow_action_data {
  */
 struct rx_queue_start_ramrod_data {
 	__le16 rx_queue_id /* ID of RX queue */;
-	__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+	__le16 num_of_pbl_pages /* Number of pages in CQE PBL */;
 	__le16 bd_max_bytes /* maximal bytes that can be places on the bd */;
 	__le16 sb_id /* Status block ID */;
 	u8 sb_index /* index of the protocol index */;
@@ -1254,26 +1246,34 @@ struct rx_udp_filter_data {
 
 
 /*
- * Ramrod to add filter - filter is packet headr of type of packet wished to
- * pass certin FW flow
+ * add or delete GFT filter - the filter is a packet header of the packet type
+ * wished to pass a certain FW flow
  */
 struct rx_update_gft_filter_data {
 /* Pointer to Packet Header That Defines GFT Filter */
 	struct regpair pkt_hdr_addr;
 	__le16 pkt_hdr_length /* Packet Header Length */;
-/* If is_rfs flag is set: Queue Id to associate filter with else: action icid */
-	__le16 rx_qid_or_action_icid;
-/* Field is used if is_rfs flag is set: vport Id of which to associate filter
- * with
- */
-	u8 vport_id;
-/* Use enum to set type of flow using gft HW logic blocks */
-	u8 filter_type;
+/* Action icid. Valid if action_icid_valid flag set. */
+	__le16 action_icid;
+	__le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */;
+	__le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */;
+	u8 vport_id /* RX vport Id. */;
+/* If set, action_icid will be used for GFT filter update. */
+	u8 action_icid_valid;
+/* If set, rx_qid will be used for traffic steering, in addition to vport_id.
+ * flow_id_valid must be cleared. If cleared, the queue ID is selected by RSS.
+ */
+	u8 rx_qid_valid;
+/* If set, flow_id will be reported by the CQE, rx_qid_valid must be cleared.
+ * If cleared, flow_id 0 will be reported by the CQE.
+ */
+	u8 flow_id_valid;
 	u8 filter_action /* Use to set type of action on filter */;
 /* 0 - dont assert in case of error. Just return an error code. 1 - assert in
  * case of error.
  */
 	u8 assert_on_error;
+	u8 reserved[2];
 };
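
The old rx_qid_or_action_icid/filter_type pair is replaced by explicit validity flags, and the comments above spell out that rx_qid_valid and flow_id_valid are mutually exclusive. A standalone sketch of the intended selection logic (host-order mirror of just the steering-related fields; the enum names are local, not the driver's):

#include <stdint.h>
#include <string.h>

enum gft_steer_mode {
	GFT_STEER_BY_RSS,   /* no queue given: RSS picks the queue */
	GFT_STEER_TO_QUEUE, /* rx_qid_valid: steer to a specific Rx queue */
	GFT_STEER_FLOW_ID,  /* flow_id_valid: report flow_id in the CQE */
};

/* Host-order mirror of the steering-related ramrod fields. */
struct gft_update_view {
	uint16_t rx_qid;
	uint16_t flow_id;
	uint8_t  vport_id;
	uint8_t  rx_qid_valid;
	uint8_t  flow_id_valid;
};

static void gft_fill_steering(struct gft_update_view *p, uint8_t vport,
			      enum gft_steer_mode mode, uint16_t id)
{
	memset(p, 0, sizeof(*p));
	p->vport_id = vport;

	switch (mode) {
	case GFT_STEER_TO_QUEUE:
		p->rx_qid = id;
		p->rx_qid_valid = 1;   /* flow_id_valid must stay clear */
		break;
	case GFT_STEER_FLOW_ID:
		p->flow_id = id;
		p->flow_id_valid = 1;  /* rx_qid_valid must stay clear */
		break;
	case GFT_STEER_BY_RSS:
	default:
		break;                 /* both flags clear: RSS decides */
	}
}
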
 
 
@@ -1344,6 +1344,17 @@ struct tx_queue_stop_ramrod_data {
 };
 
 
+/*
+ * Ramrod data for tx queue update ramrod
+ */
+struct tx_queue_update_ramrod_data {
+	__le16 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
+	__le16 qm_pq_id /* Updated QM PQ ID */;
+	__le32 reserved0;
+	struct regpair reserved1[5];
+};
+
+
 
 /*
  * Ramrod data for vport update ramrod
@@ -1388,9 +1399,9 @@ struct vport_start_ramrod_data {
 /* If set, ETH header padding will not be inserted. placement_offset will be zero.
  */
 	u8 zero_placement_offset;
-/* If set, Contorl frames will be filtered according to MAC check. */
+/* If set, control frames will be filtered according to MAC check. */
 	u8 ctl_frame_mac_check_en;
-/* If set, Contorl frames will be filtered according to ethtype check. */
+/* If set, control frames will be filtered according to ethtype check. */
 	u8 ctl_frame_ethtype_check_en;
 	u8 reserved[5];
 };
@@ -1456,9 +1467,9 @@ struct vport_update_ramrod_data_cmn {
  * updated
  */
 	u8 update_ctl_frame_checks_en_flg;
-/* If set, Contorl frames will be filtered according to MAC check. */
+/* If set, control frames will be filtered according to MAC check. */
 	u8 ctl_frame_mac_check_en;
-/* If set, Contorl frames will be filtered according to ethtype check. */
+/* If set, control frames will be filtered according to ethtype check. */
 	u8 ctl_frame_ethtype_check_en;
 	u8 reserved[15];
 };
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index ad697ad..a739ba8 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -20,12 +20,12 @@
 
 #define CDU_VALIDATION_DEFAULT_CFG 61
 
-static u16 con_region_offsets[3][E4_NUM_OF_CONNECTION_TYPES] = {
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
 	{ 400,  336,  352,  304,  304,  384,  416,  352}, /* region 3 offsets */
 	{ 528,  496,  416,  448,  448,  512,  544,  480}, /* region 4 offsets */
 	{ 608,  544,  496,  512,  576,  592,  624,  560}  /* region 5 offsets */
 };
-static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
 	{ 240,  240,  112,    0,    0,    0,    0,   96}  /* region 1 offsets */
 };
 
@@ -43,6 +43,9 @@
 /* Other PQ constants */
 #define QM_OTHER_PQS_PER_PF		4
 
+/* VOQ constants */
+#define QM_E5_NUM_EXT_VOQ		(MAX_NUM_PORTS_E5 * NUM_OF_TCS)
+
 /* WFQ constants: */
 
 /* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
@@ -52,18 +55,19 @@
 #define QM_WFQ_VP_PQ_VOQ_SHIFT		0
 
 /* Bit  of PF in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_PF_SHIFT		5
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT	5
+#define QM_WFQ_VP_PQ_PF_E5_SHIFT	6
 
 /* 0x9000 = 4*9*1024 */
 #define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)
 
-/* 0.7 * upper bound (62500000) */
-#define QM_WFQ_MAX_INC_VAL		43750000
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL		((QM_WFQ_UPPER_BOUND * 7) / 10)
 
-/* RL constants: */
+/* Number of VOQs in E5 QmWfqCrd register */
+#define QM_WFQ_CRD_E5_NUM_VOQS		16
 
-/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
-#define QM_RL_UPPER_BOUND		62500000
+/* RL constants: */
 
 /* Period in us */
 #define QM_RL_PERIOD			5
@@ -71,18 +75,32 @@
 /* Period in 25MHz cycles */
 #define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)
 
-/* 0.7 * upper bound (62500000) */
-#define QM_RL_MAX_INC_VAL		43750000
-
 /* RL increment value - rate is specified in mbps. the factor of 1.01 was
- * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
- * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
- * although the credit increment value was the correct one and FW calculated
- * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
- * this point.
- */
-#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
-				       QM_RL_PERIOD * 101) / (8 * 100)), 1)
+ * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
+ * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
+ * although the credit increment value was the correct one and FW calculated
+ * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
+ * this point.
+ */
+#define QM_RL_INC_VAL(rate) \
+	OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
+	(8 * 100)), 1)
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND		62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL		((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mbps */
+#define QM_VP_RL_UPPER_BOUND(speed) \
+	((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed)	QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED	(QM_VP_RL_UPPER_BOUND(10000) - 1)
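
With the split into PF and VPORT bounds, the increment itself stays rate[Mbps] * period[us] * 1.01 / 8 in integer arithmetic. A small standalone recomputation for a 25 Gbps PF limit, mirroring QM_RL_INC_VAL and QM_PF_RL_MAX_INC_VAL above with local names:

#include <stdint.h>
#include <stdio.h>

#define RL_PERIOD_US        5         /* mirrors QM_RL_PERIOD */
#define PF_RL_UPPER_BOUND   62500000u /* mirrors QM_PF_RL_UPPER_BOUND */
#define PF_RL_MAX_INC_VAL   ((PF_RL_UPPER_BOUND * 7u) / 10u)

/* rate in Mbps -> credit increment; the 101/100 factor compensates the
 * ~1% shortfall seen in RFC2544 runs, a zero rate falls back to 100000.
 */
static uint32_t rl_inc_val(uint32_t rate_mbps)
{
	uint32_t rate = rate_mbps ? rate_mbps : 100000u;
	uint32_t inc = (rate * RL_PERIOD_US * 101u) / (8u * 100u);

	return inc ? inc : 1u;
}

int main(void)
{
	uint32_t inc = rl_inc_val(25000); /* 25 Gbps */

	/* 25000 * 5 * 101 / 800 = 15781 -> far below the 43750000 cap */
	printf("inc=%u max=%u\n", inc, PF_RL_MAX_INC_VAL);
	return 0;
}
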
 
 /* AFullOprtnstcCrdMask constants */
 #define QM_OPPOR_LINE_VOQ_DEF		1
@@ -94,13 +112,17 @@
 /* Pure LB CmdQ lines (+spare) */
 #define PBF_CMDQ_PURE_LB_LINES		150
 
-#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
-	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO	8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+	 ext_voq * \
 	 (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
 	  PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
 
-#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
-	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+	 ext_voq * \
 	 (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
 	  PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
 
@@ -140,25 +162,58 @@
 #define QM_CMD_SET_FIELD(var, cmd, field, value) \
 	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
 
-/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
-	((port) * (max_phys_tcs_per_port) + (tc))
-#define LB_VOQ(port)				 (MAX_PHYS_VOQS + (port))
-#define VOQ(port, tc, max_phys_tcs_per_port) \
-	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
-				 LB_VOQ(port))
-
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, \
+			  vp_pq_id, rl_id, ext_voq, wrr) \
+	do {						\
+		OSAL_MEMSET(&map, 0, sizeof(map)); \
+		SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
+		SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
+		SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
+		SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
+		SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
+		SET_FIELD(map.reg, \
+			  QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
+		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, \
+			     *((u32 *)&map)); \
+	} while (0)
+
+#define WRITE_PQ_INFO_TO_RAM		1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl)	\
+	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) |    \
+	 ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4)
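
When WRITE_PQ_INFO_TO_RAM is set, the per-PQ attributes are packed by PQ_INFO_ELEMENT into one dword and written to internal RAM at PQ_INFO_RAM_GRC_ADDRESS(pq_id): vport in bits 0..11, pf in 12..15, tc in 16..19, port in 20..21, rl_valid in bit 22 and the rate limiter id in 24..31. A quick standalone recomputation of that packing for one example PQ (the local macro mirrors the one above):

#include <stdint.h>
#include <stdio.h>

/* Mirrors PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) above. */
#define PQ_INFO(vp, pf, tc, port, rl_valid, rl)              \
	(((uint32_t)(vp) << 0)  | ((uint32_t)(pf) << 12) |    \
	 ((uint32_t)(tc) << 16) | ((uint32_t)(port) << 20) |  \
	 ((uint32_t)(rl_valid) << 22) | ((uint32_t)(rl) << 24))

int main(void)
{
	/* vport pq 5, PF 2, TC 3, port 1, RL enabled with RL id 7 */
	uint32_t info = PQ_INFO(5, 2, 3, 1, 1, 7);

	/* 0x5 | 0x2000 | 0x30000 | 0x100000 | 0x400000 | 0x7000000 */
	printf("pq_info = 0x%08x\n", info); /* prints 0x07532005 */
	return 0;
}
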
 
 /******************** INTERNAL IMPLEMENTATION *********************/
 
+/* Returns the external VOQ number */
+static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
+			    u8 port_id,
+			    u8 tc,
+			    u8 max_phys_tcs_per_port)
+{
+	if (tc == PURE_LB_TC)
+		return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
+	else
+		return port_id * (max_phys_tcs_per_port) + tc;
+}
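
This helper replaces the PHYS_VOQ/LB_VOQ/VOQ macros: a physical TC lands on port_id * max_phys_tcs_per_port + tc, while the pure LB TC is mapped past all physical VOQs at NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id. A standalone restatement with example numbers; the constants below assume the usual E4 values (8 physical TCs, 2 BB ports, pure LB TC id 8) and drop the unused p_hwfn argument:

#include <stdint.h>
#include <stdio.h>

/* Assumed E4 values, mirroring NUM_OF_PHYS_TCS / MAX_NUM_PORTS_BB / PURE_LB_TC */
#define N_PHYS_TCS     8
#define N_PORTS_BB     2
#define PURE_LB_TC_ID  8

/* Same mapping as ecore_get_ext_voq() above. */
static uint8_t ext_voq(uint8_t port_id, uint8_t tc, uint8_t max_phys_tcs)
{
	if (tc == PURE_LB_TC_ID)
		return N_PHYS_TCS * N_PORTS_BB + port_id;
	return port_id * max_phys_tcs + tc;
}

int main(void)
{
	/* port 1, TC 2, 4 physical TCs per port -> VOQ 6 */
	printf("phys voq = %u\n", ext_voq(1, 2, 4));
	/* pure LB TC on port 1 -> VOQ 17 (the 16 physical VOQs come first) */
	printf("lb voq   = %u\n", ext_voq(1, PURE_LB_TC_ID, 4));
	return 0;
}
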
+
 /* Prepare PF RL enable/disable runtime init values */
 static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
 {
 	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
 	if (pf_rl_en) {
+		u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
 		/* Enable RLs for all VOQs */
 		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
-			     (1 << MAX_NUM_VOQS) - 1);
+			     (u32)voq_bit_mask);
+#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
+		if (num_ext_voqs >= 32)
+			STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+				     (u32)(voq_bit_mask >> 32));
+#endif
 
 		/* Write RL period */
 		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
@@ -169,7 +224,7 @@ static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
 		/* Set credit threshold for QM bypass flow */
 		if (QM_BYPASS_EN)
 			STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
-				     QM_RL_UPPER_BOUND);
+				     QM_PF_RL_UPPER_BOUND);
 	}
 }
 
@@ -200,7 +255,7 @@ static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
 		if (QM_BYPASS_EN)
 			STORE_RT_REG(p_hwfn,
 				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
-				     QM_RL_UPPER_BOUND);
+				     QM_VP_RL_BYPASS_THRESH_SPEED);
 	}
 }
 
@@ -220,17 +275,19 @@ static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
  * the specified VOQ
  */
 static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
-					 u8 voq, u16 cmdq_lines)
+					 u8 ext_voq,
+					 u16 cmdq_lines)
 {
 	u32 qm_line_crd;
 
 	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
 
-	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
 			 (u32)cmdq_lines);
-	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
-	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
-		     qm_line_crd);
+	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+			 qm_line_crd);
+	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
+			 qm_line_crd);
 }
 
 /* Prepare runtime init values to allocate PBF command queue lines. */
@@ -240,11 +297,12 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
 				     struct init_qm_port_params
 				     port_params[MAX_NUM_PORTS])
 {
-	u8 tc, voq, port_id, num_tcs_in_port;
+	u8 tc, ext_voq, port_id, num_tcs_in_port;
+	u8 num_ext_voqs = MAX_NUM_VOQS_E4;
 
-	/* Clear PBF lines for all VOQs */
-	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
-		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+	/* Clear PBF lines of all VOQs */
+	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
 
 	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
 		u16 phys_lines, phys_lines_per_tc;
@@ -252,31 +310,35 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
 		if (!port_params[port_id].active)
 			continue;
 
-		/* Find #lines to divide between the active physical TCs */
-		phys_lines = port_params[port_id].num_pbf_cmd_lines -
-			     PBF_CMDQ_PURE_LB_LINES;
+		/* Find number of command queue lines to divide between the
+		 * active physical TCs. In E5, 1/8 of the lines are reserved.
+		 * The lines for the pure LB TC are subtracted.
+		 */
+		phys_lines = port_params[port_id].num_pbf_cmd_lines;
+		phys_lines -= PBF_CMDQ_PURE_LB_LINES;
 
 		/* Find #lines per active physical TC */
 		num_tcs_in_port = 0;
-		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
 			if (((port_params[port_id].active_phys_tcs >> tc) &
 			      0x1) == 1)
 				num_tcs_in_port++;
 		phys_lines_per_tc = phys_lines / num_tcs_in_port;
 
 		/* Init registers per active TC */
-		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+			ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
+						    max_phys_tcs_per_port);
 			if (((port_params[port_id].active_phys_tcs >> tc) &
-			      0x1) == 1) {
-				voq = PHYS_VOQ(port_id, tc,
-					       max_phys_tcs_per_port);
-				ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+			    0x1) == 1)
+				ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
 							     phys_lines_per_tc);
-			}
 		}
 
 		/* Init registers for pure LB TC */
-		ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
+					    max_phys_tcs_per_port);
+		ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
 					     PBF_CMDQ_PURE_LB_LINES);
 	}
 }
@@ -308,7 +370,7 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
 				     port_params[MAX_NUM_PORTS])
 {
 	u32 usable_blocks, pure_lb_blocks, phys_blocks;
-	u8 tc, voq, port_id, num_tcs_in_port;
+	u8 tc, ext_voq, port_id, num_tcs_in_port;
 
 	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
 		if (!port_params[port_id].active)
@@ -339,18 +401,19 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
 		/* Init physical TCs */
 		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
 			if (((port_params[port_id].active_phys_tcs >> tc) &
-			      0x1) == 1) {
-				voq = PHYS_VOQ(port_id, tc,
-					       max_phys_tcs_per_port);
+			     0x1) == 1) {
+				ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
+							 max_phys_tcs_per_port);
 				STORE_RT_REG(p_hwfn,
-					     PBF_BTB_GUARANTEED_RT_OFFSET(voq),
-					     phys_blocks);
+					PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
+					phys_blocks);
 			}
 		}
 
 		/* Init pure LB TC */
-		STORE_RT_REG(p_hwfn,
-			     PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ(port_id)),
+		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
+					    max_phys_tcs_per_port);
+		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
 			     pure_lb_blocks);
 	}
 }
@@ -400,12 +463,12 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
 	/* Go over all Tx PQs */
 	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
 		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
-		struct qm_rf_pq_map tx_pq_map;
+		u8 ext_voq, vport_id_in_pf;
 		bool is_vf_pq, rl_valid;
-		u8 voq, vport_id_in_pf;
 		u16 first_tx_pq_id;
 
-		voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+					    max_phys_tcs_per_port);
 		is_vf_pq = (i >= num_pf_pqs);
 		rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
 			   max_qm_global_rls;
@@ -415,16 +478,17 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
 		first_tx_pq_id =
 		vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
 		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+			u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+				       (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));
+
 			/* Create new VP PQ */
 			vport_params[vport_id_in_pf].
 			    first_tx_pq_id[pq_params[i].tc_id] = pq_id;
 			first_tx_pq_id = pq_id;
 
 			/* Map VP PQ to VOQ and PF */
-			STORE_RT_REG(p_hwfn,
-				     QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
-				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
-							QM_WFQ_VP_PQ_PF_SHIFT));
+			STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
+				     first_tx_pq_id, map_val);
 		}
 
 		/* Check RL ID */
@@ -433,26 +497,29 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
 			DP_NOTICE(p_hwfn, true,
 				  "Invalid VPORT ID for rate limiter config\n");
 
-		/* Fill PQ map entry */
-		OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
-			  rl_valid ? 1 : 0);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-			  rl_valid ? pq_params[i].vport_id : 0);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
-			  pq_params[i].wrr_group);
-
-		/* Write PQ map entry to CAM */
-		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
-			     *((u32 *)&tx_pq_map));
+		/* Prepare PQ map entry */
+		struct qm_rf_pq_map_e4 tx_pq_map;
+		QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
+				  1 : 0,
+				  first_tx_pq_id, rl_valid ?
+				  pq_params[i].vport_id : 0,
+				  ext_voq, pq_params[i].wrr_group);
 
 		/* Set base address */
 		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
 			     mem_addr_4kb);
 
+		/* Write PQ info to RAM */
+		if (WRITE_PQ_INFO_TO_RAM != 0) {
+			u32 pq_info = 0;
+			pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
+						  pq_params[i].tc_id, port_id,
+						  rl_valid ? 1 : 0, rl_valid ?
+						  pq_params[i].vport_id : 0);
+			ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+				 pq_info);
+		}
+
 		/* If VF PQ, add indication to PQ VF mask */
 		if (is_vf_pq) {
 			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
@@ -517,13 +584,9 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
 				struct init_qm_pq_params *pq_params)
 {
 	u32 inc_val, crd_reg_offset;
-	u8 voq;
+	u8 ext_voq;
 	u16 i;
 
-	crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
-			  QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
-			 (pf_id % MAX_NUM_PFS_BB);
-
 	inc_val = QM_WFQ_INC_VAL(pf_wfq);
 	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, true,
@@ -532,14 +595,21 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
 	}
 
 	for (i = 0; i < num_tx_pqs; i++) {
-		voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
-		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
+		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+					    max_phys_tcs_per_port);
+		crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
+				  QM_REG_WFQPFCRD_RT_OFFSET :
+				  QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+				 ext_voq * MAX_NUM_PFS_BB +
+				 (pf_id % MAX_NUM_PFS_BB);
+		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
 				 (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+		STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
+			     QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+		STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id,
+			     inc_val);
 	}
 
-	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
-		     QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
-	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
 	return 0;
 }
 
@@ -551,7 +621,7 @@ static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
 	u32 inc_val;
 
 	inc_val = QM_RL_INC_VAL(pf_rl);
-	if (inc_val > QM_RL_MAX_INC_VAL) {
+	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, true,
 			  "Invalid PF rate limit configuration\n");
 		return -1;
@@ -560,7 +630,7 @@ static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
 	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
 		     (u32)QM_RL_CRD_REG_SIGN_BIT);
 	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
-		     QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
+		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
 	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
 
 	return 0;
@@ -611,6 +681,7 @@ static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
 static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
 				  u8 start_vport,
 				  u8 num_vports,
+				  u32 link_speed,
 				  struct init_qm_vport_params *vport_params)
 {
 	u8 i, vport_id;
@@ -624,8 +695,9 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
 
 	/* Go over all PF VPORTs */
 	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
-		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
-		if (inc_val > QM_RL_MAX_INC_VAL) {
+		inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+			  vport_params[i].vport_rl : link_speed);
+		if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
 			DP_NOTICE(p_hwfn, true,
 				  "Invalid VPORT rate-limit configuration\n");
 			return -1;
@@ -635,7 +707,8 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
 			     (u32)QM_RL_CRD_REG_SIGN_BIT);
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
-			     QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
+			     QM_VP_RL_UPPER_BOUND(link_speed) |
+			     (u32)QM_RL_CRD_REG_SIGN_BIT);
 		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
 			     inc_val);
 	}
@@ -666,7 +739,9 @@ static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
 
 static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
 			      struct ecore_ptt *p_ptt,
-			      u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
+			      u32 cmd_addr,
+			      u32 cmd_data_lsb,
+			      u32 cmd_data_msb)
 {
 	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
 		return false;
@@ -684,10 +759,10 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
 /******************** INTERFACE IMPLEMENTATION *********************/
 
 u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
-			 u32 num_vf_cids,
-			 u32 num_tids,
-			 u16 num_pf_pqs,
-			 u16 num_vf_pqs)
+						 u32 num_vf_cids,
+						 u32 num_tids,
+						 u16 num_pf_pqs,
+						 u16 num_vf_pqs)
 {
 	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
 	    QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@@ -758,6 +833,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
 			u8 num_vports,
 			u16 pf_wfq,
 			u32 pf_rl,
+			u32 link_speed,
 			struct init_qm_pq_params *pq_params,
 			struct init_qm_vport_params *vport_params)
 {
@@ -800,7 +876,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
 
 	/* Set VPORT RL */
 	if (ecore_vport_rl_rt_init
-	    (p_hwfn, start_vport, num_vports, vport_params))
+	    (p_hwfn, start_vport, num_vports, link_speed, vport_params))
 		return -1;
 
 	return 0;
@@ -829,7 +905,7 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
 	u32 inc_val;
 
 	inc_val = QM_RL_INC_VAL(pf_rl);
-	if (inc_val > QM_RL_MAX_INC_VAL) {
+	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, true,
 			  "Invalid PF rate limit configuration\n");
 		return -1;
@@ -869,7 +945,9 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
 }
 
 int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
-			struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+			struct ecore_ptt *p_ptt, u8 vport_id,
+			u32 vport_rl,
+			u32 link_speed)
 {
 	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
 
@@ -879,8 +957,8 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
 		return -1;
 	}
 
-	inc_val = QM_RL_INC_VAL(vport_rl);
-	if (inc_val > QM_RL_MAX_INC_VAL) {
+	inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+	if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
 		DP_NOTICE(p_hwfn, true,
 			  "Invalid VPORT rate-limit configuration\n");
 		return -1;
@@ -1479,35 +1557,23 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
 #define RAM_LINE_SIZE sizeof(u64)
 #define REG_SIZE sizeof(u32)
 
-void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
-	struct ecore_ptt *p_ptt,
-	u16 pf_id)
+void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
+		       struct ecore_ptt *p_ptt,
+		       u16 pf_id)
 {
-	union gft_cam_line_union cam_line;
-	struct gft_ram_line ram_line;
-	u32 i, *ram_line_ptr;
-
-	ram_line_ptr = (u32 *)&ram_line;
-
-	/* Stop using gft logic, disable gft search */
+	/* disable gft search for PF */
 	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
-	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
 
-	/* Clean ram & cam for next rfs/gft session*/
+	/* Clean ram & cam for next gft session*/
 
 	/* Zero camline */
-	OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
-	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
-					cam_line.cam_line_mapped.camline);
+	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
 
 	/* Zero ramline */
-	OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
-
-	/* Each iteration write to reg */
-	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
-		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
-			 RAM_LINE_SIZE * pf_id +
-			 i * REG_SIZE, *(ram_line_ptr + i));
+	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+				RAM_LINE_SIZE * pf_id, 0);
+	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+				RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
 }
 
 
@@ -1525,115 +1591,110 @@ void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
 	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
 }
 
-void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
+void ecore_gft_config(struct ecore_hwfn *p_hwfn,
 			       struct ecore_ptt *p_ptt,
 			       u16 pf_id,
 			       bool tcp,
 			       bool udp,
 			       bool ipv4,
-			       bool ipv6)
+			       bool ipv6,
+			       enum gft_profile_type profile_type)
 {
-	u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
-	union gft_cam_line_union camLine;
-	struct gft_ram_line ramLine;
-	u32 *ramLinePointer = (u32 *)&ramLine;
-	int i;
+	u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
 
 	if (!ipv6 && !ipv4)
-		DP_NOTICE(p_hwfn, true,
-			  "set_rfs_mode_enable: must accept at "
-			  "least on of - ipv4 or ipv6");
-
+		DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - ipv4 or ipv6'\n");
 	if (!tcp && !udp)
-		DP_NOTICE(p_hwfn, true,
-			  "set_rfs_mode_enable: must accept at "
-			  "least on of - udp or tcp");
+		DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - udp or tcp\n");
+	if (profile_type >= MAX_GFT_PROFILE_TYPE)
+		DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
 
 	/* Set RFS event ID to be awakened i Tstorm By Prs */
-	rfs_cm_hdr_event_id |=  T_ETH_PACKET_MATCH_RFS_EVENTID <<
-	    PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
-	rfs_cm_hdr_event_id |=  PARSER_ETH_CONN_CM_HDR <<
-	    PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
-	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
 
-	/* Configure Registers for RFS mode */
+	/* Do not load context, only cid, in PRS on match. */
+	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
 
-	/* Enable gft search */
-	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
-	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); /* do not load
-							     * context only cid
-							     * in PRS on match
-							     */
-	camLine.cam_line_mapped.camline = 0;
+	/* Do not use tenant ID exist bit for gft search */
+	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
 
-	/* Cam line is now valid!! */
-	SET_FIELD(camLine.cam_line_mapped.camline,
-		  GFT_CAM_LINE_MAPPED_VALID, 1);
+	/* Set Cam */
+	cam_line = 0;
+	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
 
 	/* Filters are per PF!! */
-	SET_FIELD(camLine.cam_line_mapped.camline,
-		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
 		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
-	SET_FIELD(camLine.cam_line_mapped.camline,
-		  GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
 
 	if (!(tcp && udp)) {
-		SET_FIELD(camLine.cam_line_mapped.camline,
+		SET_FIELD(cam_line,
 			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
 			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
 		if (tcp)
-			SET_FIELD(camLine.cam_line_mapped.camline,
+			SET_FIELD(cam_line,
 				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
 				  GFT_PROFILE_TCP_PROTOCOL);
 		else
-			SET_FIELD(camLine.cam_line_mapped.camline,
+			SET_FIELD(cam_line,
 				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
 				  GFT_PROFILE_UDP_PROTOCOL);
 	}
 
 	if (!(ipv4 && ipv6)) {
-		SET_FIELD(camLine.cam_line_mapped.camline,
-			  GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
 		if (ipv4)
-			SET_FIELD(camLine.cam_line_mapped.camline,
-				  GFT_CAM_LINE_MAPPED_IP_VERSION,
+			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
 				  GFT_PROFILE_IPV4);
 		else
-			SET_FIELD(camLine.cam_line_mapped.camline,
-				  GFT_CAM_LINE_MAPPED_IP_VERSION,
+			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
 				  GFT_PROFILE_IPV6);
 	}
 
 	/* Write characteristics to cam */
 	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
-	    camLine.cam_line_mapped.camline);
-	camLine.cam_line_mapped.camline =
-	    ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
+		 cam_line);
+	cam_line = ecore_rd(p_hwfn, p_ptt,
+			    PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
 
 	/* Write line to RAM - compare to filter 4 tuple */
-	ramLine.lo = 0;
-	ramLine.hi = 0;
-	SET_FIELD(ramLine.hi, GFT_RAM_LINE_DST_IP, 1);
-	SET_FIELD(ramLine.hi, GFT_RAM_LINE_SRC_IP, 1);
-	SET_FIELD(ramLine.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
-	SET_FIELD(ramLine.lo, GFT_RAM_LINE_ETHERTYPE, 1);
-	SET_FIELD(ramLine.lo, GFT_RAM_LINE_SRC_PORT, 1);
-	SET_FIELD(ramLine.lo, GFT_RAM_LINE_DST_PORT, 1);
-
-	/* Each iteration write to reg */
-	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
-		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
-			 RAM_LINE_SIZE * pf_id +
-			 i * REG_SIZE, *(ramLinePointer + i));
+	ram_line_lo = 0;
+	ram_line_hi = 0;
+
+	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+	}
+
+	ecore_wr(p_hwfn, p_ptt,
+		 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+		 ram_line_lo);
+	ecore_wr(p_hwfn, p_ptt,
+		 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
+		 REG_SIZE, ram_line_hi);
 
 	/* Set default profile so that no filter match will happen */
-	ramLine.lo = 0xffffffff;
-	ramLine.hi = 0x3ff;
+	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+		 PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+		 PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
 
-	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
-		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
-			 RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
-			 i * REG_SIZE, *(ramLinePointer + i));
+	/* Enable gft search */
+	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
 }
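
The new profile_type argument selects which header fields the RAM line compares on. The three cases above can be read as a simple field-mask table; the sketch below shows that mapping with local names and plain bit flags, which are illustrative only and not the real GFT_RAM_LINE_* hardware encoding.

#include <stdint.h>

enum gft_profile_sketch {		/* mirrors the patch's three cases */
	SKETCH_4_TUPLE,
	SKETCH_L4_DST_PORT,
	SKETCH_IP_DST_PORT,
};

/* Illustrative match flags - not the real GFT_RAM_LINE_* bit layout. */
#define MATCH_DST_IP		(1u << 0)
#define MATCH_SRC_IP		(1u << 1)
#define MATCH_IP_PROTO		(1u << 2)
#define MATCH_ETHERTYPE		(1u << 3)
#define MATCH_SRC_PORT		(1u << 4)
#define MATCH_DST_PORT		(1u << 5)

static uint32_t gft_profile_mask(enum gft_profile_sketch type)
{
	switch (type) {
	case SKETCH_4_TUPLE:		/* full 4-tuple plus IP protocol */
		return MATCH_DST_IP | MATCH_SRC_IP | MATCH_IP_PROTO |
		       MATCH_ETHERTYPE | MATCH_SRC_PORT | MATCH_DST_PORT;
	case SKETCH_L4_DST_PORT:	/* IP protocol + destination port */
		return MATCH_IP_PROTO | MATCH_ETHERTYPE | MATCH_DST_PORT;
	case SKETCH_IP_DST_PORT:	/* destination IP only */
		return MATCH_DST_IP | MATCH_ETHERTYPE;
	default:
		return 0;
	}
}
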
 
 /* Configure VF zone size mode */
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index a258bd1..ab560e5 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -27,10 +27,10 @@
  * @return The required host memory size in 4KB units.
  */
 u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
-			 u32 num_vf_cids,
-			 u32 num_tids,
-			 u16 num_pf_pqs,
-			 u16 num_vf_pqs);
+						 u32 num_vf_cids,
+						 u32 num_tids,
+						 u16 num_pf_pqs,
+						 u16 num_vf_pqs);
 
 /**
  * @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine
@@ -77,6 +77,7 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
  *		   be 0. otherwise, the weight must be non-zero.
  * @param pf_rl - rate limit in Mb/sec units. a value of 0 means don't
  *                configure. ignored if PF RL is globally disabled.
+ * @param link_speed -		  link speed in Mbps.
  * @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for
  *                    each Tx PQ associated with the specified PF.
  * @param vport_params - array of size num_vports with parameters for each
@@ -99,6 +100,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
 			u8 num_vports,
 			u16 pf_wfq,
 			u32 pf_rl,
+			u32 link_speed,
 			struct init_qm_pq_params *pq_params,
 			struct init_qm_vport_params *vport_params);
 
@@ -153,17 +155,19 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
  * @brief ecore_init_vport_rl - Initializes the rate limit of the specified
  * VPORT.
  *
- * @param p_hwfn	- HW device data
- * @param p_ptt		- ptt window used for writing the registers
- * @param vport_id	- VPORT ID
- * @param vport_rl	- rate limit in Mb/sec units
+ * @param p_hwfn -	       HW device data
+ * @param p_ptt -	       ptt window used for writing the registers
+ * @param vport_id -   VPORT ID
+ * @param vport_rl -   rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
  *
  * @return 0 on success, -1 on error.
  */
 int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
 						struct ecore_ptt *p_ptt,
 						u8 vport_id,
-						u32 vport_rl);
+						u32 vport_rl,
+						u32 link_speed);
 
 /**
  * @brief ecore_send_qm_stop_cmd  Sends a stop command to the QM
@@ -264,7 +268,8 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
  * @param p_hwfn -	    HW device data
  * @param ethType - etherType to configure
  */
-void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType);
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+				      u32 ethType);
 #endif /* UNUSED_HSI_FUNC */
 
 /**
@@ -333,33 +338,35 @@ void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
 				   struct ecore_ptt *p_ptt);
 
 /**
- * @brief ecore_set_rfs_mode_disable - Disable and configure HW for RFS
+ * @brief ecore_gft_disable - Disable GFT
  *
  * @param p_hwfn -   HW device data
  * @param p_ptt -   ptt window used for writing the registers.
- * @param pf_id - pf on which to disable RFS.
+ * @param pf_id - pf on which to disable GFT.
  */
-void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
-				struct ecore_ptt *p_ptt,
-				u16 pf_id);
+void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
+						struct ecore_ptt *p_ptt,
+						u16 pf_id);
 
 /**
-* @brief ecore_set_rfs_mode_enable - enable and configure HW for RFS
+ * @brief ecore_gft_config - Enable and configure HW for GFT
 *
 * @param p_ptt	- ptt window used for writing the registers.
-* @param pf_id	- pf on which to enable RFS.
+ * @param pf_id - pf on which to enable GFT.
 * @param tcp	- set profile tcp packets.
 * @param udp	- set profile udp  packet.
 * @param ipv4	- set profile ipv4 packet.
 * @param ipv6	- set profile ipv6 packet.
+ * @param profile_type - defines which packet fields to match on. Use enum gft_profile_type.
 */
-void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
+void ecore_gft_config(struct ecore_hwfn *p_hwfn,
 	struct ecore_ptt *p_ptt,
 	u16 pf_id,
 	bool tcp,
 	bool udp,
 	bool ipv4,
-	bool ipv6);
+	bool ipv6,
+	enum gft_profile_type profile_type);
 #endif /* UNUSED_HSI_FUNC */
 
 /**
@@ -413,8 +420,10 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
  * @param ctx_type -	context type.
  * @param cid -		context cid.
  */
-void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
-				       u8 ctx_type, u32 cid);
+void ecore_calc_session_ctx_validation(void *p_ctx_mem,
+				       u16 ctx_size,
+				       u8 ctx_type,
+				       u32 cid);
 
 /**
  * @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task
@@ -425,8 +434,11 @@ void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
  * @param ctx_type -	context type.
  * @param tid -		    context tid.
  */
-void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
+void ecore_calc_task_ctx_validation(void *p_ctx_mem,
+				    u16 ctx_size,
+				    u8 ctx_type,
 				    u32 tid);
+
 /**
  * @brief ecore_memset_session_ctx - Memset session context to 0 while
  * preserving validation bytes.
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index d86f56e..56ad1e7 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -30,7 +30,7 @@ struct ecore_pi_info {
 struct ecore_sb_sp_info {
 	struct ecore_sb_info sb_info;
 	/* per protocol index data */
-	struct ecore_pi_info pi_info_arr[PIS_PER_SB];
+	struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
 };
 
 enum ecore_attention_type {
@@ -1492,7 +1492,7 @@ static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
 	if (IS_VF(p_hwfn->p_dev))
 		return;/* @@@TBD MichalK- VF CAU... */
 
-	sb_offset = igu_sb_id * PIS_PER_SB;
+	sb_offset = igu_sb_id * PIS_PER_SB_E4;
 	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
 
 	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
@@ -2677,10 +2677,11 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
 	p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
 				    IGU_REG_CONSUMER_MEM + sbid * 4);
 
-	for (i = 0; i < PIS_PER_SB; i++)
+	for (i = 0; i < PIS_PER_SB_E4; i++)
 		p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
 					      CAU_REG_PI_MEMORY +
-					      sbid * 4 * PIS_PER_SB +  i * 4);
+					      sbid * 4 * PIS_PER_SB_E4 +
+					      i * 4);
 
 	return ECORE_SUCCESS;
 }
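
The PIS_PER_SB to PIS_PER_SB_E4 rename does not change the addressing scheme: each status block still owns a run of consecutive 4-byte PI entries in CAU_REG_PI_MEMORY. A sketch of that offset calculation follows; the per-SB count of 12 and the base address are assumptions for illustration, since both constants are defined outside this patch.

#include <stdint.h>

#define PIS_PER_SB_E4	12u		/* assumed per-SB PI count (E4 HSI) */
#define PI_ENTRY_SIZE	4u		/* each PI entry is one 32-bit word */

/* Hypothetical base; the real CAU_REG_PI_MEMORY value is in the HW headers. */
#define CAU_PI_MEMORY_BASE	0x1000u

/* Byte address of PI index 'pi' belonging to IGU status block 'sbid'. */
static uint32_t cau_pi_addr(uint16_t sbid, uint8_t pi)
{
	return CAU_PI_MEMORY_BASE +
	       sbid * PI_ENTRY_SIZE * PIS_PER_SB_E4 +
	       pi * PI_ENTRY_SIZE;
}
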
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index b655685..563051c 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -19,7 +19,7 @@
 #define ECORE_SB_EVENT_MASK	0x0003
 
 #define SB_ALIGNED_SIZE(p_hwfn)					\
-	ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+	ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
 
 #define ECORE_SB_INVALID_IDX	0xffff
 
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index 49d0fac..24cdf5e 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -26,7 +26,7 @@ enum ecore_int_mode {
 #endif
 
 struct ecore_sb_info {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	u32 sb_ack;		/* Last given ack */
 	u16 igu_sb_id;
@@ -44,7 +44,7 @@ struct ecore_sb_info {
 struct ecore_sb_info_dbg {
 	u32 igu_prod;
 	u32 igu_cons;
-	u16 pi[PIS_PER_SB];
+	u16 pi[PIS_PER_SB_E4];
 };
 
 struct ecore_sb_cnt_info {
@@ -67,7 +67,7 @@ static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
 	/* barrier(); status block is written to by the chip */
 	/* FIXME: need some sort of barrier. */
 	prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
-	    STATUS_BLOCK_PROD_INDEX_MASK;
+	    STATUS_BLOCK_E4_PROD_INDEX_MASK;
 	if (sb_info->sb_ack != prod) {
 		sb_info->sb_ack = prod;
 		rc |= ECORE_SB_IDX;
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 4ec6217..1f0edeb 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -714,7 +714,7 @@ enum _ecore_status_t
  * @param p_hwfn
  * @param rel_vf_id
  *
- * @return E4_MAX_NUM_VFS in case no further active VFs, otherwise index.
+ * @return MAX_NUM_VFS_E4 in case no further active VFs, otherwise index.
  */
 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
 
@@ -724,7 +724,7 @@ void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
 
 #define ecore_for_each_vf(_p_hwfn, _i)					\
 	for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0);		\
-	     _i < E4_MAX_NUM_VFS;					\
+	     _i < MAX_NUM_VFS_E4;					\
 	     _i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
 
 #endif
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index b4bfe89..360d7f8 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -193,5 +193,13 @@
 #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + \
 	((roce_pf_id) * IRO[48].m1))
 #define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[49].base + \
+	((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[50].base + \
+	((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
 
 #endif /* __IRO_H__ */
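
The two new DCQCN statistics macros follow the existing IRO addressing pattern: a fixed base plus a per-PF stride taken from the corresponding iro_arr entry. A minimal sketch of that lookup is below, with a local struct mirroring the five columns of iro_arr (the field names are assumptions; the numeric values are the ones added for entry 49 in this patch).

#include <stdint.h>

/* Assumed layout matching the five iro_arr columns: base, three
 * multipliers and the element size.
 */
struct iro_sketch {
	uint32_t base;
	uint16_t m1;
	uint16_t m2;
	uint16_t m3;
	uint16_t size;
};

/* Entry 49: YSTORM_ROCE_DCQCN_RECEIVED_STATS, values as added above. */
static const struct iro_sketch dcqcn_rx_stats = { 0xa398, 0x10, 0x0, 0x0, 0x10 };

/* Byte offset of the DCQCN received-statistics block for a given RoCE PF,
 * i.e. IRO[49].base + roce_pf_id * IRO[49].m1.
 */
static uint32_t dcqcn_rx_stats_offset(uint8_t roce_pf_id)
{
	return dcqcn_rx_stats.base + roce_pf_id * dcqcn_rx_stats.m1;
}
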
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index bc8df8f..41532ee 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -9,13 +9,13 @@
 #ifndef __IRO_VALUES_H__
 #define __IRO_VALUES_H__
 
-static const struct iro iro_arr[49] = {
+static const struct iro iro_arr[51] = {
 /* YSTORM_FLOW_CONTROL_MODE_OFFSET */
 	{      0x0,      0x0,      0x0,      0x0,      0x8},
 /* TSTORM_PORT_STAT_OFFSET(port_id) */
 	{   0x4cb0,     0x80,      0x0,      0x0,     0x80},
 /* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
-	{   0x6518,     0x20,      0x0,      0x0,     0x20},
+	{   0x6508,     0x20,      0x0,      0x0,     0x20},
 /* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
 	{    0xb00,      0x8,      0x0,      0x0,      0x4},
 /* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
@@ -29,9 +29,9 @@
 /* XSTORM_INTEG_TEST_DATA_OFFSET */
 	{   0x4c40,      0x0,      0x0,      0x0,     0x78},
 /* YSTORM_INTEG_TEST_DATA_OFFSET */
-	{   0x3df0,      0x0,      0x0,      0x0,     0x78},
+	{   0x3e10,      0x0,      0x0,      0x0,     0x78},
 /* PSTORM_INTEG_TEST_DATA_OFFSET */
-	{   0x29b0,      0x0,      0x0,      0x0,     0x78},
+	{   0x2b50,      0x0,      0x0,      0x0,     0x78},
 /* TSTORM_INTEG_TEST_DATA_OFFSET */
 	{   0x4c38,      0x0,      0x0,      0x0,     0x78},
 /* MSTORM_INTEG_TEST_DATA_OFFSET */
@@ -41,11 +41,11 @@
 /* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
 	{    0xa28,      0x8,      0x0,      0x0,      0x8},
 /* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
-	{   0x61f8,     0x10,      0x0,      0x0,     0x10},
+	{   0x61e8,     0x10,      0x0,      0x0,     0x10},
 /* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
-	{   0xbd20,     0x30,      0x0,      0x0,     0x30},
+	{   0xb820,     0x30,      0x0,      0x0,     0x30},
 /* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
-	{   0x95b8,     0x30,      0x0,      0x0,     0x30},
+	{   0x96b8,     0x30,      0x0,      0x0,     0x30},
 /* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
 	{   0x4b60,     0x80,      0x0,      0x0,     0x40},
 /* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
@@ -59,11 +59,11 @@
 /* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
 	{   0x8150,     0x40,      0x0,      0x0,     0x30},
 /* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
-	{   0xec70,     0x60,      0x0,      0x0,     0x60},
+	{   0xe770,     0x60,      0x0,      0x0,     0x60},
 /* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
-	{   0x2b48,     0x80,      0x0,      0x0,     0x38},
+	{   0x2ce8,     0x80,      0x0,      0x0,     0x38},
 /* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
-	{   0xf1b0,     0x78,      0x0,      0x0,     0x78},
+	{   0xf2b0,     0x78,      0x0,      0x0,     0x78},
 /* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
 	{    0x1f8,      0x4,      0x0,      0x0,      0x4},
 /* TSTORM_ETH_PRS_INPUT_OFFSET */
@@ -81,33 +81,37 @@
 /* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
 	{      0x0,      0x8,      0x0,      0x0,      0x8},
 /* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
-	{    0x200,     0x10,      0x8,      0x0,      0x8},
+	{    0x200,     0x18,      0x8,      0x0,      0x8},
 /* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
-	{    0xb78,     0x10,      0x8,      0x0,      0x2},
+	{    0xb78,     0x18,      0x8,      0x0,      0x2},
 /* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
-	{   0xd9a8,     0x38,      0x0,      0x0,     0x24},
+	{   0xd878,     0x50,      0x0,      0x0,     0x3c},
 /* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
-	{  0x12988,     0x10,      0x0,      0x0,      0x8},
+	{  0x12908,     0x18,      0x0,      0x0,     0x10},
 /* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
-	{  0x11fa0,     0x38,      0x0,      0x0,     0x18},
+	{  0x11aa8,     0x40,      0x0,      0x0,     0x18},
 /* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
-	{   0xa8c0,     0x38,      0x0,      0x0,     0x10},
+	{   0xa580,     0x50,      0x0,      0x0,     0x20},
 /* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
-	{   0x86f8,     0x30,      0x0,      0x0,     0x18},
+	{   0x86f8,     0x40,      0x0,      0x0,     0x28},
 /* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
-	{  0x101f8,     0x10,      0x0,      0x0,     0x10},
+	{  0x102f8,     0x18,      0x0,      0x0,     0x10},
 /* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
 	{   0xde28,     0x48,      0x0,      0x0,     0x38},
 /* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
-	{  0x10660,     0x20,      0x0,      0x0,     0x20},
+	{  0x10760,     0x20,      0x0,      0x0,     0x20},
 /* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
-	{   0x2b80,     0x80,      0x0,      0x0,     0x10},
+	{   0x2d20,     0x80,      0x0,      0x0,     0x10},
 /* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
 	{   0x5020,     0x10,      0x0,      0x0,     0x10},
 /* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
 	{   0xc9b0,     0x30,      0x0,      0x0,     0x10},
 /* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
 	{   0xeec0,     0x10,      0x0,      0x0,     0x10},
+/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */
+	{   0xa398,     0x10,      0x0,      0x0,     0x10},
+/* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */
+	{  0x13100,      0x8,      0x0,      0x0,      0x8},
 };
 
 #endif /* __IRO_VALUES_H__ */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 8449215..01fe880 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -2073,11 +2073,12 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
 			       struct ecore_arfs_config_params *p_cfg_params)
 {
 	if (p_cfg_params->arfs_enable) {
-		ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
-					  p_cfg_params->tcp,
-					  p_cfg_params->udp,
-					  p_cfg_params->ipv4,
-					  p_cfg_params->ipv6);
+		ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+				 p_cfg_params->tcp,
+				 p_cfg_params->udp,
+				 p_cfg_params->ipv4,
+				 p_cfg_params->ipv6,
+				 GFT_PROFILE_TYPE_4_TUPLE);
 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
 			   p_cfg_params->tcp ? "Enable" : "Disable",
@@ -2085,7 +2086,7 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
 			   p_cfg_params->ipv4 ? "Enable" : "Disable",
 			   p_cfg_params->ipv6 ? "Enable" : "Disable");
 	} else {
-		ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+		ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
 	}
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
 		   p_cfg_params->arfs_enable ? "Enable" : "Disable");
@@ -2136,9 +2137,17 @@ enum _ecore_status_t
 
 	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
 	p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
-	p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+	p_ramrod->action_icid_valid = 0;
+	p_ramrod->action_icid = 0;
+
+	p_ramrod->rx_qid_valid = 1;
+	p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+	p_ramrod->flow_id_valid = 0;
+	p_ramrod->flow_id = 0;
+
 	p_ramrod->vport_id = abs_vport_id;
-	p_ramrod->filter_type = RFS_FILTER_TYPE;
 	p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
 					   : GFT_DELETE_FILTER;
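
The ramrod change above splits the old shared rx_qid_or_action_icid field into explicit value/valid pairs. A sketch of the resulting selection for an Rx-queue-steered filter is shown below; the struct is a local mirror of the fields touched by the patch (field widths are approximate) rather than the real HSI definition.

#include <stdint.h>

/* Local mirror of the relevant ramrod fields introduced by the patch. */
struct gft_ramrod_sketch {
	uint8_t  action_icid_valid;
	uint16_t action_icid;
	uint8_t  rx_qid_valid;
	uint16_t rx_qid;
	uint8_t  flow_id_valid;
	uint32_t flow_id;
};

/* Steer matching packets to an absolute Rx queue; leave the ICID and
 * flow-id alternatives disabled, as the L2 code above does.
 */
static void gft_ramrod_set_rx_queue(struct gft_ramrod_sketch *p,
				    uint16_t abs_rx_q_id)
{
	p->action_icid_valid = 0;
	p->action_icid = 0;

	p->rx_qid_valid = 1;
	p->rx_qid = abs_rx_q_id;

	p->flow_id_valid = 0;
	p->flow_id = 0;
}
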
 
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index 5d4b2b3..6662232 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -71,6 +71,7 @@ struct ecore_iscsi_pf_params {
 
 	u8		is_target;
 	u8		bdq_pbl_num_entries[2];
+	u8		disable_stats_collection;
 };
 
 enum ecore_rdma_protocol {
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index c9c2309..1d08581 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -28,424 +28,506 @@
 #define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET                            15
 #define DORQ_REG_PF_WAKE_ALL_RT_OFFSET                              16
 #define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET                           17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET                          18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET                          19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET                           20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET                           21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET                        22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET                       23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET                         24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET                             761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE                               736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET                             761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE                               736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET                            1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE                              736
-#define CAU_REG_PI_MEMORY_RT_OFFSET                                 2233
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET                           18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET                           19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET                    20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET                    21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET                        22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET                        23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET                        24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET                        25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET                        26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET                        27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET                        28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET                        29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET                 30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET                 31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET                 32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET                 33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET                 34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET                 35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET                 36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET                 37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET                          38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET                          39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET                           40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET                           41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET                        42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET                       43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET                         44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET                             45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE                               1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET                            1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE                              1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET                                 2093
 #define CAU_REG_PI_MEMORY_RT_SIZE                                   4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET                6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET                  6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET                  6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET                     6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET                     6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET                                6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET                               6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET                               6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET                       6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET                       6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET                           6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET                 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET       6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET                  6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET                           6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET                     6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET                                 6665
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET                6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET                  6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET                  6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET                     6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET                     6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET                                6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET                               6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET                               6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET                       6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET                       6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET                           6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET                 6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET       6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET                  6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET                           6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET                     6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET                                 6525
 #define SRC_REG_FIRSTFREE_RT_SIZE                                   2
-#define SRC_REG_LASTFREE_RT_OFFSET                                  6667
+#define SRC_REG_LASTFREE_RT_OFFSET                                  6527
 #define SRC_REG_LASTFREE_RT_SIZE                                    2
-#define SRC_REG_COUNTFREE_RT_OFFSET                                 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET                          6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET                            6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET                            6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET                              6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET                              6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET                             6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET                            6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET                           6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET                            6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET                           6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET                            6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET                          6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET                           6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET                         6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET                          6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET                         6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET                          6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET                         6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET                          6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET                 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET               6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET               6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET                           6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET                         6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET                         6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET                       6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET                     6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET                     6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET                                6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET                            6699
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET                          6700
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET                          6701
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET                             6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE                               22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET                               28702
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET                    28703
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET                       28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET                       28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET                          28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET                          28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET                          28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET                             28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET                             28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET                             28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET                 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET                 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET                            28714
+#define SRC_REG_COUNTFREE_RT_OFFSET                                 6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET                          6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET                            6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET                            6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET                              6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET                              6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET                             6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET                            6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET                           6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET                            6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET                           6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET                            6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET                          6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET                           6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET                         6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET                          6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET                         6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET                          6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET                         6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET                          6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET                 6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET               6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET               6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET                           6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET                         6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET                         6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET                       6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET                     6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET                     6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET                                6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET                            6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET                          6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET                          6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET                        6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET                        6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET                         6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET                         6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET                             6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE                               26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET                               32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET                    32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET                       32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET                       32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET                          32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET                          32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET                          32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET                             32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET                             32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET                             32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET                 32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET                 32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET                            32992
 #define TM_REG_CONFIG_CONN_MEM_RT_SIZE                              416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET                            29130
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET                            33408
 #define TM_REG_CONFIG_TASK_MEM_RT_SIZE                              608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET                                29738
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET                                29739
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET                                29740
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET                           29741
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET                           29742
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET                           29743
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET                           29744
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET                           29745
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET                           29746
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET                           29747
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET                           29748
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET                           29749
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET                           29750
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET                          29751
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET                          29752
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET                          29753
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET                          29754
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET                          29755
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET                          29756
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET                          29757
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET                          29758
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET                          29759
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET                          29760
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET                          29761
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET                          29762
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET                          29763
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET                          29764
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET                          29765
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET                          29766
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET                          29767
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET                          29768
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET                          29769
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET                          29770
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET                          29771
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET                          29772
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET                          29773
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET                          29774
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET                          29775
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET                          29776
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET                          29777
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET                          29778
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET                          29779
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET                          29780
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET                          29781
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET                          29782
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET                          29783
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET                          29784
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET                          29785
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET                          29786
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET                          29787
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET                          29788
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET                          29789
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET                          29790
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET                          29791
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET                          29792
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET                          29793
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET                          29794
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET                          29795
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET                          29796
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET                          29797
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET                          29798
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET                          29799
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET                          29800
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET                          29801
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET                          29802
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET                          29803
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET                          29804
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET                            29805
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET                                34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET                                34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET                                34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET                           34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET                           34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET                           34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET                           34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET                           34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET                           34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET                           34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET                           34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET                           34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET                           34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET                          34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET                          34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET                          34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET                          34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET                          34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET                          34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET                          34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET                          34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET                          34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET                          34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET                          34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET                          34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET                          34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET                          34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET                          34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET                          34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET                          34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET                          34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET                          34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET                          34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET                          34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET                          34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET                          34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET                          34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET                          34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET                          34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET                          34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET                          34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET                          34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET                          34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET                          34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET                          34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET                          34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET                          34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET                          34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET                          34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET                          34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET                          34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET                          34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET                          34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET                          34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET                          34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET                          34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET                          34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET                          34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET                          34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET                          34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET                          34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET                          34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET                          34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET                          34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET                          34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET                          34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET                          34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET                            34083
 #define QM_REG_BASEADDROTHERPQ_RT_SIZE                              128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET                         29933
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET                         29934
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET                          29935
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET                        29936
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET                       29937
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET                            29938
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET                            29939
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET                            29940
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET                            29941
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET                            29942
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET                            29943
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET                            29944
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET                            29945
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET                            29946
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET                            29947
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET                           29948
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET                           29949
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET                           29950
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET                           29951
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET                           29952
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET                           29953
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET                        29954
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET                        29955
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET                        29956
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET                        29957
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET                           29958
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET                           29959
-#define QM_REG_PQTX2PF_0_RT_OFFSET                                  29960
-#define QM_REG_PQTX2PF_1_RT_OFFSET                                  29961
-#define QM_REG_PQTX2PF_2_RT_OFFSET                                  29962
-#define QM_REG_PQTX2PF_3_RT_OFFSET                                  29963
-#define QM_REG_PQTX2PF_4_RT_OFFSET                                  29964
-#define QM_REG_PQTX2PF_5_RT_OFFSET                                  29965
-#define QM_REG_PQTX2PF_6_RT_OFFSET                                  29966
-#define QM_REG_PQTX2PF_7_RT_OFFSET                                  29967
-#define QM_REG_PQTX2PF_8_RT_OFFSET                                  29968
-#define QM_REG_PQTX2PF_9_RT_OFFSET                                  29969
-#define QM_REG_PQTX2PF_10_RT_OFFSET                                 29970
-#define QM_REG_PQTX2PF_11_RT_OFFSET                                 29971
-#define QM_REG_PQTX2PF_12_RT_OFFSET                                 29972
-#define QM_REG_PQTX2PF_13_RT_OFFSET                                 29973
-#define QM_REG_PQTX2PF_14_RT_OFFSET                                 29974
-#define QM_REG_PQTX2PF_15_RT_OFFSET                                 29975
-#define QM_REG_PQTX2PF_16_RT_OFFSET                                 29976
-#define QM_REG_PQTX2PF_17_RT_OFFSET                                 29977
-#define QM_REG_PQTX2PF_18_RT_OFFSET                                 29978
-#define QM_REG_PQTX2PF_19_RT_OFFSET                                 29979
-#define QM_REG_PQTX2PF_20_RT_OFFSET                                 29980
-#define QM_REG_PQTX2PF_21_RT_OFFSET                                 29981
-#define QM_REG_PQTX2PF_22_RT_OFFSET                                 29982
-#define QM_REG_PQTX2PF_23_RT_OFFSET                                 29983
-#define QM_REG_PQTX2PF_24_RT_OFFSET                                 29984
-#define QM_REG_PQTX2PF_25_RT_OFFSET                                 29985
-#define QM_REG_PQTX2PF_26_RT_OFFSET                                 29986
-#define QM_REG_PQTX2PF_27_RT_OFFSET                                 29987
-#define QM_REG_PQTX2PF_28_RT_OFFSET                                 29988
-#define QM_REG_PQTX2PF_29_RT_OFFSET                                 29989
-#define QM_REG_PQTX2PF_30_RT_OFFSET                                 29990
-#define QM_REG_PQTX2PF_31_RT_OFFSET                                 29991
-#define QM_REG_PQTX2PF_32_RT_OFFSET                                 29992
-#define QM_REG_PQTX2PF_33_RT_OFFSET                                 29993
-#define QM_REG_PQTX2PF_34_RT_OFFSET                                 29994
-#define QM_REG_PQTX2PF_35_RT_OFFSET                                 29995
-#define QM_REG_PQTX2PF_36_RT_OFFSET                                 29996
-#define QM_REG_PQTX2PF_37_RT_OFFSET                                 29997
-#define QM_REG_PQTX2PF_38_RT_OFFSET                                 29998
-#define QM_REG_PQTX2PF_39_RT_OFFSET                                 29999
-#define QM_REG_PQTX2PF_40_RT_OFFSET                                 30000
-#define QM_REG_PQTX2PF_41_RT_OFFSET                                 30001
-#define QM_REG_PQTX2PF_42_RT_OFFSET                                 30002
-#define QM_REG_PQTX2PF_43_RT_OFFSET                                 30003
-#define QM_REG_PQTX2PF_44_RT_OFFSET                                 30004
-#define QM_REG_PQTX2PF_45_RT_OFFSET                                 30005
-#define QM_REG_PQTX2PF_46_RT_OFFSET                                 30006
-#define QM_REG_PQTX2PF_47_RT_OFFSET                                 30007
-#define QM_REG_PQTX2PF_48_RT_OFFSET                                 30008
-#define QM_REG_PQTX2PF_49_RT_OFFSET                                 30009
-#define QM_REG_PQTX2PF_50_RT_OFFSET                                 30010
-#define QM_REG_PQTX2PF_51_RT_OFFSET                                 30011
-#define QM_REG_PQTX2PF_52_RT_OFFSET                                 30012
-#define QM_REG_PQTX2PF_53_RT_OFFSET                                 30013
-#define QM_REG_PQTX2PF_54_RT_OFFSET                                 30014
-#define QM_REG_PQTX2PF_55_RT_OFFSET                                 30015
-#define QM_REG_PQTX2PF_56_RT_OFFSET                                 30016
-#define QM_REG_PQTX2PF_57_RT_OFFSET                                 30017
-#define QM_REG_PQTX2PF_58_RT_OFFSET                                 30018
-#define QM_REG_PQTX2PF_59_RT_OFFSET                                 30019
-#define QM_REG_PQTX2PF_60_RT_OFFSET                                 30020
-#define QM_REG_PQTX2PF_61_RT_OFFSET                                 30021
-#define QM_REG_PQTX2PF_62_RT_OFFSET                                 30022
-#define QM_REG_PQTX2PF_63_RT_OFFSET                                 30023
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET                               30024
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET                               30025
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET                               30026
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET                               30027
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET                               30028
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET                               30029
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET                               30030
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET                               30031
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET                               30032
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET                               30033
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET                              30034
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET                              30035
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET                              30036
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET                              30037
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET                              30038
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET                              30039
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET                             30040
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET                             30041
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET                        30042
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET                        30043
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET                          30044
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET                          30045
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET                          30046
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET                          30047
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET                          30048
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET                          30049
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET                          30050
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET                          30051
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET                               30052
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET                         34211
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET                         34212
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET                          34213
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET                        34214
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET                       34215
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET                            34216
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET                            34217
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET                            34218
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET                            34219
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET                            34220
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET                            34221
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET                            34222
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET                            34223
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET                            34224
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET                            34225
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET                           34226
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET                           34227
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET                           34228
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET                           34229
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET                           34230
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET                           34231
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET                        34232
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET                        34233
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET                        34234
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET                        34235
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET                           34236
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET                           34237
+#define QM_REG_PQTX2PF_0_RT_OFFSET                                  34238
+#define QM_REG_PQTX2PF_1_RT_OFFSET                                  34239
+#define QM_REG_PQTX2PF_2_RT_OFFSET                                  34240
+#define QM_REG_PQTX2PF_3_RT_OFFSET                                  34241
+#define QM_REG_PQTX2PF_4_RT_OFFSET                                  34242
+#define QM_REG_PQTX2PF_5_RT_OFFSET                                  34243
+#define QM_REG_PQTX2PF_6_RT_OFFSET                                  34244
+#define QM_REG_PQTX2PF_7_RT_OFFSET                                  34245
+#define QM_REG_PQTX2PF_8_RT_OFFSET                                  34246
+#define QM_REG_PQTX2PF_9_RT_OFFSET                                  34247
+#define QM_REG_PQTX2PF_10_RT_OFFSET                                 34248
+#define QM_REG_PQTX2PF_11_RT_OFFSET                                 34249
+#define QM_REG_PQTX2PF_12_RT_OFFSET                                 34250
+#define QM_REG_PQTX2PF_13_RT_OFFSET                                 34251
+#define QM_REG_PQTX2PF_14_RT_OFFSET                                 34252
+#define QM_REG_PQTX2PF_15_RT_OFFSET                                 34253
+#define QM_REG_PQTX2PF_16_RT_OFFSET                                 34254
+#define QM_REG_PQTX2PF_17_RT_OFFSET                                 34255
+#define QM_REG_PQTX2PF_18_RT_OFFSET                                 34256
+#define QM_REG_PQTX2PF_19_RT_OFFSET                                 34257
+#define QM_REG_PQTX2PF_20_RT_OFFSET                                 34258
+#define QM_REG_PQTX2PF_21_RT_OFFSET                                 34259
+#define QM_REG_PQTX2PF_22_RT_OFFSET                                 34260
+#define QM_REG_PQTX2PF_23_RT_OFFSET                                 34261
+#define QM_REG_PQTX2PF_24_RT_OFFSET                                 34262
+#define QM_REG_PQTX2PF_25_RT_OFFSET                                 34263
+#define QM_REG_PQTX2PF_26_RT_OFFSET                                 34264
+#define QM_REG_PQTX2PF_27_RT_OFFSET                                 34265
+#define QM_REG_PQTX2PF_28_RT_OFFSET                                 34266
+#define QM_REG_PQTX2PF_29_RT_OFFSET                                 34267
+#define QM_REG_PQTX2PF_30_RT_OFFSET                                 34268
+#define QM_REG_PQTX2PF_31_RT_OFFSET                                 34269
+#define QM_REG_PQTX2PF_32_RT_OFFSET                                 34270
+#define QM_REG_PQTX2PF_33_RT_OFFSET                                 34271
+#define QM_REG_PQTX2PF_34_RT_OFFSET                                 34272
+#define QM_REG_PQTX2PF_35_RT_OFFSET                                 34273
+#define QM_REG_PQTX2PF_36_RT_OFFSET                                 34274
+#define QM_REG_PQTX2PF_37_RT_OFFSET                                 34275
+#define QM_REG_PQTX2PF_38_RT_OFFSET                                 34276
+#define QM_REG_PQTX2PF_39_RT_OFFSET                                 34277
+#define QM_REG_PQTX2PF_40_RT_OFFSET                                 34278
+#define QM_REG_PQTX2PF_41_RT_OFFSET                                 34279
+#define QM_REG_PQTX2PF_42_RT_OFFSET                                 34280
+#define QM_REG_PQTX2PF_43_RT_OFFSET                                 34281
+#define QM_REG_PQTX2PF_44_RT_OFFSET                                 34282
+#define QM_REG_PQTX2PF_45_RT_OFFSET                                 34283
+#define QM_REG_PQTX2PF_46_RT_OFFSET                                 34284
+#define QM_REG_PQTX2PF_47_RT_OFFSET                                 34285
+#define QM_REG_PQTX2PF_48_RT_OFFSET                                 34286
+#define QM_REG_PQTX2PF_49_RT_OFFSET                                 34287
+#define QM_REG_PQTX2PF_50_RT_OFFSET                                 34288
+#define QM_REG_PQTX2PF_51_RT_OFFSET                                 34289
+#define QM_REG_PQTX2PF_52_RT_OFFSET                                 34290
+#define QM_REG_PQTX2PF_53_RT_OFFSET                                 34291
+#define QM_REG_PQTX2PF_54_RT_OFFSET                                 34292
+#define QM_REG_PQTX2PF_55_RT_OFFSET                                 34293
+#define QM_REG_PQTX2PF_56_RT_OFFSET                                 34294
+#define QM_REG_PQTX2PF_57_RT_OFFSET                                 34295
+#define QM_REG_PQTX2PF_58_RT_OFFSET                                 34296
+#define QM_REG_PQTX2PF_59_RT_OFFSET                                 34297
+#define QM_REG_PQTX2PF_60_RT_OFFSET                                 34298
+#define QM_REG_PQTX2PF_61_RT_OFFSET                                 34299
+#define QM_REG_PQTX2PF_62_RT_OFFSET                                 34300
+#define QM_REG_PQTX2PF_63_RT_OFFSET                                 34301
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET                               34302
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET                               34303
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET                               34304
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET                               34305
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET                               34306
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET                               34307
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET                               34308
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET                               34309
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET                               34310
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET                               34311
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET                              34312
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET                              34313
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET                              34314
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET                              34315
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET                              34316
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET                              34317
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET                             34318
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET                             34319
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET                        34320
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET                        34321
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET                          34322
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET                          34323
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET                          34324
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET                          34325
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET                          34326
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET                          34327
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET                          34328
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET                          34329
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET                               34330
 #define QM_REG_RLGLBLINCVAL_RT_SIZE                                 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET                           30308
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET                           34586
 #define QM_REG_RLGLBLUPPERBOUND_RT_SIZE                             256
-#define QM_REG_RLGLBLCRD_RT_OFFSET                                  30564
+#define QM_REG_RLGLBLCRD_RT_OFFSET                                  34842
 #define QM_REG_RLGLBLCRD_RT_SIZE                                    256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET                               30820
-#define QM_REG_RLPFPERIOD_RT_OFFSET                                 30821
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET                            30822
-#define QM_REG_RLPFINCVAL_RT_OFFSET                                 30823
+#define QM_REG_RLGLBLENABLE_RT_OFFSET                               35098
+#define QM_REG_RLPFPERIOD_RT_OFFSET                                 35099
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET                            35100
+#define QM_REG_RLPFINCVAL_RT_OFFSET                                 35101
 #define QM_REG_RLPFINCVAL_RT_SIZE                                   16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET                             30839
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET                             35117
 #define QM_REG_RLPFUPPERBOUND_RT_SIZE                               16
-#define QM_REG_RLPFCRD_RT_OFFSET                                    30855
+#define QM_REG_RLPFCRD_RT_OFFSET                                    35133
 #define QM_REG_RLPFCRD_RT_SIZE                                      16
-#define QM_REG_RLPFENABLE_RT_OFFSET                                 30871
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET                              30872
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET                                30873
+#define QM_REG_RLPFENABLE_RT_OFFSET                                 35149
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET                              35150
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET                                35151
 #define QM_REG_WFQPFWEIGHT_RT_SIZE                                  16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET                            30889
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET                            35167
 #define QM_REG_WFQPFUPPERBOUND_RT_SIZE                              16
-#define QM_REG_WFQPFCRD_RT_OFFSET                                   30905
+#define QM_REG_WFQPFCRD_RT_OFFSET                                   35183
 #define QM_REG_WFQPFCRD_RT_SIZE                                     256
-#define QM_REG_WFQPFENABLE_RT_OFFSET                                31161
-#define QM_REG_WFQVPENABLE_RT_OFFSET                                31162
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET                               31163
+#define QM_REG_WFQPFENABLE_RT_OFFSET                                35439
+#define QM_REG_WFQVPENABLE_RT_OFFSET                                35440
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET                               35441
 #define QM_REG_BASEADDRTXPQ_RT_SIZE                                 512
-#define QM_REG_TXPQMAP_RT_OFFSET                                    31675
+#define QM_REG_TXPQMAP_RT_OFFSET                                    35953
 #define QM_REG_TXPQMAP_RT_SIZE                                      512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET                                32187
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET                                36465
 #define QM_REG_WFQVPWEIGHT_RT_SIZE                                  512
-#define QM_REG_WFQVPCRD_RT_OFFSET                                   32699
+#define QM_REG_WFQVPCRD_RT_OFFSET                                   36977
 #define QM_REG_WFQVPCRD_RT_SIZE                                     512
-#define QM_REG_WFQVPMAP_RT_OFFSET                                   33211
+#define QM_REG_WFQVPMAP_RT_OFFSET                                   37489
 #define QM_REG_WFQVPMAP_RT_SIZE                                     512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET                               33723
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET                               38001
 #define QM_REG_WFQPFCRD_MSB_RT_SIZE                                 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET                                 34043
+#define QM_REG_VOQCRDLINE_RT_OFFSET                                 38321
 #define QM_REG_VOQCRDLINE_RT_SIZE                                   36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET                             34079
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET                             38357
 #define QM_REG_VOQINITCRDLINE_RT_SIZE                               36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET                           34115
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET                     34116
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET                     34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET                     34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET                     34119
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET                      34120
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET                  34121
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET                           34122
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET                          38393
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET                           38394
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET                      38395
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET                     38396
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET                     38397
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET                     38398
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET                     38399
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET                  38400
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET                           38401
 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE                             4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET                      34126
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE                        4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET                        34130
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET                        38405
 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE                          4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET                           34134
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET                     34135
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET                     38409
 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE                       32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET                        34167
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET                        38441
 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE                          16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET                      34183
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET                      38457
 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE                        16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET             34199
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET             38473
 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE               16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET                   34215
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET                   38489
 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE                     16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET                              34231
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET                    34232
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET                           34233
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET                           34234
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET                           34235
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET                       34236
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET                       34237
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET                       34238
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET                       34239
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET                    34240
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET                    34241
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET                    34242
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET                    34243
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET                        34244
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET                     34245
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET                           34246
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET                      34247
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET                    34248
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET                       34249
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET                34250
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET                    34251
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET                       34252
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET                34253
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET                    34254
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET                       34255
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET                34256
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET                    34257
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET                       34258
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET                34259
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET                    34260
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET                       34261
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET                34262
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET                    34263
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET                       34264
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET                34265
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET                    34266
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET                       34267
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET                34268
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET                    34269
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET                       34270
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET                34271
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET                    34272
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET                       34273
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET                34274
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET                    34275
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET                       34276
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET                34277
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET                   34278
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET                      34279
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET               34280
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET                   34281
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET                      34282
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET               34283
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET                   34284
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET                      34285
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET               34286
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET                   34287
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET                      34288
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET               34289
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET                   34290
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET                      34291
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET               34292
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET                   34293
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET                      34294
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET               34295
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET                   34296
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET                      34297
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET               34298
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET                   34299
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET                      34300
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET               34301
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET                   34302
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET                      34303
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET               34304
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET                   34305
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET                      34306
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET               34307
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET                                34308
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET                              38505
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET                    38506
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET                         38507
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE                           8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET              38515
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE                1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET                 39539
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE                   512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET               40051
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE                 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET      40563
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE        512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET            41075
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE              512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET                    41587
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE                      32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET                           41619
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET                           41620
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET                           41621
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET                       41622
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET                       41623
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET                       41624
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET                       41625
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET                    41626
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET                    41627
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET                    41628
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET                    41629
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET                        41630
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET                     41631
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET                           41632
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET                      41633
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET                    41634
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET                       41635
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET                41636
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET                    41637
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET                       41638
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET                41639
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET                    41640
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET                       41641
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET                41642
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET                    41643
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET                       41644
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET                41645
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET                    41646
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET                       41647
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET                41648
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET                    41649
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET                       41650
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET                41651
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET                    41652
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET                       41653
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET                41654
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET                    41655
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET                       41656
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET                41657
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET                    41658
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET                       41659
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET                41660
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET                    41661
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET                       41662
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET                41663
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET                   41664
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET                      41665
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET               41666
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET                   41667
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET                      41668
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET               41669
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET                   41670
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET                      41671
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET               41672
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET                   41673
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET                      41674
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET               41675
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET                   41676
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET                      41677
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET               41678
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET                   41679
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET                      41680
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET               41681
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET                   41682
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET                      41683
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET               41684
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET                   41685
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET                      41686
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET               41687
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET                   41688
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET                      41689
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET               41690
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET                   41691
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET                      41692
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET               41693
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET                   41694
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET                      41695
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET               41696
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET                   41697
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET                      41698
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET               41699
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET                   41700
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET                      41701
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET               41702
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET                   41703
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET                      41704
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET               41705
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET                   41706
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET                      41707
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET               41708
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET                   41709
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET                      41710
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET               41711
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET                   41712
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET                      41713
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET               41714
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET                   41715
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET                      41716
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET               41717
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET                   41718
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET                      41719
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET               41720
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET                   41721
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET                      41722
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET               41723
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET                   41724
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET                      41725
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET               41726
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET                   41727
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET                      41728
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET               41729
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET                   41730
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET                      41731
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET               41732
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET                   41733
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET                      41734
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET               41735
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET                   41736
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET                      41737
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET               41738
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET                   41739
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET                      41740
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET               41741
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET                                41742
 
-#define RUNTIME_ARRAY_SIZE 34309
+#define RUNTIME_ARRAY_SIZE 41743
 
 #endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index 2f5d453..dfa2ab0 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -347,7 +347,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 			  "Unsupported MF mode, init as DEFAULT\n");
 		p_ramrod->mf_mode = MF_NPAR;
 	}
-	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+	p_ramrod->outer_tag_config.outer_tag.tpid = p_hwfn->hw_info.ovlan;
 
 	/* Place EQ address in RAMROD */
 	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -387,7 +387,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
 		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-		   sb, sb_index, p_ramrod->outer_tag);
+		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid);
 
 	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index a346166..75adcda 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -179,7 +179,7 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
 				    struct ecore_spq *p_spq)
 {
 	struct ecore_cxt_info cxt_info;
-	struct core_conn_context *p_cxt;
+	struct e4_core_conn_context *p_cxt;
 	enum _ecore_status_t rc;
 	u16 physical_q;
 
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index b1ab80b..ed9ace2 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -1726,7 +1726,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 	/* fill in pfdev info */
 	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
 	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
-	pfdev_info->indices_per_sb = PIS_PER_SB;
+	pfdev_info->indices_per_sb = PIS_PER_SB_E4;
 
 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -3734,11 +3734,11 @@ enum _ecore_status_t
 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
 			  struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
 {
-	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+	u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
 	int i, cnt;
 
 	/* Read initial consumers & producers */
-	for (i = 0; i < MAX_NUM_VOQS; i++) {
+	for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
 		u32 prod;
 
 		cons[i] = ecore_rd(p_hwfn, p_ptt,
@@ -3753,7 +3753,7 @@ enum _ecore_status_t
 	/* Wait for consumers to pass the producers */
 	i = 0;
 	for (cnt = 0; cnt < 50; cnt++) {
-		for (; i < MAX_NUM_VOQS; i++) {
+		for (; i < MAX_NUM_VOQS_E4; i++) {
 			u32 tmp;
 
 			tmp = ecore_rd(p_hwfn, p_ptt,
@@ -3763,7 +3763,7 @@ enum _ecore_status_t
 				break;
 		}
 
-		if (i == MAX_NUM_VOQS)
+		if (i == MAX_NUM_VOQS_E4)
 			break;
 
 		OSAL_MSLEEP(20);
@@ -4255,7 +4255,7 @@ u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
 			return i;
 
 out:
-	return E4_MAX_NUM_VFS;
+	return MAX_NUM_VFS_E4;
 }
 
 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
@@ -4624,6 +4624,7 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
 						 struct ecore_ptt *p_ptt,
 						 int vfid, int val)
 {
+	struct ecore_mcp_link_state *p_link;
 	struct ecore_vf_info *vf;
 	u8 abs_vp_id = 0;
 	enum _ecore_status_t rc;
@@ -4637,7 +4638,10 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
-	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+	p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+				   p_link->speed);
 }
 
 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index 31bdee1..4ff5425 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -16,7 +16,7 @@
 #include "ecore_l2.h"
 
 #define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
-	(E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
+	(MAX_NUM_VFS_E4 * ECORE_ETH_VF_NUM_VLAN_FILTERS)
 
 /* Represents a full message. Both the request filled by VF
  * and the response filled by the PF. The VF needs one copy
@@ -170,7 +170,7 @@ struct ecore_vf_info {
  * capability enabled.
  */
 struct ecore_pf_iov {
-	struct ecore_vf_info	vfs_array[E4_MAX_NUM_VFS];
+	struct ecore_vf_info	vfs_array[MAX_NUM_VFS_E4];
 	u64			pending_flr[ECORE_VF_ARRAY_LENGTH];
 
 #ifndef REMOVE_DBG
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 299efbc..ad15d28 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1220,3 +1220,5 @@
 #define DORQ_REG_DB_DROP_DETAILS_REASON 0x100a20UL
 #define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
   #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10)
+#define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL
+#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index be63f5d..48dd3b1 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -19,7 +19,7 @@
 char fw_file[PATH_MAX];
 
 const char *QEDE_DEFAULT_FIRMWARE =
-	"/lib/firmware/qed/qed_init_values-8.20.0.0.bin";
+	"/lib/firmware/qed/qed_init_values-8.30.12.0.bin";
 
 static void
 qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 8ce89e5..89ad113 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -364,12 +364,12 @@ void qede_tx_queue_release(void *tx_queue)
 		  uint16_t sb_id)
 {
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int rc;
 
 	sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
-					  sizeof(struct status_block));
+					  sizeof(struct status_block_e4));
 	if (!sb_virt) {
 		DP_ERR(edev, "Status block allocation failed\n");
 		return -ENOMEM;
@@ -379,7 +379,7 @@ void qede_tx_queue_release(void *tx_queue)
 	if (rc) {
 		DP_ERR(edev, "Status block initialization failed\n");
 		OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
-				       sizeof(struct status_block));
+				       sizeof(struct status_block_e4));
 		return rc;
 	}
 
@@ -453,7 +453,7 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 		if (fp->sb_info) {
 			OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
 				fp->sb_info->sb_phys,
-				sizeof(struct status_block));
+				sizeof(struct status_block_e4));
 			rte_free(fp->sb_info);
 			fp->sb_info = NULL;
 		}
-- 
1.7.10.3

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH 42/53] net/qede/base: add UFP support
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (10 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 41/53] net/qede/base: update firmware to 8.30.12.0 Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 43/53] net/qede/base: add support for mapped doorbell Bars for VFs Rasesh Mody
                   ` (10 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Add support for UFP (Unified Fabric Port) multi-function mode.
It includes new APIs for reading UFP configuration, handling
UFP events, retrieving UFP status, updating the UFP ramrod, etc.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore.h             |   45 ++++++++++++++++-
 drivers/net/qede/base/ecore_dcbx.c        |   16 +++++-
 drivers/net/qede/base/ecore_dev.c         |   77 +++++++++++++++++++++--------
 drivers/net/qede/base/ecore_dev_api.h     |    8 ++-
 drivers/net/qede/base/ecore_mcp.c         |   72 +++++++++++++++++++++++++++
 drivers/net/qede/base/ecore_mcp.h         |    9 ++++
 drivers/net/qede/base/ecore_sp_commands.c |   68 +++++++++++++++++++------
 drivers/net/qede/base/ecore_sp_commands.h |   11 ++++-
 drivers/net/qede/base/mcp_public.h        |   30 +++++++++++
 drivers/net/qede/qede_if.h                |    2 +-
 drivers/net/qede/qede_main.c              |    3 +-
 11 files changed, 296 insertions(+), 45 deletions(-)
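
For illustration only, and not part of the patch itself: a minimal sketch of how the new UFP entry points are intended to chain together, modeled on ecore_mcp_handle_ufp_event() in the diff below. The struct ecore_hwfn/ecore_ptt types and the ecore_* calls come from the ecore headers this series touches; the helper name example_ufp_refresh() and the assumption that ecore_sp_pf_update_ufp() is declared in ecore_sp_commands.h are illustrative.

    #include "ecore.h"
    #include "ecore_dev_api.h"     /* ecore_is_mf_ufp() */
    #include "ecore_mcp.h"         /* ecore_mcp_read_ufp_config() */
    #include "ecore_sp_commands.h" /* ecore_sp_pf_update_ufp() - assumed declared here */

    /* Hypothetical helper, not in the patch: refresh UFP state and push it to FW */
    static enum _ecore_status_t
    example_ufp_refresh(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
    {
    	/* UFP handling only applies when the NVM config selected UFP MF mode */
    	if (!ecore_is_mf_ufp(p_hwfn))
    		return ECORE_SUCCESS;

    	/* Re-read mode/tc/priority-control settings from MFW shared memory */
    	ecore_mcp_read_ufp_config(p_hwfn, p_ptt);

    	/* Update storm FW with the (re)negotiated S-tag priority behaviour */
    	return ecore_sp_pf_update_ufp(p_hwfn);
    }

In the patch itself this sequence runs from the MFW_DRV_MSG_OEM_CFG_UPDATE handler (ecore_mcp_handle_ufp_event), which additionally either reconfigures the QM (VNIC_BW mode) or merges the UFP TC into the DCBX operational MIB (ETS mode) before posting the PF-update ramrod.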

diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 0199608..3bc1b20 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -507,6 +507,45 @@ struct ecore_fw_data {
 	u32 init_ops_size;
 };
 
+enum ecore_mf_mode_bit {
+	/* Supports PF-classification based on tag */
+	ECORE_MF_OVLAN_CLSS,
+
+	/* Supports PF-classification based on MAC */
+	ECORE_MF_LLH_MAC_CLSS,
+
+	/* Supports PF-classification based on protocol type */
+	ECORE_MF_LLH_PROTO_CLSS,
+
+	/* Requires a default PF to be set */
+	ECORE_MF_NEED_DEF_PF,
+
+	/* Allow LL2 to multicast/broadcast */
+	ECORE_MF_LL2_NON_UNICAST,
+
+	/* Allow Cross-PF [& child VFs] Tx-switching */
+	ECORE_MF_INTER_PF_SWITCH,
+
+	/* TODO - if we ever re-utilize any of this logic, we can rename */
+	ECORE_MF_UFP_SPECIFIC,
+};
+
+enum ecore_ufp_mode {
+	ECORE_UFP_MODE_ETS,
+	ECORE_UFP_MODE_VNIC_BW,
+};
+
+enum ecore_ufp_pri_type {
+	ECORE_UFP_PRI_OS,
+	ECORE_UFP_PRI_VNIC
+};
+
+struct ecore_ufp_info {
+	enum ecore_ufp_pri_type pri_type;
+	enum ecore_ufp_mode mode;
+	u8 tc;
+};
+
 struct ecore_hwfn {
 	struct ecore_dev		*p_dev;
 	u8				my_id;		/* ID inside the PF */
@@ -588,6 +627,7 @@ struct ecore_hwfn {
 	struct ecore_pf_iov		*pf_iov_info;
 	struct ecore_mcp_info		*mcp_info;
 	struct ecore_dcbx_info		*p_dcbx_info;
+	struct ecore_ufp_info		ufp_info;
 
 	struct ecore_dmae_info		dmae_info;
 
@@ -625,13 +665,12 @@ struct ecore_hwfn {
 	struct ecore_ptt		*p_arfs_ptt;
 };
 
-#ifndef __EXTRACT__LINUX__
 enum ecore_mf_mode {
 	ECORE_MF_DEFAULT,
 	ECORE_MF_OVLAN,
 	ECORE_MF_NPAR,
+	ECORE_MF_UFP,
 };
-#endif
 
 /* @DPDK */
 struct ecore_dbg_feature {
@@ -727,6 +766,8 @@ struct ecore_dev {
 	u8				num_funcs_in_port;
 
 	u8				path_id;
+
+	unsigned long			mf_bits;
 	enum ecore_mf_mode		mf_mode;
 #define IS_MF_DEFAULT(_p_hwfn)	\
 	(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 66f21fb..ba3560a 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -234,8 +234,8 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 		       int count, u8 dcbx_version)
 {
 	enum dcbx_protocol_type type;
+	bool enable, ieee, eth_tlv;
 	u8 tc, priority_map;
-	bool enable, ieee;
 	u16 protocol_id;
 	u8 priority;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -246,6 +246,7 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 		   count, pri_tc_tbl, dcbx_version);
 
 	ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
+	eth_tlv = false;
 	/* Parse APP TLV */
 	for (i = 0; i < count; i++) {
 		protocol_id = GET_MFW_FIELD(p_tbl[i].entry,
@@ -269,12 +270,23 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 			 * indication, but we only got here if there was an
 			 * app tlv for the protocol, so dcbx must be enabled.
 			 */
-			enable = !(type == DCBX_PROTOCOL_ETH);
+			if (type == DCBX_PROTOCOL_ETH) {
+				enable = false;
+				eth_tlv = true;
+			} else {
+				enable = true;
+			}
 
 			ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
 						   priority, tc, type);
 		}
 	}
+
+	/* If Eth TLV is not detected, use UFP TC as default TC */
+	if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC,
+			  &p_hwfn->p_dev->mf_bits) && !eth_tlv)
+		p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
+
 	/* Update ramrod protocol data and hw_info fields
 	 * with default info when corresponding APP TLV's are not detected.
 	 * The enabled field has a different logic for ethernet as only for
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index edf2896..283c65b 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1455,19 +1455,11 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
 		return ECORE_INVAL;
 	}
 
-	switch (p_hwfn->p_dev->mf_mode) {
-	case ECORE_MF_DEFAULT:
-	case ECORE_MF_NPAR:
-		hw_mode |= 1 << MODE_MF_SI;
-		break;
-	case ECORE_MF_OVLAN:
+	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS,
+			  &p_hwfn->p_dev->mf_bits))
 		hw_mode |= 1 << MODE_MF_SD;
-		break;
-	default:
-		DP_NOTICE(p_hwfn, true,
-			  "Unsupported MF mode, init as DEFAULT\n");
+	else
 		hw_mode |= 1 << MODE_MF_SI;
-	}
 
 #ifndef ASIC_ONLY
 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
@@ -2154,6 +2146,11 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
 			     p_hwfn->hw_info.ovlan);
+
+		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+			   "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
+		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
+			     1);
 	}
 
 	/* Enable classification by MAC if needed */
@@ -2214,7 +2211,6 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
 
 		/* send function start command */
 		rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn,
-				       p_hwfn->p_dev->mf_mode,
 				       allow_npar_tx_switch);
 		if (rc) {
 			DP_NOTICE(p_hwfn, true,
@@ -3504,6 +3500,37 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 
 	switch (mf_mode) {
 	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS;
+		break;
+	case NVM_CFG1_GLOB_MF_MODE_UFP:
+		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+					 1 << ECORE_MF_UFP_SPECIFIC;
+		break;
+
+	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+					 1 << ECORE_MF_LLH_PROTO_CLSS |
+					 1 << ECORE_MF_LL2_NON_UNICAST |
+					 1 << ECORE_MF_INTER_PF_SWITCH;
+		break;
+	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+					 1 << ECORE_MF_LLH_PROTO_CLSS |
+					 1 << ECORE_MF_LL2_NON_UNICAST;
+		if (ECORE_IS_BB(p_hwfn->p_dev))
+			p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
+		break;
+	}
+	DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+		p_hwfn->p_dev->mf_bits);
+
+	/* It's funny since we have another switch, but it's easier
+	 * to throw this away in linux this way. Long term, it might be
+	 * better to have getters for needed ECORE_MF_* fields,
+	 * convert client code and eliminate this.
+	 */
+	switch (mf_mode) {
+	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
 		p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
 		break;
 	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
@@ -3512,9 +3539,10 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
 		p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
 		break;
+	case NVM_CFG1_GLOB_MF_MODE_UFP:
+		p_hwfn->p_dev->mf_mode = ECORE_MF_UFP;
+		break;
 	}
-	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
-		p_hwfn->p_dev->mf_mode);
 
 	/* Read Multi-function information from shmem */
 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -3813,6 +3841,8 @@ static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
 		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
 
 		ecore_mcp_get_eee_caps(p_hwfn, p_ptt);
+
+		ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
 	}
 
 	if (personality != ECORE_PCI_DEFAULT) {
@@ -4609,7 +4639,8 @@ enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
 	u32 high, low, entry_num;
 	enum _ecore_status_t rc;
 
-	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+			   &p_hwfn->p_dev->mf_bits))
 		return ECORE_SUCCESS;
 
 	high = p_filter[1] | (p_filter[0] << 8);
@@ -4676,7 +4707,8 @@ void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
 	u32 high, low, entry_num;
 	enum _ecore_status_t rc;
 
-	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+			   &p_hwfn->p_dev->mf_bits))
 		return;
 
 	high = p_filter[1] | (p_filter[0] << 8);
@@ -4750,7 +4782,8 @@ enum _ecore_status_t
 	u32 high, low, entry_num;
 	enum _ecore_status_t rc;
 
-	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+			   &p_hwfn->p_dev->mf_bits))
 		return ECORE_SUCCESS;
 
 	high = 0;
@@ -4893,7 +4926,8 @@ enum _ecore_status_t
 	u32 high, low, entry_num;
 	enum _ecore_status_t rc;
 
-	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+			   &p_hwfn->p_dev->mf_bits))
 		return;
 
 	high = 0;
@@ -4961,7 +4995,10 @@ static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
 void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
 			     struct ecore_ptt *p_ptt)
 {
-	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+			   &p_hwfn->p_dev->mf_bits) &&
+	    !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+			   &p_hwfn->p_dev->mf_bits))
 		return;
 
 	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
@@ -4972,7 +5009,7 @@ enum _ecore_status_t
 ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
 				  struct ecore_ptt *p_ptt)
 {
-	if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) {
+	if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) {
 		ecore_wr(p_hwfn, p_ptt,
 			 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
 			 1 << p_hwfn->abs_pf_id / 2);
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index fd453f5..98bcabe 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -194,6 +194,12 @@ enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
 enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
 					   void OSAL_IOMEM *db_addr,
 					   void *db_data);
+
+static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
+{
+	return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
+}
+
 #endif
 
 /**
@@ -295,7 +301,6 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
 void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
 		       struct ecore_ptt *p_ptt);
 
-#ifndef __EXTRACT__LINUX__
 struct ecore_eth_stats_common {
 	u64 no_buff_discards;
 	u64 packet_too_big_discard;
@@ -386,7 +391,6 @@ struct ecore_eth_stats {
 		struct ecore_eth_stats_ah ah;
 	};
 };
-#endif
 
 enum ecore_dmae_address_type_t {
 	ECORE_DMAE_ADDRESS_HOST_VIRT,
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index e6980e6..6b5d755 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -21,6 +21,7 @@
 #include "ecore_iro.h"
 #include "ecore_dcbx.h"
 #include "ecore_sp_commands.h"
+#include "ecore_cxt.h"
 
 #define CHIP_MCP_RESP_ITER_US 10
 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
@@ -1860,6 +1861,74 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
 }
 
+void
+ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+	struct public_func shmem_info;
+	u32 port_cfg, val;
+
+	if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+		return;
+
+	OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
+	port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+			    OFFSETOF(struct public_port, oem_cfg_port));
+	val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
+	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
+		DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type  %d\n",
+			  val);
+
+	val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
+	if (val == OEM_CFG_SCHED_TYPE_ETS)
+		p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
+	else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
+		p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
+	else
+		DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
+			  val);
+
+	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+				 MCP_PF_ID(p_hwfn));
+	val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
+	p_hwfn->ufp_info.tc = (u8)val;
+	val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
+			    OEM_CFG_FUNC_HOST_PRI_CTRL);
+	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
+		p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
+	else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
+		p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
+	else
+		DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
+			  val);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+		   "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
+		   p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+		   p_hwfn->ufp_info.pri_type);
+}
+
+static enum _ecore_status_t
+ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+	ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
+
+	if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
+		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
+		p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+
+		ecore_qm_reconf(p_hwfn, p_ptt);
+	} else {
+		/* Merge UFP TC with the dcbx TC data */
+		ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+					    ECORE_DCBX_OPERATIONAL_MIB);
+	}
+
+	/* update storm FW with negotiation results */
+	ecore_sp_pf_update_ufp(p_hwfn);
+
+	return ECORE_SUCCESS;
+}
+
 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
 					     struct ecore_ptt *p_ptt)
 {
@@ -1903,6 +1972,9 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
 						    ECORE_DCBX_OPERATIONAL_MIB);
 			break;
+		case MFW_DRV_MSG_OEM_CFG_UPDATE:
+			ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
+			break;
 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
 			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
 			break;
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 569c064..7f12a0a 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -521,4 +521,13 @@ enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
 						struct ecore_ptt *p_ptt);
 
+/**
+ * @brief Read ufp config from the shared memory.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void
+ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
 #endif /* __ECORE_MCP_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index dfa2ab0..7598e7a 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -294,10 +294,11 @@ static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
 					&p_tun->ip_gre);
 }
 
+#define ETH_P_8021Q 0x8100
+
 enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 				       struct ecore_ptt *p_ptt,
 				       struct ecore_tunnel_info *p_tunn,
-				       enum ecore_mf_mode mode,
 				       bool allow_npar_tx_switch)
 {
 	struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
@@ -307,6 +308,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 	struct ecore_sp_init_data init_data;
 	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	u8 page_cnt;
+	int i;
 
 	/* update initial eq producer */
 	ecore_eq_prod_update(p_hwfn,
@@ -334,20 +336,26 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 	p_ramrod->dont_log_ramrods = 0;
 	p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
 
-	switch (mode) {
-	case ECORE_MF_DEFAULT:
-	case ECORE_MF_NPAR:
-		p_ramrod->mf_mode = MF_NPAR;
-		break;
-	case ECORE_MF_OVLAN:
+	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
 		p_ramrod->mf_mode = MF_OVLAN;
-		break;
-	default:
-		DP_NOTICE(p_hwfn, true,
-			  "Unsupported MF mode, init as DEFAULT\n");
+	else
 		p_ramrod->mf_mode = MF_NPAR;
+
+	p_ramrod->outer_tag_config.outer_tag.tci =
+		OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+
+	if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
+		p_ramrod->outer_tag_config.outer_tag.tpid =
+			OSAL_CPU_TO_LE16(ETH_P_8021Q);
+		if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+			p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+		else
+			p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
+		p_ramrod->outer_tag_config.pri_map_valid = 1;
+		for (i = 0; i < 8; i++)
+			p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] =
+									  (u8)i;
 	}
-	p_ramrod->outer_tag_config.outer_tag.tpid = p_hwfn->hw_info.ovlan;
 
 	/* Place EQ address in RAMROD */
 	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -360,7 +368,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 	ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
 				       &p_ramrod->tunnel_config);
 
-	if (IS_MF_SI(p_hwfn))
+	if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH,
+			  &p_hwfn->p_dev->mf_bits))
 		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
 
 	switch (p_hwfn->hw_info.personality) {
@@ -386,8 +395,9 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
-		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid);
+		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
+		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid,
+		   p_ramrod->outer_tag_config.outer_tag.tci);
 
 	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 
@@ -422,6 +432,34 @@ enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
+enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	struct ecore_sp_init_data init_data;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = ecore_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+				   &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
+	if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+		p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
+	else
+		p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
+
+	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+
 /* QM rate limiter resolution is 1.6Mbps */
 #define QM_RL_RESOLUTION(mb_val)	((mb_val) * 10 / 16)
 
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index 74f6a34..98009c6 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -61,7 +61,6 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
  * @param p_hwfn
  * @param p_ptt
  * @param p_tunn - pf start tunneling configuration
- * @param mode
  * @param allow_npar_tx_switch - npar tx switching to be used
  *	  for vports configured for tx-switching.
  *
@@ -71,7 +70,6 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
 enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 				       struct ecore_ptt *p_ptt,
 				       struct ecore_tunnel_info *p_tunn,
-				       enum ecore_mf_mode mode,
 				       bool allow_npar_tx_switch);
 
 /**
@@ -155,4 +153,13 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
 
 enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn);
 
+/**
+ * @brief ecore_sp_pf_update_ufp - PF ufp update Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn);
+
 #endif /*__ECORE_SP_COMMANDS_H__*/
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 7ac2820..5153f25 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -814,6 +814,17 @@ struct public_port {
 #define ETH_TRANSCEIVER_HAS_DIAGNOSTIC			(1 << 6)
 #define ETH_TRANSCEIVER_IDENT_MASK			0x0000ff00
 #define ETH_TRANSCEIVER_IDENT_OFFSET			8
+
+	u32 oem_cfg_port;
+#define OEM_CFG_CHANNEL_TYPE_MASK			0x00000003
+#define OEM_CFG_CHANNEL_TYPE_OFFSET			0
+#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION		0x1
+#define OEM_CFG_CHANNEL_TYPE_STAGGED			0x2
+
+#define OEM_CFG_SCHED_TYPE_MASK				0x0000000C
+#define OEM_CFG_SCHED_TYPE_OFFSET			2
+#define OEM_CFG_SCHED_TYPE_ETS				0x1
+#define OEM_CFG_SCHED_TYPE_VNIC_BW			0x2
 };
 
 /**************************************/
@@ -930,6 +941,23 @@ struct public_func {
 #define DRV_ID_DRV_INIT_HW_MASK		0x80000000
 #define DRV_ID_DRV_INIT_HW_OFFSET	31
 #define DRV_ID_DRV_INIT_HW_FLAG		(1 << DRV_ID_DRV_INIT_HW_OFFSET)
+
+	u32 oem_cfg_func;
+#define OEM_CFG_FUNC_TC_MASK			0x0000000F
+#define OEM_CFG_FUNC_TC_OFFSET			0
+#define OEM_CFG_FUNC_TC_0			0x0
+#define OEM_CFG_FUNC_TC_1			0x1
+#define OEM_CFG_FUNC_TC_2			0x2
+#define OEM_CFG_FUNC_TC_3			0x3
+#define OEM_CFG_FUNC_TC_4			0x4
+#define OEM_CFG_FUNC_TC_5			0x5
+#define OEM_CFG_FUNC_TC_6			0x6
+#define OEM_CFG_FUNC_TC_7			0x7
+
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK		0x00000030
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET	4
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC		0x1
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS		0x2
 };
 
 /**************************************/
@@ -1735,6 +1763,8 @@ enum MFW_DRV_MSG_TYPE {
 	MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
 	MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
 	MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
+	MFW_DRV_MSG_GET_TLV_REQ,
+	MFW_DRV_MSG_OEM_CFG_UPDATE,
 	MFW_DRV_MSG_MAX
 };
 
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 02af2ee..1f97b59 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -40,7 +40,7 @@ struct qed_dev_info {
 #define QED_MFW_VERSION_3_OFFSET	24
 
 	uint32_t flash_size;
-	uint8_t mf_mode;
+	bool b_inter_pf_switch;
 	bool tx_switching;
 	u16 mtu;
 
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 48dd3b1..2f6a4dc 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -376,7 +376,8 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 	dev_info->fw_eng = FW_ENGINEERING_VERSION;
 
 	if (IS_PF(edev)) {
-		dev_info->mf_mode = edev->mf_mode;
+		dev_info->b_inter_pf_switch =
+			OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH, &edev->mf_bits);
 		dev_info->tx_switching = false;
 
 		dev_info->smart_an = ecore_mcp_is_smart_an_supported(p_hwfn);
-- 
1.7.10.3


* [PATCH 43/53] net/qede/base: add support for mapped doorbell Bars for VFs
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (11 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 42/53] net/qede/base: add UFP support Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 44/53] net/qede/base: add support for driver attribute repository Rasesh Mody
                   ` (9 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Determine whether the VF utilizes doorbells via the limited register bar or
via the doorbell bar, and return the size of the HW doorbell bar via the
acquire response. Based on that, limit the VF CIDs to an amount that ensures
the doorbells for all CIDs fall within the bar.
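
For clarity, the clamp applied by the PF can be sketched in isolation (a
minimal sketch only; limit_vf_cids() and its parameters are hypothetical
names mirroring the logic added to ecore_iov_vf_mbx_acquire_resc_cids()
below, with the usable bar size in bytes passed in after any halving done
for 100G/CMT devices):

#include <stdint.h>

/* Cap the requested CIDs so that every doorbell falls inside the bar. */
static uint8_t limit_vf_cids(uint8_t requested_cids, uint32_t bar_size,
                             uint32_t db_stride)
{
        uint32_t max_cids = bar_size / db_stride;

        /* Only clamp when the bar cannot cover the default 256 doorbells */
        if (max_cids < 256 && requested_cids > max_cids)
                return (uint8_t)max_cids;

        return requested_cids;
}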

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore.h         |    5 +
 drivers/net/qede/base/ecore_dev.c     |   14 +--
 drivers/net/qede/base/ecore_sriov.c   |   65 ++++++++++-
 drivers/net/qede/base/ecore_vf.c      |  198 +++++++++++++++++++++++----------
 drivers/net/qede/base/ecore_vf.h      |    8 ++
 drivers/net/qede/base/ecore_vfpf_if.h |    9 +-
 6 files changed, 224 insertions(+), 75 deletions(-)

diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 3bc1b20..3b51fc2 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -546,6 +546,11 @@ struct ecore_ufp_info {
 	u8 tc;
 };
 
+enum BAR_ID {
+	BAR_ID_0,	/* used for GRC */
+	BAR_ID_1	/* Used for doorbells */
+};
+
 struct ecore_hwfn {
 	struct ecore_dev		*p_dev;
 	u8				my_id;		/* ID inside the PF */
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 283c65b..0568470 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -363,11 +363,6 @@ void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
 /* Derived */
 #define ECORE_MIN_PWM_REGION	(ECORE_WID_SIZE * ECORE_MIN_DPIS)
 
-enum BAR_ID {
-	BAR_ID_0,		/* used for GRC */
-	BAR_ID_1		/* Used for doorbells */
-};
-
 static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
 			     struct ecore_ptt *p_ptt,
 			     enum BAR_ID bar_id)
@@ -376,13 +371,8 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
 		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
 	u32 val;
 
-	if (IS_VF(p_hwfn->p_dev)) {
-		/* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be
-		 * read from actual register, but we're currently not using
-		 * it for actual doorbelling.
-		 */
-		return 1 << 17;
-	}
+	if (IS_VF(p_hwfn->p_dev))
+		return ecore_vf_hw_bar_size(p_hwfn, bar_id);
 
 	val = ecore_rd(p_hwfn, p_ptt, bar_reg);
 	if (val)
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index ed9ace2..a1d4982 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -1538,7 +1538,62 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
 	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
 }
 
+/* Returns either 0, or log(size) */
+static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
+				    struct ecore_ptt *p_ptt)
+{
+	u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+	if (val)
+		return val + 11;
+	return 0;
+}
+
+static void
+ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
+				   struct ecore_ptt *p_ptt,
+				   struct ecore_vf_info *p_vf,
+				   struct vf_pf_resc_request *p_req,
+				   struct pf_vf_resc *p_resp)
+{
+	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+	u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
+		     DB_ADDR_VF(0, DQ_DEMS_LEGACY);
+	u32 bar_size;
+
+	p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
+
+	/* If VF didn't bother asking for QIDs then don't bother limiting
+	 * number of CIDs. The VF doesn't care about the number, and this
+	 * has the likely result of causing an additional acquisition.
+	 */
+	if (!(p_vf->acquire.vfdev_info.capabilities &
+	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+		return;
+
+	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
+	 * that would make sure doorbells for all CIDs fall within the bar.
+	 * If it doesn't, make sure regview window is sufficient.
+	 */
+	if (p_vf->acquire.vfdev_info.capabilities &
+	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+		bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
+		if (bar_size)
+			bar_size = 1 << bar_size;
+
+		if (ECORE_IS_CMT(p_hwfn->p_dev))
+			bar_size /= 2;
+	} else {
+		bar_size = PXP_VF_BAR0_DQ_LENGTH;
+	}
+
+	if (bar_size / db_size < 256)
+		p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
+					      (u8)(bar_size / db_size));
+}
+
 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
+					struct ecore_ptt *p_ptt,
 					struct ecore_vf_info *p_vf,
 					struct vf_pf_resc_request *p_req,
 					struct pf_vf_resc *p_resp)
@@ -1573,9 +1628,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
 	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
 					      p_req->num_vlan_filters);
 
-	p_resp->num_cids =
-		OSAL_MIN_T(u8, p_req->num_cids,
-			   p_hwfn->pf_params.eth_pf_params.num_vf_cons);
+	ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
 
 	/* This isn't really needed/enforced, but some legacy VFs might depend
 	 * on the correct filling of this field.
@@ -1739,6 +1792,10 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
 
+	/* Share the sizes of the bars with VF */
+	resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
+							     p_ptt);
+
 	ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
 
 	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
@@ -1764,7 +1821,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 	/* Fill resources available to VF; Make sure there are enough to
 	 * satisfy the VF's request.
 	 */
-	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, vf,
+	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
 						    &req->resc_request, resc);
 	if (vfpf_status != PFVF_STATUS_SUCCESS)
 		goto out;
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index e84f97a..b78d735 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -151,6 +151,69 @@ static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
 	p_qid_tlv->qid = p_cid->qid_usage_idx;
 }
 
+enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn,
+					  bool b_final)
+{
+	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_def_resp_tlv *resp;
+	struct vfpf_first_tlv *req;
+	u32 size;
+	enum _ecore_status_t rc;
+
+	/* clear mailbox and prep first tlv */
+	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+	/* add list termination tlv */
+	ecore_add_tlv(&p_iov->offset,
+		      CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	resp = &p_iov->pf2vf_reply->default_resp;
+	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
+		rc = ECORE_AGAIN;
+
+	ecore_vf_pf_req_end(p_hwfn, rc);
+	if (!b_final)
+		return rc;
+
+	p_hwfn->b_int_enabled = 0;
+
+	if (p_iov->vf2pf_request)
+		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+				       p_iov->vf2pf_request,
+				       p_iov->vf2pf_request_phys,
+				       sizeof(union vfpf_tlvs));
+	if (p_iov->pf2vf_reply)
+		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+				       p_iov->pf2vf_reply,
+				       p_iov->pf2vf_reply_phys,
+				       sizeof(union pfvf_tlvs));
+
+	if (p_iov->bulletin.p_virt) {
+		size = sizeof(struct ecore_bulletin_content);
+		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+				       p_iov->bulletin.p_virt,
+				       p_iov->bulletin.phys,
+				       size);
+	}
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+	OSAL_MUTEX_DEALLOC(&p_iov->mutex);
+#endif
+
+	OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
+	p_hwfn->vf_iov_info = OSAL_NULL;
+
+	return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
+{
+	return _ecore_vf_pf_release(p_hwfn, true);
+}
+
 #define VF_ACQUIRE_THRESH 3
 static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
 					    struct vf_pf_resc_request *p_req,
@@ -217,6 +280,11 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 	/* Fill capability field with any non-deprecated config we support */
 	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
 
+	/* If we've mapped the doorbell bar, try using queue qids */
+	if (p_iov->b_doorbell_bar)
+		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
+						VFPF_ACQUIRE_CAP_QUEUE_QIDS;
+
 	/* pf 2 vf bulletin board address */
 	req->bulletin_addr = p_iov->bulletin.phys;
 	req->bulletin_size = p_iov->bulletin.size;
@@ -380,10 +448,28 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 	return rc;
 }
 
+u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
+			 enum BAR_ID bar_id)
+{
+	u32 bar_size;
+
+	/* Regview size is fixed */
+	if (bar_id == BAR_ID_0)
+		return 1 << 17;
+
+	/* Doorbell is received from PF */
+	bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
+	if (bar_size)
+		return 1 << bar_size;
+	return 0;
+}
+
 enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
 {
+	struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
 	struct ecore_vf_iov *p_iov;
 	u32 reg;
+	enum _ecore_status_t rc;
 
 	/* Set number of hwfns - might be overridden once leading hwfn learns
 	 * actual configuration from PF.
@@ -391,10 +477,6 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
 	if (IS_LEAD_HWFN(p_hwfn))
 		p_hwfn->p_dev->num_hwfns = 1;
 
-	/* Set the doorbell bar. Assumption: regview is set */
-	p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
-	    PXP_VF_BAR0_START_DQ;
-
 	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
 	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
 
@@ -409,6 +491,31 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
 		return ECORE_NOMEM;
 	}
 
+	/* Doorbells are tricky; Upper-layer has already set the hwfn doorbell
+	 * value, but there are several incompatibility scenarios where that
+	 * would be incorrect and we'd need to override it.
+	 */
+	if (p_hwfn->doorbells == OSAL_NULL) {
+		p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+						     PXP_VF_BAR0_START_DQ;
+	} else if (p_hwfn == p_lead) {
+		/* For leading hw-function, value is always correct, but need
+		 * to handle scenario where legacy PF would not support 100g
+		 * mapped bars later.
+		 */
+		p_iov->b_doorbell_bar = true;
+	} else {
+		/* here, value would be correct ONLY if the leading hwfn
+		 * received indication that mapped-bars are supported.
+		 */
+		if (p_lead->vf_iov_info->b_doorbell_bar)
+			p_iov->b_doorbell_bar = true;
+		else
+			p_hwfn->doorbells = (u8 OSAL_IOMEM *)
+					    p_hwfn->regview +
+					    PXP_VF_BAR0_START_DQ;
+	}
+
 	/* Allocate vf2pf msg */
 	p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
 							 &p_iov->
@@ -460,7 +567,35 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
 
 	p_hwfn->hw_info.personality = ECORE_PCI_ETH;
 
-	return ecore_vf_pf_acquire(p_hwfn);
+	rc = ecore_vf_pf_acquire(p_hwfn);
+
+	/* If VF is 100g using a mapped bar and PF is too old to support that,
+	 * acquisition would succeed - but the VF would have no way of knowing
+	 * the size of the doorbell bar configured in HW and thus will not
+	 * know how to split it for 2nd hw-function.
+	 * In this case we re-try without the indication of the mapped
+	 * doorbell.
+	 */
+	if (rc == ECORE_SUCCESS &&
+	    p_iov->b_doorbell_bar &&
+	    !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
+	    ECORE_IS_CMT(p_hwfn->p_dev)) {
+		rc = _ecore_vf_pf_release(p_hwfn, false);
+		if (rc != ECORE_SUCCESS)
+			return rc;
+
+		p_iov->b_doorbell_bar = false;
+		p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+						     PXP_VF_BAR0_START_DQ;
+		rc = ecore_vf_pf_acquire(p_hwfn);
+	}
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+		   "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
+		   p_hwfn->regview, p_hwfn->doorbells,
+		   p_hwfn->p_dev->doorbells);
+
+	return rc;
 
 free_vf2pf_request:
 	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
@@ -1304,59 +1439,6 @@ enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
 	return rc;
 }
 
-enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
-{
-	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
-	struct pfvf_def_resp_tlv *resp;
-	struct vfpf_first_tlv *req;
-	u32 size;
-	enum _ecore_status_t rc;
-
-	/* clear mailbox and prep first tlv */
-	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
-
-	/* add list termination tlv */
-	ecore_add_tlv(&p_iov->offset,
-		      CHANNEL_TLV_LIST_END,
-		      sizeof(struct channel_list_end_tlv));
-
-	resp = &p_iov->pf2vf_reply->default_resp;
-	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
-
-	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
-		rc = ECORE_AGAIN;
-
-	ecore_vf_pf_req_end(p_hwfn, rc);
-
-	p_hwfn->b_int_enabled = 0;
-
-	if (p_iov->vf2pf_request)
-		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
-				       p_iov->vf2pf_request,
-				       p_iov->vf2pf_request_phys,
-				       sizeof(union vfpf_tlvs));
-	if (p_iov->pf2vf_reply)
-		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
-				       p_iov->pf2vf_reply,
-				       p_iov->pf2vf_reply_phys,
-				       sizeof(union pfvf_tlvs));
-
-	if (p_iov->bulletin.p_virt) {
-		size = sizeof(struct ecore_bulletin_content);
-		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
-				       p_iov->bulletin.p_virt,
-				       p_iov->bulletin.phys, size);
-	}
-
-#ifdef CONFIG_ECORE_LOCK_ALLOC
-	OSAL_MUTEX_DEALLOC(&p_iov->mutex);
-#endif
-
-	OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
-
-	return rc;
-}
-
 void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
 			      struct ecore_filter_mcast *p_filter_cmd)
 {
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index 8c44d37..fdb0fe0 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -49,6 +49,11 @@ struct ecore_vf_iov {
 	 * compatibility [with older PFs] we'd still need to store these.
 	 */
 	struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
+
+	/* Determines whether VF utilizes doorbells via limited register
+	 * bar or via the doorbell bar.
+	 */
+	bool b_doorbell_bar;
 };
 
 /**
@@ -304,5 +309,8 @@ enum _ecore_status_t
 				struct ecore_tunnel_info *p_tunn);
 
 void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
+
+u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
+		     enum BAR_ID bar_id);
 #endif
 #endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index d632423..3ccc766 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -101,6 +101,12 @@ struct vfpf_acquire_tlv {
 	 * this, and use the legacy CID scheme.
 	 */
 #define VFPF_ACQUIRE_CAP_QUEUE_QIDS	(1 << 2)
+
+	/* The VF is using the physical bar. While this is mostly internal
+	 * to the VF, it might affect the number of CIDs supported assuming
+	 * QUEUE_QIDS is set.
+	 */
+#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR	(1 << 3)
 		u64 capabilities;
 		u8 fw_major;
 		u8 fw_minor;
@@ -190,7 +196,8 @@ struct pfvf_acquire_resp_tlv {
 		u16 chip_rev;
 		u8 dev_type;
 
-		u8 padding;
+		/* Doorbell bar size configured in HW: log(size) or 0 */
+		u8 bar_size;
 
 		struct pfvf_stats_info stats_info;
 
-- 
1.7.10.3


* [PATCH 44/53] net/qede/base: add support for driver attribute repository
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (12 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 43/53] net/qede/base: add support for mapped doorbell Bars for VFs Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 45/53] net/qede/base: move define to header file Rasesh Mody
                   ` (8 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Add support for the driver attribute repository in the MFW and base driver.
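
As a usage illustration, a caller could read an attribute as follows (a
hedged sketch only, assuming the ecore headers are included;
read_drv_attr_example() and the attribute key 0 are hypothetical, while
struct ecore_mcp_drv_attr and ecore_mcp_drv_attribute() are the additions
shown below):

static enum _ecore_status_t read_drv_attr_example(struct ecore_hwfn *p_hwfn,
                                                  struct ecore_ptt *p_ptt,
                                                  u32 *p_val)
{
        struct ecore_mcp_drv_attr drv_attr;
        enum _ecore_status_t rc;

        OSAL_MEM_ZERO(&drv_attr, sizeof(drv_attr));
        drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
        drv_attr.attr_num = 0;          /* hypothetical attribute key */

        rc = ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr);
        if (rc == ECORE_SUCCESS)
                *p_val = drv_attr.val;  /* READ/READ_CLEAR return data in val */

        return rc;
}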

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_mcp.c  |   73 ++++++++++++++++++++++++++++++++++++
 drivers/net/qede/base/ecore_mcp.h  |   35 +++++++++++++++++
 drivers/net/qede/base/mcp_public.h |   28 ++++++++++++++
 3 files changed, 136 insertions(+)

diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 6b5d755..89c9864 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -3674,3 +3674,76 @@ enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
 			     features, &mcp_resp, &mcp_param);
 }
+
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			struct ecore_mcp_drv_attr *p_drv_attr)
+{
+	struct attribute_cmd_write_stc attr_cmd_write;
+	enum _attribute_commands_e mfw_attr_cmd;
+	struct ecore_mcp_mb_params mb_params;
+	enum _ecore_status_t rc;
+
+	switch (p_drv_attr->attr_cmd) {
+	case ECORE_MCP_DRV_ATTR_CMD_READ:
+		mfw_attr_cmd = ATTRIBUTE_CMD_READ;
+		break;
+	case ECORE_MCP_DRV_ATTR_CMD_WRITE:
+		mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
+		break;
+	case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
+		mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
+		break;
+	case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
+		mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
+		break;
+	default:
+		DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
+			  p_drv_attr->attr_cmd);
+		return ECORE_INVAL;
+	}
+
+	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
+	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
+		      p_drv_attr->attr_num);
+	SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
+		      mfw_attr_cmd);
+	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
+		OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
+		attr_cmd_write.val = p_drv_attr->val;
+		attr_cmd_write.mask = p_drv_attr->mask;
+		attr_cmd_write.offset = p_drv_attr->offset;
+
+		mb_params.p_data_src = &attr_cmd_write;
+		mb_params.data_src_size = sizeof(attr_cmd_write);
+	}
+
+	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+		DP_INFO(p_hwfn,
+			"The attribute command is not supported by the MFW\n");
+		return ECORE_NOTIMPL;
+	} else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
+		DP_INFO(p_hwfn,
+			"Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
+			mb_params.mcp_resp, p_drv_attr->attr_cmd,
+			p_drv_attr->attr_num);
+		return ECORE_INVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
+		   p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
+		   p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
+		   mb_params.mcp_param);
+
+	if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
+	    p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
+		p_drv_attr->val = mb_params.mcp_param;
+
+	return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 7f12a0a..875b205 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -521,6 +521,41 @@ enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
 						struct ecore_ptt *p_ptt);
 
+enum ecore_mcp_drv_attr_cmd {
+	ECORE_MCP_DRV_ATTR_CMD_READ,
+	ECORE_MCP_DRV_ATTR_CMD_WRITE,
+	ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR,
+	ECORE_MCP_DRV_ATTR_CMD_CLEAR,
+};
+
+struct ecore_mcp_drv_attr {
+	enum ecore_mcp_drv_attr_cmd attr_cmd;
+	u32 attr_num;
+
+	/* R/RC - will be set with the read value
+	 * W - should hold the required value to be written
+	 * C - DC
+	 */
+	u32 val;
+
+	/* W - mask/offset to be applied on the given value
+	 * R/RC/C - DC
+	 */
+	u32 mask;
+	u32 offset;
+};
+
+/**
+ * @brief Handle the drivers' attributes that are kept by the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_drv_attr
+ */
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			struct ecore_mcp_drv_attr *p_drv_attr);
+
 /**
  * @brief Read ufp config from the shared memory.
  *
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 5153f25..e2145ab 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -475,6 +475,18 @@ struct dcb_dscp_map {
 	u32 dscp_pri_map[8];
 };
 
+/**************************************
+ *     Attributes commands
+ **************************************/
+
+enum _attribute_commands_e {
+	ATTRIBUTE_CMD_READ = 0,
+	ATTRIBUTE_CMD_WRITE,
+	ATTRIBUTE_CMD_READ_CLEAR,
+	ATTRIBUTE_CMD_CLEAR,
+	ATTRIBUTE_NUM_OF_COMMANDS
+};
+
 /**************************************/
 /*                                    */
 /*     P U B L I C      G L O B A L   */
@@ -1149,6 +1161,12 @@ struct mdump_retain_data_stc {
 	u32 status;
 };
 
+struct attribute_cmd_write_stc {
+	u32 val;
+	u32 mask;
+	u32 offset;
+};
+
 union drv_union_data {
 	struct mcp_mac wol_mac; /* UNLOAD_DONE */
 
@@ -1180,6 +1198,7 @@ struct mdump_retain_data_stc {
 	struct load_req_stc load_req;
 	struct load_rsp_stc load_rsp;
 	struct mdump_retain_data_stc mdump_retain;
+	struct attribute_cmd_write_stc attribute_cmd_write;
 	/* ... */
 };
 
@@ -1414,6 +1433,8 @@ struct public_drv_mb {
 #define DRV_MSG_CODE_FEATURE_SUPPORT            0x00300000
 /* return FW_MB_PARAM_FEATURE_SUPPORT_*  */
 #define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT	0x00310000
+/* Param: [0:23] Attribute key, [24:31] Attribute sub command */
+#define DRV_MSG_CODE_ATTRIBUTE			0x00350000
 
 #define DRV_MSG_SEQ_NUMBER_MASK                 0x0000ffff
 
@@ -1573,6 +1594,11 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE       0x00000002
 #define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_MASK      0xFFFF0000
 #define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_OFFSET     16
+	/* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET		 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK		0x00FFFFFF
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET		24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK		0xFF000000
 
 	u32 fw_mb_header;
 #define FW_MSG_CODE_MASK                        0xffff0000
@@ -1686,6 +1712,8 @@ struct public_drv_mb {
 
 #define FW_MSG_SEQ_NUMBER_MASK                  0x0000ffff
 
+#define FW_MSG_CODE_ATTRIBUTE_INVALID_KEY	0x00020000
+#define FW_MSG_CODE_ATTRIBUTE_INVALID_CMD	0x00030000
 
 	u32 fw_mb_param;
 /* Resource Allocation params - MFW  version support */
-- 
1.7.10.3


* [PATCH 45/53] net/qede/base: move define to header file
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (13 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 44/53] net/qede/base: add support for driver attribute repository Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 46/53] net/qede/base: dcbx dscp related extensions Rasesh Mody
                   ` (7 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Move the FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR define from ecore_mcp.c to
the mcp_public.h header file.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_mcp.c  |    2 --
 drivers/net/qede/base/mcp_public.h |    2 ++
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 89c9864..019e092 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1068,8 +1068,6 @@ enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
 		return rc;
 	}
 
-#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR     (1 << 0)
-
 	/* Check if there is a DID mismatch between nvm-cfg/efuse */
 	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
 		DP_NOTICE(p_hwfn, false,
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index e2145ab..8b0c220 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1728,6 +1728,8 @@ struct public_drv_mb {
 /* MFW supports EEE */
 #define FW_MB_PARAM_FEATURE_SUPPORT_EEE         0x00000002
 
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR	(1 << 0)
+
 	u32 drv_pulse_mb;
 #define DRV_PULSE_SEQ_MASK                      0x00007fff
 #define DRV_PULSE_SYSTEM_TIME_MASK              0xffff0000
-- 
1.7.10.3


* [PATCH 46/53] net/qede/base: dcbx dscp related extensions
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (14 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 45/53] net/qede/base: move define to header file Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 47/53] net/qede/base: add feature support for per-PF virtual link Rasesh Mody
                   ` (6 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

- Add an internal API ecore_dcbx_get_dscp_value() for getting the
dscp value for a given priority.

- Initialize dscp parameters in the dcbx-config cache to be used by
the clients for configuring dcbx parameters.

- Reset NIG_REG_DSCP_TO_TC_MAP_ENABLE when the user disables DSCP.

- Fix to always send a "dscp + dcbx" update to the FW.
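
The SHMEM encoding consumed here can be sketched in isolation (a standalone
sketch; unpack_dscp_pri_map() is a hypothetical name, and the big-endian
conversion done via OSAL_BE32_TO_CPU in the real code is omitted). The new
ecore_dcbx_get_dscp_value() then performs the reverse lookup on the unpacked
table, returning ECORE_DCBX_DSCP_DISABLED when DSCP is disabled or no entry
maps to the given priority:

#include <stdint.h>

/* The MFW packs the 64-entry DSCP->priority map into 8 u32 words, each
 * word carrying eight 4-bit priority values.
 */
static void unpack_dscp_pri_map(const uint32_t packed[8], uint8_t dscp_pri[64])
{
        int word, nibble, entry = 0;

        for (word = 0; word < 8; word++)
                for (nibble = 0; nibble < 8; nibble++, entry++)
                        dscp_pri[entry] = (packed[word] >> (nibble * 4)) & 0xf;
}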

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_dcbx.c |  105 +++++++++++++++++++++---------------
 drivers/net/qede/base/ecore_dcbx.h |    5 ++
 2 files changed, 66 insertions(+), 44 deletions(-)

diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index ba3560a..54c61bf 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -114,6 +114,21 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 	}
 }
 
+u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri)
+{
+	struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
+	u8 i;
+
+	if (!dscp->enabled)
+		return ECORE_DCBX_DSCP_DISABLED;
+
+	for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
+		if (pri == dscp->dscp_pri_map[i])
+			return i;
+
+	return ECORE_DCBX_DSCP_DISABLED;
+}
+
 static void
 ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
 		      struct ecore_hwfn *p_hwfn,
@@ -121,29 +136,18 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 		      enum dcbx_protocol_type type,
 		      enum ecore_pci_personality personality)
 {
-	struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
-
 	/* PF update ramrod data */
 	p_data->arr[type].enable = enable;
 	p_data->arr[type].priority = prio;
 	p_data->arr[type].tc = tc;
-	p_data->arr[type].dscp_enable = dscp->enabled;
-	if (p_data->arr[type].dscp_enable) {
-		u8 i;
-
-		for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
-			if (prio == dscp->dscp_pri_map[i]) {
-				p_data->arr[type].dscp_val = i;
-				break;
-			}
+	p_data->arr[type].dscp_val = ecore_dcbx_get_dscp_value(p_hwfn, prio);
+	if (p_data->arr[type].dscp_val == ECORE_DCBX_DSCP_DISABLED) {
+		p_data->arr[type].dscp_enable = false;
+		p_data->arr[type].dscp_val = 0;
+	} else {
+		p_data->arr[type].dscp_enable = true;
 	}
-
-	if (enable && p_data->arr[type].dscp_enable)
-		p_data->arr[type].update = UPDATE_DCB_DSCP;
-	else if (enable)
-		p_data->arr[type].update = UPDATE_DCB;
-	else
-		p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+	p_data->arr[type].update = UPDATE_DCB_DSCP;
 
 	/* QM reconf data */
 	if (p_hwfn->hw_info.personality == personality)
@@ -582,6 +586,31 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 	params->remote.valid = true;
 }
 
+static void  ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
+					struct ecore_dcbx_get *params)
+{
+	struct ecore_dcbx_dscp_params *p_dscp;
+	struct dcb_dscp_map *p_dscp_map;
+	int i, j, entry;
+	u32 pri_map;
+
+	p_dscp = &params->dscp;
+	p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
+	p_dscp->enabled = GET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE);
+
+	/* MFW encodes 64 dscp entries into 8 element array of u32 entries,
+	 * where each entry holds the 4bit priority map for 8 dscp entries.
+	 */
+	for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
+		pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
+			   entry, pri_map);
+		for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
+			p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
+							   (j * 4)) & 0xf;
+	}
+}
+
 static void
 ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
 				  struct ecore_dcbx_get *params)
@@ -640,31 +669,6 @@ static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 	p_operational->valid = true;
 }
 
-static void  ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
-					struct ecore_dcbx_get *params)
-{
-	struct ecore_dcbx_dscp_params *p_dscp;
-	struct dcb_dscp_map *p_dscp_map;
-	int i, j, entry;
-	u32 pri_map;
-
-	p_dscp = &params->dscp;
-	p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
-	p_dscp->enabled = GET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE);
-
-	/* MFW encodes 64 dscp entries into 8 element array of u32 entries,
-	 * where each entry holds the 4bit priority map for 8 dscp entries.
-	 */
-	for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
-		pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
-		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
-			   entry, pri_map);
-		for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
-			p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
-							   (j * 4)) & 0xf;
-	}
-}
-
 static void ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
 					     struct ecore_dcbx_get *params)
 {
@@ -894,7 +898,9 @@ enum _ecore_status_t
 	/* Update the DSCP to TC mapping bit if required */
 	if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
 	    p_hwfn->p_dcbx_info->dscp_nig_update) {
-		ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, 0x1);
+		u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled;
+
+		ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, val);
 		p_hwfn->p_dcbx_info->dscp_nig_update = false;
 	}
 
@@ -972,6 +978,8 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
 	if (rc != ECORE_SUCCESS)
 		goto out;
 
+	ecore_dcbx_get_dscp_params(p_hwfn, p_get);
+
 	rc = ecore_dcbx_get_params(p_hwfn, p_get, type);
 
 out:
@@ -1191,6 +1199,12 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
 	p_hwfn->p_dcbx_info->dscp_nig_update = true;
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+		   "pri_map[] = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+		   p_dscp_map->dscp_pri_map[0], p_dscp_map->dscp_pri_map[1],
+		   p_dscp_map->dscp_pri_map[2], p_dscp_map->dscp_pri_map[3],
+		   p_dscp_map->dscp_pri_map[4], p_dscp_map->dscp_pri_map[5],
+		   p_dscp_map->dscp_pri_map[6], p_dscp_map->dscp_pri_map[7]);
 
 	return ECORE_SUCCESS;
 }
@@ -1281,6 +1295,9 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
 		p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
 
 	p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+	OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.dscp,
+		    &p_hwfn->p_dcbx_info->get.dscp,
+		    sizeof(struct ecore_dcbx_dscp_params));
 	OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params,
 		    &dcbx_info->operational.params,
 		    sizeof(p_hwfn->p_dcbx_info->set.config.params));
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index a42ebb4..5986245 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -17,6 +17,8 @@
 #include "ecore_hsi_common.h"
 #include "ecore_dcbx_api.h"
 
+#define ECORE_DCBX_DSCP_DISABLED 0XFF
+
 struct ecore_dcbx_info {
 	struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
 	struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@@ -52,4 +54,7 @@ enum _ecore_status_t
 void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
 				     struct pf_update_ramrod_data *p_dest);
 
+/* Returns TOS value for a given priority */
+u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri);
+
 #endif /* __ECORE_DCBX_H__ */
-- 
1.7.10.3


* [PATCH 47/53] net/qede/base: add feature support for per-PF virtual link
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (15 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 46/53] net/qede/base: dcbx dscp related extensions Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 48/53] net/qede/base: catch an init command write failure Rasesh Mody
                   ` (5 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Add per-PF virtual link support.
This feature adds a logical layer over the physical link to reflect the
control of OEM management protocols either thru' sideband or a switch.
For example, a switch could send a link-down tlv for a PF and this will
put down logical link and virtual link in shared mem (SHMEM) for that PF
inspite of physical link being up for that port.
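
The resulting link-up decision can be sketched as follows (a minimal
standalone sketch; resolve_link_up() is a hypothetical name, and only
FUNC_STATUS_VIRTUAL_LINK_UP is taken from the mcp_public.h change below):

#include <stdbool.h>
#include <stdint.h>

#define FUNC_STATUS_VIRTUAL_LINK_UP     0x00000001  /* from mcp_public.h */

static bool resolve_link_up(bool drv_link_init, bool mfw_supports_vlink,
                            uint32_t shmem_func_status, bool phys_link_up)
{
        if (!drv_link_init)
                return false;

        /* With a virtual-link capable MFW, the per-PF SHMEM bit wins */
        if (mfw_supports_vlink)
                return !!(shmem_func_status & FUNC_STATUS_VIRTUAL_LINK_UP);

        return phys_link_up;
}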

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_mcp.c  |   70 ++++++++++++++++++++++--------------
 drivers/net/qede/base/mcp_public.h |    8 ++++-
 2 files changed, 50 insertions(+), 28 deletions(-)

diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 019e092..8a8670d 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1220,6 +1220,28 @@ static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
 		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
 }
 
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+				    struct ecore_ptt *p_ptt,
+				    struct public_func *p_data,
+				    int pfid)
+{
+	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					PUBLIC_FUNC);
+	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+	u32 i, size;
+
+	OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+	size = OSAL_MIN_T(u32, sizeof(*p_data),
+			  SECTION_SIZE(mfw_path_offsize));
+	for (i = 0; i < size / sizeof(u32); i++)
+		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+					      func_addr + (i << 2));
+
+	return size;
+}
+
 static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
 					 struct ecore_ptt *p_ptt,
 					 bool b_reset)
@@ -1249,10 +1271,24 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
 		goto out;
 	}
 
-	if (p_hwfn->b_drv_link_init)
-		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
-	else
+	if (p_hwfn->b_drv_link_init) {
+		/* Link indication with modern MFW arrives as per-PF
+		 * indication.
+		 */
+		if (p_hwfn->mcp_info->capabilities &
+		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+			struct public_func shmem_info;
+
+			ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+						 MCP_PF_ID(p_hwfn));
+			p_link->link_up = !!(shmem_info.status &
+					     FUNC_STATUS_VIRTUAL_LINK_UP);
+		} else {
+			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+		}
+	} else {
 		p_link->link_up = false;
+	}
 
 	p_link->full_duplex = true;
 	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -1515,7 +1551,8 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
 		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
 		break;
 	default:
-		DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+			   "Invalid protocol type %d\n", type);
 		return;
 	}
 
@@ -1565,28 +1602,6 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
 	}
 }
 
-static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
-				    struct ecore_ptt *p_ptt,
-				    struct public_func *p_data,
-				    int pfid)
-{
-	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
-					PUBLIC_FUNC);
-	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
-	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
-	u32 i, size;
-
-	OSAL_MEM_ZERO(p_data, sizeof(*p_data));
-
-	size = OSAL_MIN_T(u32, sizeof(*p_data),
-			  SECTION_SIZE(mfw_path_offsize));
-	for (i = 0; i < size / sizeof(u32); i++)
-		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
-					      func_addr + (i << 2));
-
-	return size;
-}
-
 static void
 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 {
@@ -3667,7 +3682,8 @@ enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
 	u32 mcp_resp, mcp_param, features;
 
 	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
-		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
+		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
 
 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
 			     features, &mcp_resp, &mcp_param);
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 8b0c220..d568179 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -896,7 +896,9 @@ struct public_func {
 #define FUNC_MF_CFG_MAX_BW_DEFAULT              0x00640000
 
 	u32 status;
-#define FUNC_STATUS_VLINK_DOWN			0x00000001
+#define FUNC_STATUS_VIRTUAL_LINK_UP		0x00000001
+#define FUNC_STATUS_LOGICAL_LINK_UP		0x00000002
+#define FUNC_STATUS_FORCED_LINK			0x00000004
 
 	u32 mac_upper;      /* MAC */
 #define FUNC_MF_CFG_UPPERMAC_MASK               0x0000ffff
@@ -1594,6 +1596,8 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE       0x00000002
 #define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_MASK      0xFFFF0000
 #define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_OFFSET     16
+/* driver supports virtual link parameter */
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK     0x00010000
 	/* Driver attributes params */
 #define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET		 0
 #define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK		0x00FFFFFF
@@ -1727,6 +1731,8 @@ struct public_drv_mb {
 #define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ   0x00000001
 /* MFW supports EEE */
 #define FW_MB_PARAM_FEATURE_SUPPORT_EEE         0x00000002
+/* MFW supports virtual link */
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK       0x00010000
 
 #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR	(1 << 0)
 
-- 
1.7.10.3


* [PATCH 48/53] net/qede/base: catch an init command write failure
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (16 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 47/53] net/qede/base: add feature support for per-PF virtual link Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 49/53] net/qede/base: retain dcbx config till actually applied Rasesh Mody
                   ` (4 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

In case ecore_init_rt() fails for some reason, catch the failure and
fail the initialization.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_init_ops.c |    8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index 4491a14..91633c1 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -317,10 +317,10 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
 					  b_must_dmae, b_can_dmae);
 		break;
 	case INIT_SRC_RUNTIME:
-		ecore_init_rt(p_hwfn, p_ptt, addr,
-			      OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
-			      OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
-			      b_must_dmae);
+		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
+				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
+				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
+				   b_must_dmae);
 		break;
 	}
 
-- 
1.7.10.3


* [PATCH 49/53] net/qede/base: retain dcbx config till actually applied
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (17 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 48/53] net/qede/base: catch an init command write failure Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 50/53] net/qede/base: disable aRFS for NPAR and 100G Rasesh Mody
                   ` (3 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Retain the user dcbx configuration until the config is applied to the
adapter, i.e. until the negotiation completes. If the base driver receives a
config update before the negotiation completes, it merges the new config
with the cached config and sends it to the management FW.
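
The caching rule can be summarized with a conceptual sketch (hypothetical
types and names only; the real cache lives in p_hwfn->p_dcbx_info->set):

#include <stdbool.h>
#include <string.h>

struct dcbx_cfg_sketch { int enabled; /* ... */ };

static struct dcbx_cfg_sketch cached_set;

/* Every 'set' call refreshes the cache; only hw_commit pushes it to the
 * MFW.
 */
static void dcbx_set_config(const struct dcbx_cfg_sketch *params,
                            bool hw_commit)
{
        memcpy(&cached_set, params, sizeof(cached_set));
        if (!hw_commit)
                return;
        /* ... translate cached_set and send it to the management FW ... */
}

/* Called on the operational MIB update, i.e. when negotiation completes */
static void on_operational_mib_update(void)
{
        memset(&cached_set, 0, sizeof(cached_set));
}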

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_dcbx.c |   11 +++--------
 drivers/net/qede/base/ecore_mcp.c  |    3 +++
 2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 54c61bf..c6274bd 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -1220,15 +1220,10 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
 	u32 resp = 0, param = 0;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 
-	if (!hw_commit) {
-		OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
-			    sizeof(p_hwfn->p_dcbx_info->set));
+	OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
+		    sizeof(p_hwfn->p_dcbx_info->set));
+	if (!hw_commit)
 		return ECORE_SUCCESS;
-	}
-
-	/* clear set-parmas cache */
-	OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
-		    sizeof(struct ecore_dcbx_set));
 
 	OSAL_MEMSET(&local_admin, 0, sizeof(local_admin));
 	ecore_dcbx_set_local_params(p_hwfn, &local_admin, params);
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 8a8670d..06e426a 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1984,6 +1984,9 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
 		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
 						    ECORE_DCBX_OPERATIONAL_MIB);
+			/* clear the user-config cache */
+			OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
+				    sizeof(struct ecore_dcbx_set));
 			break;
 		case MFW_DRV_MSG_OEM_CFG_UPDATE:
 			ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
-- 
1.7.10.3


* [PATCH 50/53] net/qede/base: disable aRFS for NPAR and 100G
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (18 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 49/53] net/qede/base: retain dcbx config till actually applied Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 51/53] net/qede/base: add support for WoL writes Rasesh Mody
                   ` (2 subsequent siblings)
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Disable accelerated RFS (aRFS) for NPAR and 100G configurations using the
ECORE_MF_DISABLE_ARFS multi-function mode bit.
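
A hedged consumption sketch at the upper-driver level (arfs_allowed() is a
hypothetical helper, assuming qede_if.h is included; b_arfs_capable is the
new field added to struct qed_dev_info below):

/* aRFS is force-disabled for NPAR and 100G (CMT) devices */
static bool arfs_allowed(const struct qed_dev_info *info, bool user_requested)
{
        return user_requested && info->b_arfs_capable;
}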

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore.h     |    2 ++
 drivers/net/qede/base/ecore_cxt.c |   10 +++++++++-
 drivers/net/qede/base/ecore_dev.c |    6 +++++-
 drivers/net/qede/base/ecore_l2.c  |    3 +++
 drivers/net/qede/qede_if.h        |    1 +
 drivers/net/qede/qede_main.c      |    2 ++
 6 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 3b51fc2..dc09847 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -528,6 +528,8 @@ enum ecore_mf_mode_bit {
 
 	/* TODO - if we ever re-utilize any of this logic, we can rename */
 	ECORE_MF_UFP_SPECIFIC,
+
+	ECORE_MF_DISABLE_ARFS,
 };
 
 enum ecore_ufp_mode {
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 3ebeb12..fed7926 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1991,6 +1991,8 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
 	switch (p_hwfn->hw_info.personality) {
 	case ECORE_PCI_ETH:
 		{
+		u32 count = 0;
+
 		struct ecore_eth_pf_params *p_params =
 			    &p_hwfn->pf_params.eth_pf_params;
 
@@ -1999,7 +2001,13 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
 		ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
 					      p_params->num_cons,
 					      p_params->num_vf_cons);
-		p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
+
+		count = p_params->num_arfs_filters;
+
+		if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
+				   &p_hwfn->p_dev->mf_bits))
+			p_hwfn->p_cxt_mngr->arfs_count = count;
+
 		break;
 		}
 	default:
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 0568470..9511110 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -3501,7 +3501,8 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
 					 1 << ECORE_MF_LLH_PROTO_CLSS |
 					 1 << ECORE_MF_LL2_NON_UNICAST |
-					 1 << ECORE_MF_INTER_PF_SWITCH;
+					 1 << ECORE_MF_INTER_PF_SWITCH |
+					 1 << ECORE_MF_DISABLE_ARFS;
 		break;
 	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
 		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
@@ -3514,6 +3515,9 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 	DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
 		p_hwfn->p_dev->mf_bits);
 
+	if (ECORE_IS_CMT(p_hwfn->p_dev))
+		p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS);
+
 	/* It's funny since we have another switch, but it's easier
 	 * to throw this away in linux this way. Long term, it might be
 	 * better to have have getters for needed ECORE_MF_* fields,
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 01fe880..e3afc8a 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -2072,6 +2072,9 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
 			       struct ecore_ptt *p_ptt,
 			       struct ecore_arfs_config_params *p_cfg_params)
 {
+	if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
+		return;
+
 	if (p_cfg_params->arfs_enable) {
 		ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
 				 p_cfg_params->tcp,
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 1f97b59..bf80ccb 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -40,6 +40,7 @@ struct qed_dev_info {
 #define QED_MFW_VERSION_3_OFFSET	24
 
 	uint32_t flash_size;
+	bool b_arfs_capable;
 	bool b_inter_pf_switch;
 	bool tx_switching;
 	u16 mtu;
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 2f6a4dc..a0c9e03 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -378,6 +378,8 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 	if (IS_PF(edev)) {
 		dev_info->b_inter_pf_switch =
 			OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH, &edev->mf_bits);
+		if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &edev->mf_bits))
+			dev_info->b_arfs_capable = true;
 		dev_info->tx_switching = false;
 
 		dev_info->smart_an = ecore_mcp_is_smart_an_supported(p_hwfn);
-- 
1.7.10.3


* [PATCH 51/53] net/qede/base: add support for WoL writes
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (19 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 50/53] net/qede/base: disable aRFS for NPAR and 100G Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 52/53] net/qede/base: remove unused input parameter Rasesh Mody
  2017-09-19  1:51 ` [PATCH 53/53] net/qede/base: update PMD version to 2.6.0.1 Rasesh Mody
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Add support for programming WoL bitmap patterns via the management FW
mailbox.
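
For illustration, a minimal usage sketch of the new helper; the wrapper
name, register offset and pattern word below are placeholders, and only
ecore_mcp_wol_wr() itself is introduced by this patch:

/* Hypothetical caller (sketch only): program one 32-bit word of a WoL
 * bitmap pattern through the management FW mailbox.
 */
static void qede_wol_pattern_wr_example(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	u32 wol_reg_offset = 0x0;	/* assumed WoL register offset */
	u32 pattern_word = 0xffffffff;	/* assumed pattern data */

	/* Sends DRV_MSG_CODE_WRITE_WOL_REG; the helper logs a notice if
	 * the MFW does not return FW_MSG_CODE_WOL_READ_WRITE_OK.
	 */
	ecore_mcp_wol_wr(p_hwfn, p_ptt, wol_reg_offset, pattern_word);
}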

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_dev.c  |   26 +++++++++++++-------------
 drivers/net/qede/base/ecore_mcp.c  |   26 ++++++++++++++++++++++++++
 drivers/net/qede/base/ecore_mcp.h  |    3 +++
 drivers/net/qede/base/mcp_public.h |   18 ++++++++++++++----
 4 files changed, 56 insertions(+), 17 deletions(-)

diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 9511110..e47d5c7 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -3318,8 +3318,8 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 	nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
 
 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
-	    OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob,
-						       core_cfg);
+		   OFFSETOF(struct nvm_cfg1, glob) +
+		   OFFSETOF(struct nvm_cfg1_glob, core_cfg);
 
 	core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
 
@@ -3399,8 +3399,8 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 	p_caps->speed_capabilities = link->speed.advertised_speeds;
 
 	link_temp = ecore_rd(p_hwfn, p_ptt,
-			     port_cfg_addr +
-			     OFFSETOF(struct nvm_cfg1_port, link_settings));
+				 port_cfg_addr +
+				 OFFSETOF(struct nvm_cfg1_port, link_settings));
 	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
 		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
 	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
@@ -3480,8 +3480,8 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 
 	/* Read Multi-function information from shmem */
 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
-	    OFFSETOF(struct nvm_cfg1, glob) +
-	    OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+		   OFFSETOF(struct nvm_cfg1, glob) +
+		   OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
 
 	generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
 
@@ -3540,25 +3540,25 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 
 	/* Read Multi-function information from shmem */
 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
-	    OFFSETOF(struct nvm_cfg1, glob) +
-	    OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
+		   OFFSETOF(struct nvm_cfg1, glob) +
+		   OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
 
 	device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
 		OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
-			     &p_hwfn->hw_info.device_capabilities);
+				&p_hwfn->hw_info.device_capabilities);
 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
 		OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
-			     &p_hwfn->hw_info.device_capabilities);
+				&p_hwfn->hw_info.device_capabilities);
 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
 		OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
-			     &p_hwfn->hw_info.device_capabilities);
+				&p_hwfn->hw_info.device_capabilities);
 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
 		OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
-			     &p_hwfn->hw_info.device_capabilities);
+				&p_hwfn->hw_info.device_capabilities);
 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
 		OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
-			     &p_hwfn->hw_info.device_capabilities);
+				&p_hwfn->hw_info.device_capabilities);
 
 	rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
 	if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 06e426a..3df8fce 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -3764,3 +3764,29 @@ enum _ecore_status_t
 
 	return ECORE_SUCCESS;
 }
+
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+		      u32 offset, u32 val)
+{
+	struct ecore_mcp_mb_params mb_params = {0};
+	enum _ecore_status_t	   rc = ECORE_SUCCESS;
+	u32			   dword = val;
+
+	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
+	mb_params.param = offset;
+	mb_params.p_data_src = &dword;
+	mb_params.data_src_size = sizeof(dword);
+
+	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(p_hwfn, false,
+			  "Failed to send WoL write request, rc = %d\n", rc);
+	}
+
+	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
+		DP_NOTICE(p_hwfn, false,
+			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
+			  val, offset, mb_params.mcp_resp);
+		rc = ECORE_UNKNOWN_ERROR;
+	}
+}
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 875b205..6afaf7d 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -565,4 +565,7 @@ enum _ecore_status_t
 void
 ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
 
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+		      u32 offset, u32 val);
+
 #endif /* __ECORE_MCP_H__ */
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index d568179..799357a 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1248,12 +1248,12 @@ struct public_drv_mb {
 #define DRV_MSG_CODE_NVM_PUT_FILE_DATA		0x00020000
 /* MFW will place the file offset and len in file_att struct */
 #define DRV_MSG_CODE_NVM_GET_FILE_ATT		0x00030000
-/* Read 32bytes of nvram data. Param is [0:23] – Offset [24:31] –
- * Len in Bytes
+/* Read 32bytes of nvram data. Param is [0:23] - Offset [24:31] -
+ * Len in Bytes
  */
 #define DRV_MSG_CODE_NVM_READ_NVRAM		0x00050000
-/* Writes up to 32Bytes to nvram. Param is [0:23] – Offset [24:31] –
- * Len in Bytes. In case this address is in the range of secured file in
+/* Writes up to 32Bytes to nvram. Param is [0:23] - Offset [24:31] -
+ * Len in Bytes. In case this address is in the range of secured file in
  * secured mode, the operation will fail
  */
 #define DRV_MSG_CODE_NVM_WRITE_NVRAM		0x00060000
@@ -1431,10 +1431,14 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_PORT_MASK			0x00600000
 #define DRV_MSG_CODE_EXT_PHY_FW_UPGRADE		0x002a0000
 
+#define DRV_MSG_CODE_GET_TLV_DONE		0x002f0000 /* Param: None */
 /* Param: Set DRV_MB_PARAM_FEATURE_SUPPORT_* */
 #define DRV_MSG_CODE_FEATURE_SUPPORT            0x00300000
 /* return FW_MB_PARAM_FEATURE_SUPPORT_*  */
 #define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT	0x00310000
+#define DRV_MSG_CODE_READ_WOL_REG		0X00320000
+#define DRV_MSG_CODE_WRITE_WOL_REG		0X00330000
+#define DRV_MSG_CODE_GET_WOL_BUFFER		0X00340000
 /* Param: [0:23] Attribute key, [24:31] Attribute sub command */
 #define DRV_MSG_CODE_ATTRIBUTE			0x00350000
 
@@ -1714,6 +1718,12 @@ struct public_drv_mb {
 #define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE     0x00870000
 #define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_BAD_ASIC 0x00880000
 
+#define FW_MSG_CODE_WOL_READ_WRITE_OK		0x00820000
+#define FW_MSG_CODE_WOL_READ_WRITE_INVALID_VAL	0x00830000
+#define FW_MSG_CODE_WOL_READ_WRITE_INVALID_ADDR	0x00840000
+#define FW_MSG_CODE_WOL_READ_BUFFER_OK		0x00850000
+#define FW_MSG_CODE_WOL_READ_BUFFER_INVALID_VAL	0x00860000
+
 #define FW_MSG_SEQ_NUMBER_MASK                  0x0000ffff
 
 #define FW_MSG_CODE_ATTRIBUTE_INVALID_KEY	0x00020000
-- 
1.7.10.3

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH 52/53] net/qede/base: remove unused input parameter
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (20 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 51/53] net/qede/base: add support for WoL writes Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  2017-09-19  1:51 ` [PATCH 53/53] net/qede/base: update PMD version to 2.6.0.1 Rasesh Mody
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Remove unused input parameter from ecore_dcbx_info_free().

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_dcbx.c |    3 +--
 drivers/net/qede/base/ecore_dcbx.h |    2 +-
 drivers/net/qede/base/ecore_dev.c  |    2 +-
 3 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index c6274bd..637d5bb 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -925,8 +925,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
 	return ECORE_SUCCESS;
 }
 
-void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn,
-			  struct ecore_dcbx_info *p_dcbx_info)
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn)
 {
 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info);
 }
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index 5986245..bc1a2f9 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -50,7 +50,7 @@ enum _ecore_status_t
 			    enum ecore_mib_read_type);
 
 enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
-void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn);
 void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
 				     struct pf_update_ramrod_data *p_dest);
 
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index e47d5c7..d3abe08 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -472,7 +472,7 @@ void ecore_resc_free(struct ecore_dev *p_dev)
 		ecore_iov_free(p_hwfn);
 		ecore_l2_free(p_hwfn);
 		ecore_dmae_info_free(p_hwfn);
-		ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+		ecore_dcbx_info_free(p_hwfn);
 		/* @@@TBD Flush work-queue ? */
 
 		/* destroy doorbell recovery mechanism */
-- 
1.7.10.3

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH 53/53] net/qede/base: update PMD version to 2.6.0.1
  2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
                   ` (21 preceding siblings ...)
  2017-09-19  1:51 ` [PATCH 52/53] net/qede/base: remove unused input parameter Rasesh Mody
@ 2017-09-19  1:51 ` Rasesh Mody
  22 siblings, 0 replies; 24+ messages in thread
From: Rasesh Mody @ 2017-09-19  1:51 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: Rasesh Mody, Dept-EngDPDKDev

Update QEDE PMD version to 2.6.0.1

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/qede_ethdev.h |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index a3254b1..4543533 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -49,8 +49,8 @@
 /* Driver versions */
 #define QEDE_PMD_VER_PREFIX		"QEDE PMD"
 #define QEDE_PMD_VERSION_MAJOR		2
-#define QEDE_PMD_VERSION_MINOR	        5
-#define QEDE_PMD_VERSION_REVISION       2
+#define QEDE_PMD_VERSION_MINOR	        6
+#define QEDE_PMD_VERSION_REVISION       0
 #define QEDE_PMD_VERSION_PATCH	        1
 
 #define QEDE_PMD_VERSION qede_stringify(QEDE_PMD_VERSION_MAJOR) "."     \
-- 
1.7.10.3

^ permalink raw reply related	[flat|nested] 24+ messages in thread

end of thread

Thread overview: 24+ messages
2017-09-19  1:51 [PATCH 30/53] net/qede/base: read per queue coalescing from HW Rasesh Mody
2017-09-19  1:51 ` [PATCH 31/53] net/qede/base: refactor device's number of ports logic Rasesh Mody
2017-09-19  1:51 ` [PATCH 32/53] net/qede/base: use proper units for rate limiting Rasesh Mody
2017-09-19  1:51 ` [PATCH 33/53] net/qede/base: use available macro Rasesh Mody
2017-09-19  1:51 ` [PATCH 34/53] net/qede/base: use function pointers for spq async callback Rasesh Mody
2017-09-19  1:51 ` [PATCH 35/53] net/qede/base: fix API return types Rasesh Mody
2017-09-19  1:51 ` [PATCH 36/53] net/qede/base: semantic changes Rasesh Mody
2017-09-19  1:51 ` [PATCH 37/53] net/qede/base: handle the error condition properly Rasesh Mody
2017-09-19  1:51 ` [PATCH 38/53] net/qede/base: add new macro for CMT mode Rasesh Mody
2017-09-19  1:51 ` [PATCH 39/53] net/qede/base: change verbosity Rasesh Mody
2017-09-19  1:51 ` [PATCH 40/53] net/qede/base: fix number of app table entries Rasesh Mody
2017-09-19  1:51 ` [PATCH 41/53] net/qede/base: update firmware to 8.30.12.0 Rasesh Mody
2017-09-19  1:51 ` [PATCH 42/53] net/qede/base: add UFP support Rasesh Mody
2017-09-19  1:51 ` [PATCH 43/53] net/qede/base: add support for mapped doorbell Bars for VFs Rasesh Mody
2017-09-19  1:51 ` [PATCH 44/53] net/qede/base: add support for driver attribute repository Rasesh Mody
2017-09-19  1:51 ` [PATCH 45/53] net/qede/base: move define to header file Rasesh Mody
2017-09-19  1:51 ` [PATCH 46/53] net/qede/base: dcbx dscp related extensions Rasesh Mody
2017-09-19  1:51 ` [PATCH 47/53] net/qede/base: add feature support for per-PF virtual link Rasesh Mody
2017-09-19  1:51 ` [PATCH 48/53] net/qede/base: catch an init command write failure Rasesh Mody
2017-09-19  1:51 ` [PATCH 49/53] net/qede/base: retain dcbx config till actually applied Rasesh Mody
2017-09-19  1:51 ` [PATCH 50/53] net/qede/base: disable aRFS for NPAR and 100G Rasesh Mody
2017-09-19  1:51 ` [PATCH 51/53] net/qede/base: add support for WoL writes Rasesh Mody
2017-09-19  1:51 ` [PATCH 52/53] net/qede/base: remove unused input parameter Rasesh Mody
2017-09-19  1:51 ` [PATCH 53/53] net/qede/base: update PMD version to 2.6.0.1 Rasesh Mody
