All of lore.kernel.org
 help / color / mirror / Atom feed
From: Rasesh Mody <rasesh.mody@cavium.com>
To: <dev@dpdk.org>
Cc: Rasesh Mody <rasesh.mody@cavium.com>, <Dept-EngDPDKDev@cavium.com>
Subject: [PATCH 27/61] net/qede/base: L2 handler changes
Date: Sun, 26 Feb 2017 23:56:43 -0800	[thread overview]
Message-ID: <1488182237-10247-28-git-send-email-rasesh.mody@cavium.com> (raw)
In-Reply-To: <1488182237-10247-1-git-send-email-rasesh.mody@cavium.com>

L2 handler changes:

This change removes the queue-id/qzone difference for Tx queues.

It does that by mainly doing:

a. VF queues are no longer determined by the SBs they're using.
Instead, the ecore-client needs to maintain those and choose the values
to be used by the VF when initializing it.

b. Eliminate the HW-cid array in the hw-function.
To do that, have all the rx/tx functionality become 'handle' based -
when a queue is started the caller gets a (void *) handle,
which it later uses with ecore for the various
queue-related operations [update, stop].

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore.h         |   13 -
 drivers/net/qede/base/ecore_dev.c     |   39 ---
 drivers/net/qede/base/ecore_int.c     |   24 --
 drivers/net/qede/base/ecore_int.h     |   10 -
 drivers/net/qede/base/ecore_iov_api.h |   24 +-
 drivers/net/qede/base/ecore_l2.c      |  526 ++++++++++++++++++---------------
 drivers/net/qede/base/ecore_l2.h      |   84 +++---
 drivers/net/qede/base/ecore_l2_api.h  |  108 ++++---
 drivers/net/qede/base/ecore_sriov.c   |  262 ++++++++++------
 drivers/net/qede/base/ecore_sriov.h   |    4 +-
 drivers/net/qede/base/ecore_vf.c      |  119 +++++---
 drivers/net/qede/base/ecore_vf.h      |   55 ++--
 drivers/net/qede/qede_eth_if.c        |   50 ++--
 drivers/net/qede/qede_eth_if.h        |   22 +-
 drivers/net/qede/qede_rxtx.c          |   42 +--
 drivers/net/qede/qede_rxtx.h          |    2 +
 16 files changed, 723 insertions(+), 661 deletions(-)

diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index b8c8bfd..de0f49a 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -394,16 +394,6 @@ struct ecore_hw_info {
 	u16 mtu;
 };
 
-struct ecore_hw_cid_data {
-	u32	cid;
-	bool	b_cid_allocated;
-	u8	vfid; /* 1-based; 0 signals this is for a PF */
-
-	/* Additional identifiers */
-	u16	opaque_fid;
-	u8	vport_id;
-};
-
 /* maximun size of read/write commands (HW limit) */
 #define DMAE_MAX_RW_SIZE	0x2000
 
@@ -566,9 +556,6 @@ struct ecore_hwfn {
 	struct ecore_mcp_info		*mcp_info;
 	struct ecore_dcbx_info		*p_dcbx_info;
 
-	struct ecore_hw_cid_data	*p_tx_cids;
-	struct ecore_hw_cid_data	*p_rx_cids;
-
 	struct ecore_dmae_info		dmae_info;
 
 	/* QM init */
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 3591381..168ada8 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -161,15 +161,6 @@ void ecore_resc_free(struct ecore_dev *p_dev)
 	for_each_hwfn(p_dev, i) {
 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 
-		OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
-		p_hwfn->p_tx_cids = OSAL_NULL;
-		OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
-		p_hwfn->p_rx_cids = OSAL_NULL;
-	}
-
-	for_each_hwfn(p_dev, i) {
-		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
 		ecore_cxt_mngr_free(p_hwfn);
 		ecore_qm_info_free(p_hwfn);
 		ecore_spq_free(p_hwfn);
@@ -852,36 +843,6 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 	if (!p_dev->fw_data)
 		return ECORE_NOMEM;
 
-	/* Allocate Memory for the Queue->CID mapping */
-	for_each_hwfn(p_dev, i) {
-		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-		u32 num_tx_conns = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
-		int tx_size, rx_size;
-
-		/* @@@TMP - resc management, change to actual required size */
-		if (p_hwfn->pf_params.eth_pf_params.num_cons > num_tx_conns)
-			num_tx_conns = p_hwfn->pf_params.eth_pf_params.num_cons;
-		tx_size = sizeof(struct ecore_hw_cid_data) * num_tx_conns;
-		rx_size = sizeof(struct ecore_hw_cid_data) *
-		    RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
-
-		p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
-						tx_size);
-		if (!p_hwfn->p_tx_cids) {
-			DP_NOTICE(p_hwfn, true,
-				  "Failed to allocate memory for Tx Cids\n");
-			goto alloc_no_mem;
-		}
-
-		p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
-						rx_size);
-		if (!p_hwfn->p_rx_cids) {
-			DP_NOTICE(p_hwfn, true,
-				  "Failed to allocate memory for Rx Cids\n");
-			goto alloc_no_mem;
-		}
-	}
-
 	for_each_hwfn(p_dev, i) {
 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 		u32 n_eqes, num_cons;
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index ffcae46..66c4731 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -2186,30 +2186,6 @@ void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
 	p_sb_cnt_info->sb_free_blk = info->free_blks;
 }
 
-u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
-{
-	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
-
-	/* Determine origin of SB id */
-	if ((sb_id >= p_info->igu_base_sb) &&
-	    (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
-		return sb_id - p_info->igu_base_sb;
-	} else if ((sb_id >= p_info->igu_base_sb_iov) &&
-		   (sb_id < p_info->igu_base_sb_iov +
-			    p_info->igu_sb_cnt_iov)) {
-		/* We want the first VF queue to be adjacent to the
-		 * last PF queue. Since L2 queues can be partial to
-		 * SBs, we'll use the feature instead.
-		 */
-		return sb_id - p_info->igu_base_sb_iov +
-		       FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
-	} else {
-		DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
-			  sb_id);
-		return 0;
-	}
-}
-
 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
 {
 	int i;
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 45358b9..0c8929e 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -172,16 +172,6 @@ enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn	*p_hwfn,
 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
 
 /**
- * @brief - Returns an Rx queue index appropriate for usage with given SB.
- *
- * @param p_hwfn
- * @param sb_id - absolute index of SB
- *
- * @return index of Rx queue
- */
-u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
-
-/**
  * @brief - Enable Interrupt & Attention for hw function
  *
  * @param p_hwfn
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 9775360..b8dc47b 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -88,6 +88,23 @@ struct ecore_public_vf_info {
 	u16 forced_vlan;
 };
 
+struct ecore_iov_vf_init_params {
+	u16 rel_vf_id;
+
+	/* Number of requested Queues; Currently, don't support different
+	 * number of Rx/Tx queues.
+	 */
+	/* TODO - remove this limitation */
+	u16 num_queues;
+
+	/* Allow the client to choose which qzones to use for Rx/Tx,
+	 * and which queue_base to use for Tx queues on a per-queue basis.
+	 * Notice values should be relative to the PF resources.
+	 */
+	u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+	u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+};
+
 #ifdef CONFIG_ECORE_SW_CHANNEL
 /* This is SW channel related only... */
 enum mbx_state {
@@ -175,15 +192,14 @@ void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
  *
  * @param p_hwfn
  * @param p_ptt
- * @param rel_vf_id
- * @param num_rx_queues
+ * @param p_params
  *
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
 					      struct ecore_ptt *p_ptt,
-					      u16 rel_vf_id,
-					      u16 num_rx_queues);
+					      struct ecore_iov_vf_init_params
+						     *p_params);
 
 /**
  * @brief ecore_iov_process_mbx_req - process a request received
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 0220d19..352620a 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -29,6 +29,120 @@
 #define ECORE_MAX_SGES_NUM 16
 #define CRC32_POLY 0x1edc6f41
 
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+				 struct ecore_queue_cid *p_cid)
+{
+	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
+	if (!p_cid->is_vf && IS_PF(p_hwfn->p_dev))
+		ecore_cxt_release_cid(p_hwfn, p_cid->cid);
+	OSAL_VFREE(p_hwfn->p_dev, p_cid);
+}
+
+/* The internal is only meant to be directly called by PFs initializing CIDs
+ * for their VFs.
+ */
+struct ecore_queue_cid *
+_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+			u16 opaque_fid, u32 cid, u8 vf_qid,
+			struct ecore_queue_start_common_params *p_params)
+{
+	bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
+	struct ecore_queue_cid *p_cid;
+	enum _ecore_status_t rc;
+
+	p_cid = OSAL_VALLOC(p_hwfn->p_dev, sizeof(*p_cid));
+	if (p_cid == OSAL_NULL)
+		return OSAL_NULL;
+	OSAL_MEM_ZERO(p_cid, sizeof(*p_cid));
+
+	p_cid->opaque_fid = opaque_fid;
+	p_cid->cid = cid;
+	p_cid->vf_qid = vf_qid;
+	p_cid->rel = *p_params;
+
+	/* Don't try calculating the absolute indices for VFs */
+	if (IS_VF(p_hwfn->p_dev)) {
+		p_cid->abs = p_cid->rel;
+		goto out;
+	}
+
+	/* Calculate the engine-absolute indices of the resources.
+	 * This would guarantee they're valid later on.
+	 * In some cases [SBs] we already have the right values.
+	 */
+	rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+	if (rc != ECORE_SUCCESS)
+		goto fail;
+
+	rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
+			       &p_cid->abs.queue_id);
+	if (rc != ECORE_SUCCESS)
+		goto fail;
+
+	/* In case of a PF configuring its VF's queues, the stats-id is already
+	 * absolute [since there's a single index that's suitable per-VF].
+	 */
+	if (b_is_same) {
+		rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
+				    &p_cid->abs.stats_id);
+		if (rc != ECORE_SUCCESS)
+			goto fail;
+	} else {
+		p_cid->abs.stats_id = p_cid->rel.stats_id;
+	}
+
+	/* SBs relevant information was already provided as absolute */
+	p_cid->abs.sb = p_cid->rel.sb;
+	p_cid->abs.sb_idx = p_cid->rel.sb_idx;
+
+	/* This is tricky - we're actually interested in whether this is a PF
+	 * entry meant for the VF.
+	 */
+	if (!b_is_same)
+		p_cid->is_vf = true;
+out:
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+		   p_cid->opaque_fid, p_cid->cid,
+		   p_cid->rel.vport_id, p_cid->abs.vport_id,
+		   p_cid->rel.queue_id, p_cid->abs.queue_id,
+		   p_cid->rel.stats_id, p_cid->abs.stats_id,
+		   p_cid->abs.sb, p_cid->abs.sb_idx);
+
+	return p_cid;
+
+fail:
+	OSAL_VFREE(p_hwfn->p_dev, p_cid);
+	return OSAL_NULL;
+}
+
+static struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+		       u16 opaque_fid,
+		       struct ecore_queue_start_common_params *p_params)
+{
+	struct ecore_queue_cid *p_cid;
+	u32 cid = 0;
+
+	/* Get a unique firmware CID for this queue, in case it's a PF.
+	 * VF's don't need a CID as the queue configuration will be done
+	 * by PF.
+	 */
+	if (IS_PF(p_hwfn->p_dev)) {
+		if (ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+					  &cid) != ECORE_SUCCESS) {
+			DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+			return OSAL_NULL;
+		}
+	}
+
+	p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
+	if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev))
+		ecore_cxt_release_cid(p_hwfn, cid);
+
+	return p_cid;
+}
+
 enum _ecore_status_t
 ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
 			 struct ecore_sp_vport_start_params *p_params)
@@ -558,57 +672,28 @@ enum _ecore_status_t
 	return 0;
 }
 
-static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn,
-				       struct ecore_hw_cid_data *p_cid_data)
-{
-	if (!p_cid_data->b_cid_allocated)
-		return;
-
-	ecore_cxt_release_cid(p_hwfn, p_cid_data->cid);
-	p_cid_data->b_cid_allocated = false;
-}
-
 enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
-			      u16 opaque_fid,
-			      u32 cid,
-			      struct ecore_queue_start_common_params *p_params,
-			      u16 bd_max_bytes,
-			      dma_addr_t bd_chain_phys_addr,
-			      dma_addr_t cqe_pbl_addr,
-			      u16 cqe_pbl_size, bool b_use_zone_a_prod)
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+			   struct ecore_queue_cid *p_cid,
+			   u16 bd_max_bytes,
+			   dma_addr_t bd_chain_phys_addr,
+			   dma_addr_t cqe_pbl_addr,
+			   u16 cqe_pbl_size)
 {
 	struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
-	struct ecore_hw_cid_data *p_rx_cid;
-	u16 abs_rx_q_id = 0;
-	u8 abs_vport_id = 0;
 	enum _ecore_status_t rc = ECORE_NOTIMPL;
 
-	/* Store information for the stop */
-	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
-	p_rx_cid->cid = cid;
-	p_rx_cid->opaque_fid = opaque_fid;
-	p_rx_cid->vport_id = p_params->vport_id;
-
-	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
-	if (rc != ECORE_SUCCESS)
-		return rc;
-
-	rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
-	if (rc != ECORE_SUCCESS)
-		return rc;
-
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
-		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-		   opaque_fid, cid, p_params->queue_id,
-		   p_params->vport_id, p_params->sb);
+		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+		   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
+		   p_cid->abs.vport_id, p_cid->abs.sb);
 
 	/* Get SPQ entry */
 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
-	init_data.cid = cid;
-	init_data.opaque_fid = opaque_fid;
+	init_data.cid = p_cid->cid;
+	init_data.opaque_fid = p_cid->opaque_fid;
 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 
 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -619,11 +704,11 @@ enum _ecore_status_t
 
 	p_ramrod = &p_ent->ramrod.rx_queue_start;
 
-	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
-	p_ramrod->sb_index = (u8)p_params->sb_idx;
-	p_ramrod->vport_id = abs_vport_id;
-	p_ramrod->stats_counter_id = p_params->stats_id;
-	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+	p_ramrod->sb_index = p_cid->abs.sb_idx;
+	p_ramrod->vport_id = p_cid->abs.vport_id;
+	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
 	p_ramrod->complete_cqe_flg = 0;
 	p_ramrod->complete_event_flg = 1;
 
@@ -633,92 +718,88 @@ enum _ecore_status_t
 	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 
-	if (p_params->vf_qid || b_use_zone_a_prod) {
-		p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
+	if (p_cid->is_vf) {
+		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 			   "Queue%s is meant for VF rxq[%02x]\n",
-			   b_use_zone_a_prod ? " [legacy]" : "",
-			   p_params->vf_qid);
-		p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+			   !!p_cid->b_legacy_vf ? " [legacy]" : "",
+			   p_cid->vf_qid);
+		p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
 	}
 
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
-enum _ecore_status_t
-ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
-			    u16 opaque_fid,
-			    struct ecore_queue_start_common_params *p_params,
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
+			    struct ecore_queue_cid *p_cid,
 			    u16 bd_max_bytes,
 			    dma_addr_t bd_chain_phys_addr,
 			    dma_addr_t cqe_pbl_addr,
 			    u16 cqe_pbl_size,
-			    void OSAL_IOMEM * *pp_prod)
+			    void OSAL_IOMEM * *pp_producer)
 {
-	struct ecore_hw_cid_data *p_rx_cid;
 	u32 init_prod_val = 0;
-	u16 abs_l2_queue = 0;
-	u8 abs_stats_id = 0;
-	enum _ecore_status_t rc;
-
-	if (IS_VF(p_hwfn->p_dev)) {
-		return ecore_vf_pf_rxq_start(p_hwfn,
-					     (u8)p_params->queue_id,
-					     p_params->sb,
-					     (u8)p_params->sb_idx,
-					     bd_max_bytes,
-					     bd_chain_phys_addr,
-					     cqe_pbl_addr,
-					     cqe_pbl_size, pp_prod);
-	}
-
-	rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
-	if (rc != ECORE_SUCCESS)
-		return rc;
 
-	rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
-	if (rc != ECORE_SUCCESS)
-		return rc;
-
-	*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
-	    GTT_BAR0_MAP_REG_MSDM_RAM +
-	    MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
+	*pp_producer = (u8 OSAL_IOMEM *)
+		       p_hwfn->regview +
+		       GTT_BAR0_MAP_REG_MSDM_RAM +
+		       MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
 
 	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
-	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+	__internal_ram_wr(p_hwfn, *pp_producer, sizeof(u32),
 			  (u32 *)(&init_prod_val));
 
+	return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+					  bd_max_bytes,
+					  bd_chain_phys_addr,
+					  cqe_pbl_addr, cqe_pbl_size);
+}
+
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+			 u16 opaque_fid,
+			 struct ecore_queue_start_common_params *p_params,
+			 u16 bd_max_bytes,
+			 dma_addr_t bd_chain_phys_addr,
+			 dma_addr_t cqe_pbl_addr,
+			 u16 cqe_pbl_size,
+			 struct ecore_rxq_start_ret_params *p_ret_params)
+{
+	struct ecore_queue_cid *p_cid;
+	enum _ecore_status_t rc;
+
 	/* Allocate a CID for the queue */
-	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
-	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
-				   &p_rx_cid->cid);
-	if (rc != ECORE_SUCCESS) {
-		DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
-		return rc;
-	}
-	p_rx_cid->b_cid_allocated = true;
-	p_params->stats_id = abs_stats_id;
-	p_params->vf_qid = 0;
-
-	rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
-					   opaque_fid,
-					   p_rx_cid->cid,
-					   p_params,
+	p_cid = ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+	if (p_cid == OSAL_NULL)
+		return ECORE_NOMEM;
+
+	if (IS_PF(p_hwfn->p_dev))
+		rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
+						 bd_max_bytes,
+						 bd_chain_phys_addr,
+						 cqe_pbl_addr, cqe_pbl_size,
+						 &p_ret_params->p_prod);
+	else
+		rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
 					   bd_max_bytes,
 					   bd_chain_phys_addr,
 					   cqe_pbl_addr,
 					   cqe_pbl_size,
-					   false);
+					   &p_ret_params->p_prod);
 
+	/* Provide the caller with a reference to the handle */
 	if (rc != ECORE_SUCCESS)
-		ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+		ecore_eth_queue_cid_release(p_hwfn, p_cid);
+	else
+		p_ret_params->p_handle = (void *)p_cid;
 
 	return rc;
 }
 
 enum _ecore_status_t
 ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
-			      u16 rx_queue_id,
+			      void **pp_rxq_handles,
 			      u8 num_rxqs,
 			      u8 complete_cqe_flg,
 			      u8 complete_event_flg,
@@ -728,14 +809,14 @@ enum _ecore_status_t
 	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
-	struct ecore_hw_cid_data *p_rx_cid;
-	u16 qid, abs_rx_q_id = 0;
+	struct ecore_queue_cid *p_cid;
 	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	u8 i;
 
 	if (IS_VF(p_hwfn->p_dev))
 		return ecore_vf_pf_rxqs_update(p_hwfn,
-					       rx_queue_id,
+					       (struct ecore_queue_cid **)
+					       pp_rxq_handles,
 					       num_rxqs,
 					       complete_cqe_flg,
 					       complete_event_flg);
@@ -745,12 +826,11 @@ enum _ecore_status_t
 	init_data.p_comp_data = p_comp_data;
 
 	for (i = 0; i < num_rxqs; i++) {
-		qid = rx_queue_id + i;
-		p_rx_cid = &p_hwfn->p_rx_cids[qid];
+		p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];
 
 		/* Get SPQ entry */
-		init_data.cid = p_rx_cid->cid;
-		init_data.opaque_fid = p_rx_cid->opaque_fid;
+		init_data.cid = p_cid->cid;
+		init_data.opaque_fid = p_cid->opaque_fid;
 
 		rc = ecore_sp_init_request(p_hwfn, &p_ent,
 					   ETH_RAMROD_RX_QUEUE_UPDATE,
@@ -759,41 +839,34 @@ enum _ecore_status_t
 			return rc;
 
 		p_ramrod = &p_ent->ramrod.rx_queue_update;
+		p_ramrod->vport_id = p_cid->abs.vport_id;
 
-		ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
-		ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
-		p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+		p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
 		p_ramrod->complete_cqe_flg = complete_cqe_flg;
 		p_ramrod->complete_event_flg = complete_event_flg;
 
 		rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
-		if (rc)
+		if (rc != ECORE_SUCCESS)
 			return rc;
 	}
 
 	return rc;
 }
 
-enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
-			   u16 rx_queue_id,
-			   bool eq_completion_only, bool cqe_completion)
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+			   struct ecore_queue_cid *p_cid,
+			   bool b_eq_completion_only,
+			   bool b_cqe_completion)
 {
-	struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
 	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
-	u16 abs_rx_q_id = 0;
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
-
-	if (IS_VF(p_hwfn->p_dev))
-		return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
-					    cqe_completion);
+	enum _ecore_status_t rc;
 
-	/* Get SPQ entry */
 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
-	init_data.cid = p_rx_cid->cid;
-	init_data.opaque_fid = p_rx_cid->opaque_fid;
+	init_data.cid = p_cid->cid;
+	init_data.opaque_fid = p_cid->opaque_fid;
 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 
 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -803,64 +876,54 @@ enum _ecore_status_t
 		return rc;
 
 	p_ramrod = &p_ent->ramrod.rx_queue_stop;
-
-	ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
-	ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
-	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+	p_ramrod->vport_id = p_cid->abs.vport_id;
+	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
 
 	/* Cleaning the queue requires the completion to arrive there.
 	 * In addition, VFs require the answer to come as eqe to PF.
 	 */
-	p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
-					 p_hwfn->hw_info.opaque_fid) &&
-				      !eq_completion_only) || cqe_completion;
-	p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
-					 p_hwfn->hw_info.opaque_fid) ||
-	    eq_completion_only;
+	p_ramrod->complete_cqe_flg = (!p_cid->is_vf && !b_eq_completion_only) ||
+				     b_cqe_completion;
+	p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
 
-	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
-	if (rc != ECORE_SUCCESS)
-		return rc;
+	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
 
-	ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+					     void *p_rxq,
+					     bool eq_completion_only,
+					     bool cqe_completion)
+{
+	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
+	enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+	if (IS_PF(p_hwfn->p_dev))
+		rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+						eq_completion_only,
+						cqe_completion);
+	else
+		rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
 
+	if (rc == ECORE_SUCCESS)
+		ecore_eth_queue_cid_release(p_hwfn, p_cid);
 	return rc;
 }
 
 enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
-			      u16 opaque_fid,
-			      u32 cid,
-			      struct ecore_queue_start_common_params *p_params,
-			      dma_addr_t pbl_addr,
-			      u16 pbl_size,
-			      u16 pq_id)
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+			   struct ecore_queue_cid *p_cid,
+			   dma_addr_t pbl_addr, u16 pbl_size,
+			   u16 pq_id)
 {
 	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
-	struct ecore_hw_cid_data *p_tx_cid;
-	u16 abs_tx_qzone_id = 0;
 	enum _ecore_status_t rc = ECORE_NOTIMPL;
-	u8 abs_vport_id;
-
-	/* Store information for the stop */
-	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
-	p_tx_cid->cid = cid;
-	p_tx_cid->opaque_fid = opaque_fid;
-
-	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
-	if (rc != ECORE_SUCCESS)
-		return rc;
-
-	rc = ecore_fw_l2_queue(p_hwfn, p_params->qzone_id, &abs_tx_qzone_id);
-	if (rc != ECORE_SUCCESS)
-		return rc;
 
 	/* Get SPQ entry */
 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
-	init_data.cid = cid;
-	init_data.opaque_fid = opaque_fid;
+	init_data.cid = p_cid->cid;
+	init_data.opaque_fid = p_cid->opaque_fid;
 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 
 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -870,14 +933,14 @@ enum _ecore_status_t
 		return rc;
 
 	p_ramrod = &p_ent->ramrod.tx_queue_start;
-	p_ramrod->vport_id = abs_vport_id;
+	p_ramrod->vport_id = p_cid->abs.vport_id;
 
-	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
-	p_ramrod->sb_index = (u8)p_params->sb_idx;
-	p_ramrod->stats_counter_id = p_params->stats_id;
+	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+	p_ramrod->sb_index = p_cid->abs.sb_idx;
+	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
 
-	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_qzone_id);
-	p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(abs_tx_qzone_id);
+	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+	p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
 
 	p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
@@ -887,90 +950,72 @@ enum _ecore_status_t
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
-enum _ecore_status_t
-ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
-			    u16 opaque_fid,
-			    struct ecore_queue_start_common_params *p_params,
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
+			    struct ecore_queue_cid *p_cid,
 			    u8 tc,
-			    dma_addr_t pbl_addr,
-			    u16 pbl_size,
+			    dma_addr_t pbl_addr, u16 pbl_size,
 			    void OSAL_IOMEM * *pp_doorbell)
 {
-	struct ecore_hw_cid_data *p_tx_cid;
-	u8 abs_stats_id = 0;
 	enum _ecore_status_t rc;
 
-	if (IS_VF(p_hwfn->p_dev)) {
-		return ecore_vf_pf_txq_start(p_hwfn,
-					     p_params->queue_id,
-					     p_params->sb,
-					     (u8)p_params->sb_idx,
-					     pbl_addr,
-					     pbl_size,
-					     pp_doorbell);
-	}
-
-	rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
+	/* TODO - set tc in the pq_params for multi-cos */
+	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+					pbl_addr, pbl_size,
+					ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
-	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
-	OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
+	/* Provide the caller with the necessary return values */
+	*pp_doorbell = (u8 OSAL_IOMEM *)
+		       p_hwfn->doorbells +
+		       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);
 
-	/* Allocate a CID for the queue */
-	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
-	if (rc != ECORE_SUCCESS) {
-		DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
-		return rc;
-	}
-	p_tx_cid->b_cid_allocated = true;
+	return ECORE_SUCCESS;
+}
 
-	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
-		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-		    opaque_fid, p_tx_cid->cid, p_params->queue_id,
-		    p_params->vport_id, p_params->sb);
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+			 struct ecore_queue_start_common_params *p_params,
+			 u8 tc,
+			 dma_addr_t pbl_addr, u16 pbl_size,
+			 struct ecore_txq_start_ret_params *p_ret_params)
+{
+	struct ecore_queue_cid *p_cid;
+	enum _ecore_status_t rc;
 
-	p_params->stats_id = abs_stats_id;
+	p_cid = ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+	if (p_cid == OSAL_NULL)
+		return ECORE_INVAL;
 
-	/* TODO - set tc in the pq_params for multi-cos */
-	rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
-					   opaque_fid,
-					   p_tx_cid->cid,
-					   p_params,
-					   pbl_addr,
-					   pbl_size,
-					   ecore_get_cm_pq_idx_mcos(p_hwfn,
-								    tc));
-
-	*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
-	    DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
+	if (IS_PF(p_hwfn->p_dev))
+		rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+						 pbl_addr, pbl_size,
+						 &p_ret_params->p_doorbell);
+	else
+		rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
+					   pbl_addr, pbl_size,
+					   &p_ret_params->p_doorbell);
 
 	if (rc != ECORE_SUCCESS)
-		ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+		ecore_eth_queue_cid_release(p_hwfn, p_cid);
+	else
+		p_ret_params->p_handle = (void *)p_cid;
 
 	return rc;
 }
 
-enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn)
-{
-	return ECORE_NOTIMPL;
-}
-
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
-						u16 tx_queue_id)
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+			   struct ecore_queue_cid *p_cid)
 {
-	struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
-
-	if (IS_VF(p_hwfn->p_dev))
-		return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+	enum _ecore_status_t rc;
 
-	/* Get SPQ entry */
 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
-	init_data.cid = p_tx_cid->cid;
-	init_data.opaque_fid = p_tx_cid->opaque_fid;
+	init_data.cid = p_cid->cid;
+	init_data.opaque_fid = p_cid->opaque_fid;
 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 
 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -979,11 +1024,22 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
-	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
-	if (rc != ECORE_SUCCESS)
-		return rc;
+	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+					     void *p_handle)
+{
+	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+	enum _ecore_status_t rc;
+
+	if (IS_PF(p_hwfn->p_dev))
+		rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+	else
+		rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);
 
-	ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+	if (rc == ECORE_SUCCESS)
+		ecore_eth_queue_cid_release(p_hwfn, p_cid);
 	return rc;
 }
 
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index b598eda..c136389 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -15,59 +15,66 @@
 #include "ecore_spq.h"
 #include "ecore_l2_api.h"
 
-/**
- * @brief ecore_sp_eth_tx_queue_update -
- *
- * This ramrod updates a TX queue. It is used for setting the active
- * state of the queue.
- *
- * @note Final phase API.
- *
- * @param p_hwfn
- *
- * @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn);
+struct ecore_queue_cid {
+	/* 'Relative' is a relative term ;-). Usually the indices [not counting
+	 * SBs] would be PF-relative, but there are some cases where that isn't
+	 * the case - specifically for a PF configuring its VF indices it's
+	 * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+	 */
+	struct ecore_queue_start_common_params rel;
+	struct ecore_queue_start_common_params abs;
+	u32 cid;
+	u16 opaque_fid;
+
+	/* VFs queues are mapped differently, so we need to know the
+	 * relative queue associated with them [0-based].
+	 * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+	 * and not on the VF itself.
+	 */
+	bool is_vf;
+	u8 vf_qid;
+
+	/* Legacy VFs might have Rx producer located elsewhere */
+	bool b_legacy_vf;
+};
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+				 struct ecore_queue_cid *p_cid);
+
+struct ecore_queue_cid *
+_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+			u16 opaque_fid, u32 cid, u8 vf_qid,
+			struct ecore_queue_start_common_params *p_params);
 
 enum _ecore_status_t
 ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
 			 struct ecore_sp_vport_start_params *p_params);
 
 /**
- * @brief - Starts an Rx queue; Should be used where contexts are handled
- * outside of the ramrod area [specifically iov scenarios]
+ * @brief - Starts an Rx queue, when queue_cid is already prepared
  *
  * @param p_hwfn
- * @param opaque_fid
- * @param cid
- * @param p_params [queue_id, vport_id, stats_id, sb, sb_idx, vf_qid]
-	  stats_id is absolute packed in p_params.
+ * @param p_cid
  * @param bd_max_bytes
  * @param bd_chain_phys_addr
  * @param cqe_pbl_addr
  * @param cqe_pbl_size
- * @param b_use_zone_a_prod - support legacy VF producers
  *
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn	*p_hwfn,
-			      u16 opaque_fid,
-			      u32 cid,
-			      struct ecore_queue_start_common_params *p_params,
-			      u16 bd_max_bytes,
-			      dma_addr_t bd_chain_phys_addr,
-			      dma_addr_t cqe_pbl_addr,
-			      u16 cqe_pbl_size, bool b_use_zone_a_prod);
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+			   struct ecore_queue_cid *p_cid,
+			   u16 bd_max_bytes,
+			   dma_addr_t bd_chain_phys_addr,
+			   dma_addr_t cqe_pbl_addr,
+			   u16 cqe_pbl_size);
 
 /**
- * @brief - Starts a Tx queue; Should be used where contexts are handled
- * outside of the ramrod area [specifically iov scenarios]
+ * @brief - Starts a Tx queue, where queue_cid is already prepared
  *
  * @param p_hwfn
- * @param opaque_fid
- * @param cid
- * @param p_params [queue_id, vport_id,stats_id, sb, sb_idx, vf_qid]
+ * @param p_cid
  * @param pbl_addr
  * @param pbl_size
  * @param p_pq_params - parameters for choosing the PQ for this Tx queue
@@ -75,13 +82,10 @@ enum _ecore_status_t
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn	*p_hwfn,
-			      u16 opaque_fid,
-			      u32 cid,
-			      struct ecore_queue_start_common_params *p_params,
-			      dma_addr_t pbl_addr,
-			      u16 pbl_size,
-			      u16 pq_id);
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+			   struct ecore_queue_cid *p_cid,
+			   dma_addr_t pbl_addr, u16 pbl_size,
+			   u16 pq_id);
 
 u8 ecore_mcast_bin_from_mac(u8 *mac);
 
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index 8f7b614..af316d3 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -28,22 +28,26 @@ enum ecore_rss_caps {
 #endif
 
 struct ecore_queue_start_common_params {
-	/* Rx/Tx queue relative id to keep obtained cid in corresponding array
-	 * RX - upper-bounded by number of FW-queues
-	 */
-	u16 queue_id;
+	/* Should always be relative to entity sending this. */
 	u8 vport_id;
+	u16 queue_id;
 
-	/* q_zone_id is relative, may be different from queue id
-	 * currently used by Tx-only, upper-bounded by number of FW-queues
-	 */
-	u16 qzone_id;
-
-	/* stats_id is relative or absolute depends on function */
+	/* Relative, but relevant only for PFs */
 	u8 stats_id;
+
+	/* These are always absolute */
 	u16 sb;
-	u16 sb_idx;
-	u16 vf_qid;
+	u8 sb_idx;
+};
+
+struct ecore_rxq_start_ret_params {
+	void OSAL_IOMEM *p_prod;
+	void *p_handle;
+};
+
+struct ecore_txq_start_ret_params {
+	void OSAL_IOMEM *p_doorbell;
+	void *p_handle;
 };
 
 struct ecore_rss_params {
@@ -167,42 +171,37 @@ enum _ecore_status_t
 	struct ecore_spq_comp_cb	 *p_comp_data);
 
 /**
- * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
+ * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
  *
  * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
  * the VPort ID is not currently initialized.
  *
  * @param p_hwfn
  * @param opaque_fid
- * @p_params			[stats_id is relative, packed in p_params]
+ * @p_params			Inputs; Relative for PF [SB being an exception]
  * @param bd_max_bytes		Maximum bytes that can be placed on a BD
  * @param bd_chain_phys_addr	Physical address of BDs for receive.
  * @param cqe_pbl_addr		Physical address of the CQE PBL Table.
  * @param cqe_pbl_size		Size of the CQE PBL Table
- * @param pp_prod		Pointer to place producer's
- *                              address for the Rx Q (May be
- *				NULL).
+ * @param p_ret_params		Pointed struct to be filled with outputs.
  *
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t
-ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
-			    u16 opaque_fid,
-			    struct ecore_queue_start_common_params *p_params,
-			    u16 bd_max_bytes,
-			    dma_addr_t bd_chain_phys_addr,
-			    dma_addr_t cqe_pbl_addr,
-			    u16 cqe_pbl_size,
-			    void OSAL_IOMEM * *pp_prod);
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+			 u16 opaque_fid,
+			 struct ecore_queue_start_common_params *p_params,
+			 u16 bd_max_bytes,
+			 dma_addr_t bd_chain_phys_addr,
+			 dma_addr_t cqe_pbl_addr,
+			 u16 cqe_pbl_size,
+			 struct ecore_rxq_start_ret_params *p_ret_params);
 
 /**
- * @brief ecore_sp_eth_rx_queue_stop -
- *
- * This ramrod closes an RX queue. It sends RX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
  *
  * @param p_hwfn
- * @param rx_queue_id		RX Queue ID
+ * @param p_rxq			Handler of queue to close
  * @param eq_completion_only	If True completion will be on
  *				EQe, if False completion will be
  *				on EQe if p_hwfn opaque
@@ -213,13 +212,13 @@ enum _ecore_status_t
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
-			   u16 rx_queue_id,
-			   bool eq_completion_only,
-			   bool cqe_completion);
+ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+			void *p_rxq,
+			bool eq_completion_only,
+			bool cqe_completion);
 
 /**
- * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
+ * @brief - TX Queue Start Ramrod
  *
  * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
  * the VPort is not currently initialized.
@@ -230,34 +229,29 @@ enum _ecore_status_t
  * @param tc			traffic class to use with this L2 txq
  * @param pbl_addr		address of the pbl array
  * @param pbl_size		number of entries in pbl
- * @param pp_doorbell		Pointer to place doorbell pointer (May be NULL).
- *				This address should be used with the
- *				DIRECT_REG_WR macro.
+ * @param p_ret_params		Pointer to fill the return parameters in.
  *
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t
-ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
-			    u16 opaque_fid,
-			    struct ecore_queue_start_common_params *p_params,
-			    u8 tc,
-			    dma_addr_t pbl_addr,
-			    u16 pbl_size,
-			    void OSAL_IOMEM * *pp_doorbell);
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+			 u16 opaque_fid,
+			 struct ecore_queue_start_common_params *p_params,
+			 u8 tc,
+			 dma_addr_t pbl_addr,
+			 u16 pbl_size,
+			 struct ecore_txq_start_ret_params *p_ret_params);
 
 /**
- * @brief ecore_sp_eth_tx_queue_stop -
- *
- * This ramrod closes a TX queue. It sends TX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_tx_queue_stop - closes a Tx queue
  *
  * @param p_hwfn
- * @param tx_queue_id		TX Queue ID
+ * @param p_txq - handle to Tx queue needed to be closed
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
-						u16 tx_queue_id);
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+					     void *p_txq);
 
 enum ecore_tpa_mode	{
 	ECORE_TPA_MODE_NONE,
@@ -389,19 +383,19 @@ enum _ecore_status_t
  * @note Final phase API.
  *
  * @param p_hwfn
- * @param rx_queue_id		RX Queue ID
- * @param num_rxqs              Allow to update multiple rx
- *				queues, from rx_queue_id to
- *				(rx_queue_id + num_rxqs)
+ * @param pp_rxq_handlers	An array of queue handlers to be updated.
+ * @param num_rxqs              number of queues to update.
  * @param complete_cqe_flg	Post completion to the CQE Ring if set
  * @param complete_event_flg	Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
  *
  * @return enum _ecore_status_t
  */
 
 enum _ecore_status_t
 ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
-			      u16 rx_queue_id,
+			      void **pp_rxq_handlers,
 			      u8 num_rxqs,
 			      u8 complete_cqe_flg,
 			      u8 complete_event_flg,
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 87ffa34..7a20d56 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -238,7 +238,7 @@ static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
 	u8 i;
 
 	for (i = 0; i < p_vf->num_rxqs; i++)
-		if (p_vf->vf_queues[i].rxq_active)
+		if (p_vf->vf_queues[i].p_rx_cid)
 			return true;
 
 	return false;
@@ -250,7 +250,7 @@ static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
 	u8 i;
 
 	for (i = 0; i < p_vf->num_rxqs; i++)
-		if (p_vf->vf_queues[i].txq_active)
+		if (p_vf->vf_queues[i].p_tx_cid)
 			return true;
 
 	return false;
@@ -956,17 +956,19 @@ static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
 	vf->num_sbs = 0;
 }
 
-enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
-					      struct ecore_ptt *p_ptt,
-					      u16 rel_vf_id, u16 num_rx_queues)
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+			 struct ecore_ptt *p_ptt,
+			 struct ecore_iov_vf_init_params *p_params)
 {
 	u8 num_of_vf_available_chains  = 0;
 	struct ecore_vf_info *vf = OSAL_NULL;
+	u16 qid, num_irqs;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 	u32 cids;
 	u8 i;
 
-	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+	vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
 	if (!vf) {
 		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
 		return ECORE_UNKNOWN_ERROR;
@@ -974,22 +976,52 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
 
 	if (vf->b_init) {
 		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
-			  rel_vf_id);
+			  p_params->rel_vf_id);
 		return ECORE_INVAL;
 	}
 
+	/* Perform sanity checking on the requested queue_id */
+	for (i = 0; i < p_params->num_queues; i++) {
+		u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
+		u16 max_vf_qzone = min_vf_qzone +
+				   FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
+
+		qid = p_params->req_rx_queue[i];
+		if (qid < min_vf_qzone || qid > max_vf_qzone) {
+			DP_NOTICE(p_hwfn, true,
+				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+				  qid, p_params->rel_vf_id,
+				  min_vf_qzone, max_vf_qzone);
+			return ECORE_INVAL;
+		}
+
+		qid = p_params->req_tx_queue[i];
+		if (qid > max_vf_qzone) {
+			DP_NOTICE(p_hwfn, true,
+				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+				  qid, p_params->rel_vf_id, max_vf_qzone);
+			return ECORE_INVAL;
+		}
+
+		/* If client *really* wants, Tx qid can be shared with PF */
+		if (qid < min_vf_qzone)
+			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+				   p_params->rel_vf_id, qid, i);
+	}
+
 	/* Limit number of queues according to number of CIDs */
 	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 		   "VF[%d] - requesting to initialize for 0x%04x queues"
 		   " [0x%04x CIDs available]\n",
-		   vf->relative_vf_id, num_rx_queues, (u16)cids);
-	num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));
+		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
+	num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
 
 	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
 							       p_ptt,
 							       vf,
-							       num_rx_queues);
+							       num_irqs);
 	if (num_of_vf_available_chains == 0) {
 		DP_ERR(p_hwfn, "no available igu sbs\n");
 		return ECORE_NOMEM;
@@ -1000,26 +1032,19 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
 	vf->num_txqs = num_of_vf_available_chains;
 
 	for (i = 0; i < vf->num_rxqs; i++) {
-		u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
-							     vf->igu_sbs[i]);
+		struct ecore_vf_q_info *p_queue = &vf->vf_queues[i];
 
-		if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
-			DP_NOTICE(p_hwfn, true,
-				  "VF[%d] will require utilizing of"
-				  " out-of-bounds queues - %04x\n",
-				  vf->relative_vf_id, queue_id);
-			/* TODO - cleanup the already allocate SBs */
-			return ECORE_INVAL;
-		}
+		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+		p_queue->fw_tx_qid = p_params->req_tx_queue[i];
 
 		/* CIDs are per-VF, so no problem having them 0-based. */
-		vf->vf_queues[i].fw_rx_qid = queue_id;
-		vf->vf_queues[i].fw_tx_qid = queue_id;
-		vf->vf_queues[i].fw_cid = i;
+		p_queue->fw_cid = i;
 
 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
-			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]  CID %04x\n",
+			   vf->relative_vf_id, i, vf->igu_sbs[i],
+			   p_queue->fw_rx_qid, p_queue->fw_tx_qid,
+			   p_queue->fw_cid);
 	}
 
 	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
@@ -1393,8 +1418,19 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
 	p_vf->num_active_rxqs = 0;
 
 	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
-		p_vf->vf_queues[i].rxq_active = 0;
-		p_vf->vf_queues[i].txq_active = 0;
+		struct ecore_vf_q_info *p_queue = &p_vf->vf_queues[i];
+
+		if (p_queue->p_rx_cid) {
+			ecore_eth_queue_cid_release(p_hwfn,
+						    p_queue->p_rx_cid);
+			p_queue->p_rx_cid = OSAL_NULL;
+		}
+
+		if (p_queue->p_tx_cid) {
+			ecore_eth_queue_cid_release(p_hwfn,
+						    p_queue->p_tx_cid);
+			p_queue->p_tx_cid = OSAL_NULL;
+		}
 	}
 
 	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
@@ -1832,14 +1868,14 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 
 		/* Update all the Rx queues */
 		for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
-			u16 qid;
+			struct ecore_queue_cid *p_cid;
 
-			if (!p_vf->vf_queues[i].rxq_active)
+			p_cid = p_vf->vf_queues[i].p_rx_cid;
+			if (p_cid == OSAL_NULL)
 				continue;
 
-			qid = p_vf->vf_queues[i].fw_rx_qid;
-
-			rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
+			rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+							   (void **)&p_cid,
 						   1, 0, 1,
 						   ECORE_SPQ_MODE_EBLOCK,
 						   OSAL_NULL);
@@ -1847,7 +1883,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 				DP_NOTICE(p_hwfn, true,
 					  "Failed to send Rx update"
 					  " fo queue[0x%04x]\n",
-					  qid);
+					  p_cid->rel.queue_id);
 				return rc;
 			}
 		}
@@ -2041,6 +2077,7 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	struct ecore_queue_start_common_params params;
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 	u8 status = PFVF_STATUS_NO_RESOURCE;
+	struct ecore_vf_q_info *p_queue;
 	struct vfpf_start_rxq_tlv *req;
 	bool b_legacy_vf = false;
 	enum _ecore_status_t rc;
@@ -2051,14 +2088,24 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
+	/* Acquire a new queue-cid */
+	p_queue = &vf->vf_queues[req->rx_qid];
+
 	OSAL_MEMSET(&params, 0, sizeof(params));
-	params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
-	params.vf_qid = req->rx_qid;
+	params.queue_id = (u8)p_queue->fw_rx_qid;
 	params.vport_id = vf->vport_id;
 	params.stats_id = vf->abs_vf_id + 0x10;
 	params.sb = req->hw_sb;
 	params.sb_idx = req->sb_index;
 
+	p_queue->p_rx_cid = _ecore_eth_queue_to_cid(p_hwfn,
+						    vf->opaque_fid,
+						    p_queue->fw_cid,
+						    (u8)req->rx_qid,
+						    &params);
+	if (p_queue->p_rx_cid == OSAL_NULL)
+		goto out;
+
 	/* Legacy VFs have their Producers in a different location, which they
 	 * calculate on their own and clean the producer prior to this.
 	 */
@@ -2070,27 +2117,27 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 		       GTT_BAR0_MAP_REG_MSDM_RAM +
 		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
 		       0);
+	p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
 
-	rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
-					   vf->vf_queues[req->rx_qid].fw_cid,
-					   &params,
-					   req->bd_max_bytes,
-					   req->rxq_addr,
-					   req->cqe_pbl_addr,
-					   req->cqe_pbl_size,
-					   b_legacy_vf);
 
-	if (rc) {
+	rc = ecore_eth_rxq_start_ramrod(p_hwfn,
+					p_queue->p_rx_cid,
+					req->bd_max_bytes,
+					req->rxq_addr,
+					req->cqe_pbl_addr,
+					req->cqe_pbl_size);
+	if (rc != ECORE_SUCCESS) {
 		status = PFVF_STATUS_FAILURE;
+		ecore_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
+		p_queue->p_rx_cid = OSAL_NULL;
 	} else {
 		status = PFVF_STATUS_SUCCESS;
-		vf->vf_queues[req->rx_qid].rxq_active = true;
 		vf->num_active_rxqs++;
 	}
 
 out:
-	ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
-					status, b_legacy_vf);
+	ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+					b_legacy_vf);
 }
 
 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
@@ -2141,8 +2188,10 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	struct ecore_queue_start_common_params params;
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 	u8 status = PFVF_STATUS_NO_RESOURCE;
+	struct ecore_vf_q_info *p_queue;
 	struct vfpf_start_txq_tlv *req;
 	enum _ecore_status_t rc;
+	u16 pq;
 
 	OSAL_MEMSET(&params, 0, sizeof(params));
 	req = &mbx->req_virt->start_txq;
@@ -2151,27 +2200,34 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
-	params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
-	params.qzone_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
+	/* Acquire a new queue-cid */
+	p_queue = &vf->vf_queues[req->tx_qid];
+
+	params.queue_id = p_queue->fw_tx_qid;
 	params.vport_id = vf->vport_id;
 	params.stats_id = vf->abs_vf_id + 0x10;
 	params.sb = req->hw_sb;
 	params.sb_idx = req->sb_index;
 
-	rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
-					   vf->opaque_fid,
-					   vf->vf_queues[req->tx_qid].fw_cid,
-					   &params,
-					   req->pbl_addr,
-					   req->pbl_size,
-					   ecore_get_cm_pq_idx_vf(p_hwfn,
-							vf->relative_vf_id));
+	p_queue->p_tx_cid = _ecore_eth_queue_to_cid(p_hwfn,
+						    vf->opaque_fid,
+						    p_queue->fw_cid,
+						    (u8)req->tx_qid,
+						    &params);
+	if (p_queue->p_tx_cid == OSAL_NULL)
+		goto out;
 
-	if (rc)
+	pq = ecore_get_cm_pq_idx_vf(p_hwfn,
+				    vf->relative_vf_id);
+	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
+					req->pbl_addr, req->pbl_size, pq);
+	if (rc != ECORE_SUCCESS) {
 		status = PFVF_STATUS_FAILURE;
-	else {
+		ecore_eth_queue_cid_release(p_hwfn,
+					    p_queue->p_tx_cid);
+		p_queue->p_tx_cid = OSAL_NULL;
+	} else {
 		status = PFVF_STATUS_SUCCESS;
-		vf->vf_queues[req->tx_qid].txq_active = true;
 	}
 
 out:
@@ -2184,6 +2240,7 @@ static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
 						   u8 num_rxqs,
 						   bool cqe_completion)
 {
+	struct ecore_vf_q_info *p_queue;
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 	int qid;
 
@@ -2191,16 +2248,18 @@ static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
 		return ECORE_INVAL;
 
 	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
-		if (vf->vf_queues[qid].rxq_active) {
-			rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
-							vf->vf_queues[qid].
-							fw_rx_qid, false,
-							cqe_completion);
+		p_queue = &vf->vf_queues[qid];
 
-			if (rc)
-				return rc;
-		}
-		vf->vf_queues[qid].rxq_active = false;
+		if (!p_queue->p_rx_cid)
+			continue;
+
+		rc = ecore_eth_rx_queue_stop(p_hwfn,
+					     p_queue->p_rx_cid,
+					     false, cqe_completion);
+		if (rc != ECORE_SUCCESS)
+			return rc;
+
+		vf->vf_queues[qid].p_rx_cid = OSAL_NULL;
 		vf->num_active_rxqs--;
 	}
 
@@ -2212,21 +2271,23 @@ static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
 						   u16 txq_id, u8 num_txqs)
 {
 	enum _ecore_status_t rc = ECORE_SUCCESS;
+	struct ecore_vf_q_info *p_queue;
 	int qid;
 
 	if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
 		return ECORE_INVAL;
 
 	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
-		if (vf->vf_queues[qid].txq_active) {
-			rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
-							vf->vf_queues[qid].
-							fw_tx_qid);
+		p_queue = &vf->vf_queues[qid];
+		if (!p_queue->p_tx_cid)
+			continue;
 
-			if (rc)
-				return rc;
-		}
-		vf->vf_queues[qid].txq_active = false;
+		rc = ecore_eth_tx_queue_stop(p_hwfn,
+					     p_queue->p_tx_cid);
+		if (rc != ECORE_SUCCESS)
+			return rc;
+
+		p_queue->p_tx_cid = OSAL_NULL;
 	}
 	return rc;
 }
@@ -2282,10 +2343,11 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
 					 struct ecore_ptt *p_ptt,
 					 struct ecore_vf_info *vf)
 {
+	struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
 	u16 length = sizeof(struct pfvf_def_resp_tlv);
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 	struct vfpf_update_rxq_tlv *req;
-	u8 status = PFVF_STATUS_SUCCESS;
+	u8 status = PFVF_STATUS_FAILURE;
 	u8 complete_event_flg;
 	u8 complete_cqe_flg;
 	u16 qid;
@@ -2296,30 +2358,38 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
 	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
+	/* Validate inputs */
+	if (req->num_rxqs + req->rx_qid > ECORE_MAX_VF_CHAINS_PER_PF ||
+	    !ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
+		DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+			vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+		goto out;
+	}
+
 	for (i = 0; i < req->num_rxqs; i++) {
 		qid = req->rx_qid + i;
 
-		if (!vf->vf_queues[qid].rxq_active) {
-			DP_NOTICE(p_hwfn, true,
-				  "VF rx_qid = %d isn`t active!\n", qid);
-			status = PFVF_STATUS_FAILURE;
-			break;
+		if (!vf->vf_queues[qid].p_rx_cid) {
+			DP_INFO(p_hwfn,
+				"VF[%d] rx_qid = %d isn`t active!\n",
+				vf->relative_vf_id, qid);
+			goto out;
 		}
 
-		rc = ecore_sp_eth_rx_queues_update(p_hwfn,
-						   vf->vf_queues[qid].fw_rx_qid,
-						   1,
-						   complete_cqe_flg,
-						   complete_event_flg,
-						   ECORE_SPQ_MODE_EBLOCK,
-						   OSAL_NULL);
-
-		if (rc) {
-			status = PFVF_STATUS_FAILURE;
-			break;
-		}
+		handlers[i] = vf->vf_queues[qid].p_rx_cid;
 	}
 
+	rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+					   req->num_rxqs,
+					   complete_cqe_flg,
+					   complete_event_flg,
+					   ECORE_SPQ_MODE_EBLOCK,
+					   OSAL_NULL);
+	if (rc)
+		goto out;
+
+	status = PFVF_STATUS_SUCCESS;
+out:
 	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
 			       length, status);
 }
@@ -2548,7 +2618,7 @@ void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
 				  "rss_ind_table[%d] = %d,"
 				  " rxq is out of range\n",
 				  i, q_idx);
-		else if (!vf->vf_queues[q_idx].rxq_active)
+		else if (!vf->vf_queues[q_idx].p_rx_cid)
 			DP_NOTICE(p_hwfn, true,
 				  "rss_ind_table[%d] = %d, rxq is not active\n",
 				  i, q_idx);
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index e9ccc79..d32f931 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -64,10 +64,10 @@ struct ecore_iov_vf_mbx {
 
 struct ecore_vf_q_info {
 	u16 fw_rx_qid;
+	struct ecore_queue_cid *p_rx_cid;
 	u16 fw_tx_qid;
+	struct ecore_queue_cid *p_tx_cid;
 	u8 fw_cid;
-	u8 rxq_active;
-	u8 txq_active;
 };
 
 enum vf_state {
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index c12cbcf..d1c6691 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -451,19 +451,19 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
 #define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START + \
 				   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
 
-enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
-					   u8 rx_qid,
-					   u16 sb,
-					   u8 sb_index,
-					   u16 bd_max_bytes,
-					   dma_addr_t bd_chain_phys_addr,
-					   dma_addr_t cqe_pbl_addr,
-					   u16 cqe_pbl_size,
-					   void OSAL_IOMEM **pp_prod)
+enum _ecore_status_t
+ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+		      struct ecore_queue_cid *p_cid,
+		      u16 bd_max_bytes,
+		      dma_addr_t bd_chain_phys_addr,
+		      dma_addr_t cqe_pbl_addr,
+		      u16 cqe_pbl_size,
+		      void OSAL_IOMEM **pp_prod)
 {
 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
 	struct pfvf_start_queue_resp_tlv *resp;
 	struct vfpf_start_rxq_tlv *req;
+	u16 rx_qid = p_cid->rel.queue_id;
 	enum _ecore_status_t rc;
 
 	/* clear mailbox and prep first tlv */
@@ -473,19 +473,20 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
 	req->cqe_pbl_addr = cqe_pbl_addr;
 	req->cqe_pbl_size = cqe_pbl_size;
 	req->rxq_addr = bd_chain_phys_addr;
-	req->hw_sb = sb;
-	req->sb_index = sb_index;
+	req->hw_sb = p_cid->rel.sb;
+	req->sb_index = p_cid->rel.sb_idx;
 	req->bd_max_bytes = bd_max_bytes;
 	req->stat_id = -1; /* Keep initialized, for future compatibility */
 
 	/* If PF is legacy, we'll need to calculate producers ourselves
 	 * as well as clean them.
 	 */
-	if (pp_prod && p_iov->b_pre_fp_hsi) {
+	if (p_iov->b_pre_fp_hsi) {
 		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
 		u32 init_prod_val = 0;
 
-		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
+		*pp_prod = (u8 OSAL_IOMEM *)
+			   p_hwfn->regview +
 			   MSTORM_QZONE_START(p_hwfn->p_dev) +
 			   (hw_qid) * MSTORM_QZONE_SIZE;
 
@@ -510,7 +511,7 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
 	}
 
 	/* Learn the address of the producer from the response */
-	if (pp_prod && !p_iov->b_pre_fp_hsi) {
+	if (!p_iov->b_pre_fp_hsi) {
 		u32 init_prod_val = 0;
 
 		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
@@ -534,7 +535,8 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
 }
 
 enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
-					  u16 rx_qid, bool cqe_completion)
+					  struct ecore_queue_cid *p_cid,
+					  bool cqe_completion)
 {
 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
 	struct vfpf_stop_rxqs_tlv *req;
@@ -544,7 +546,7 @@ enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
 	/* clear mailbox and prep first tlv */
 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
 
-	req->rx_qid = rx_qid;
+	req->rx_qid = p_cid->rel.queue_id;
 	req->num_rxqs = 1;
 	req->cqe_completion = cqe_completion;
 
@@ -569,29 +571,28 @@ enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
 	return rc;
 }
 
-enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
-					   u16 tx_queue_id,
-					   u16 sb,
-					   u8 sb_index,
-					   dma_addr_t pbl_addr,
-					   u16 pbl_size,
-					   void OSAL_IOMEM **pp_doorbell)
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+		      struct ecore_queue_cid *p_cid,
+		      dma_addr_t pbl_addr, u16 pbl_size,
+		      void OSAL_IOMEM **pp_doorbell)
 {
 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
 	struct pfvf_start_queue_resp_tlv *resp;
 	struct vfpf_start_txq_tlv *req;
+	u16 qid = p_cid->rel.queue_id;
 	enum _ecore_status_t rc;
 
 	/* clear mailbox and prep first tlv */
 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
 
-	req->tx_qid = tx_queue_id;
+	req->tx_qid = qid;
 
 	/* Tx */
 	req->pbl_addr = pbl_addr;
 	req->pbl_size = pbl_size;
-	req->hw_sb = sb;
-	req->sb_index = sb_index;
+	req->hw_sb = p_cid->rel.sb;
+	req->sb_index = p_cid->rel.sb_idx;
 
 	/* add list termination tlv */
 	ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -608,32 +609,30 @@ enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
 		goto exit;
 	}
 
-	if (pp_doorbell) {
-		/* Modern PFs provide the actual offsets, while legacy
-		 * provided only the queue id.
-		 */
-		if (!p_iov->b_pre_fp_hsi) {
-			*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
-						       resp->offset;
-		} else {
-			u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
-
+	/* Modern PFs provide the actual offsets, while legacy
+	 * provided only the queue id.
+	 */
+	if (!p_iov->b_pre_fp_hsi) {
 		*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
-				DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
-		}
+						resp->offset;
+	} else {
+		u8 cid = p_iov->acquire_resp.resc.cid[qid];
 
-		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-			   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
-			   tx_queue_id, *pp_doorbell, resp->offset);
+		*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+						DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
 	}
 
+	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+		   qid, *pp_doorbell, resp->offset);
 exit:
 	ecore_vf_pf_req_end(p_hwfn, rc);
 
 	return rc;
 }
 
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+					  struct ecore_queue_cid *p_cid)
 {
 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
 	struct vfpf_stop_txqs_tlv *req;
@@ -643,7 +642,7 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
 	/* clear mailbox and prep first tlv */
 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
 
-	req->tx_qid = tx_qid;
+	req->tx_qid = p_cid->rel.queue_id;
 	req->num_txqs = 1;
 
 	/* add list termination tlv */
@@ -668,20 +667,36 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
 }
 
 enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
-					     u16 rx_queue_id,
+					     struct ecore_queue_cid **pp_cid,
 					     u8 num_rxqs,
-					     u8 comp_cqe_flg, u8 comp_event_flg)
+					     u8 comp_cqe_flg,
+					     u8 comp_event_flg)
 {
 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
 	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
 	struct vfpf_update_rxq_tlv *req;
 	enum _ecore_status_t rc;
 
+	/* TODO - API is limited to assuming continuous regions of queues,
+	 * but VF queues might not fulfill this requirement.
+	 * Need to consider whether we need new TLVs for this, or whether
+	 * simply doing it iteratively is good enough.
+	 */
+	if (!num_rxqs)
+		return ECORE_INVAL;
+
+again:
 	/* clear mailbox and prep first tlv */
 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
 
-	req->rx_qid = rx_queue_id;
-	req->num_rxqs = num_rxqs;
+	/* Find the length of the current contiguous range of queues beginning
+	 * at first queue's index.
+	 */
+	req->rx_qid = (*pp_cid)->rel.queue_id;
+	for (req->num_rxqs = 1; req->num_rxqs < num_rxqs; req->num_rxqs++)
+		if (pp_cid[req->num_rxqs]->rel.queue_id !=
+		    req->rx_qid + req->num_rxqs)
+			break;
 
 	if (comp_cqe_flg)
 		req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
@@ -702,9 +717,17 @@ enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
 		goto exit;
 	}
 
+	/* Make sure we're done with all the queues */
+	if (req->num_rxqs < num_rxqs) {
+		num_rxqs -= req->num_rxqs;
+		pp_cid += req->num_rxqs;
+		/* TODO - should we give a non-locked variant instead? */
+		ecore_vf_pf_req_end(p_hwfn, rc);
+		goto again;
+	}
+
 exit:
 	ecore_vf_pf_req_end(p_hwfn, rc);
-
 	return rc;
 }
 
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index 6077d60..1afd667 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -53,10 +53,7 @@ struct ecore_vf_iov {
  * @brief VF - start the RX Queue by sending a message to the PF
  *
  * @param p_hwfn
- * @param cid			- zero based within the VF
- * @param rx_queue_id		- zero based within the VF
- * @param sb			- VF status block for this queue
- * @param sb_index		- Index within the status block
+ * @param p_cid			- Only relative fields are relevant
  * @param bd_max_bytes		- maximum number of bytes per bd
  * @param bd_chain_phys_addr	- physical address of bd chain
  * @param cqe_pbl_addr		- physical address of pbl
@@ -67,9 +64,7 @@ struct ecore_vf_iov {
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
-					   u8 rx_queue_id,
-					   u16 sb,
-					   u8 sb_index,
+					   struct ecore_queue_cid *p_cid,
 					   u16 bd_max_bytes,
 					   dma_addr_t bd_chain_phys_addr,
 					   dma_addr_t cqe_pbl_addr,
@@ -81,46 +76,44 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
  *        PF.
  *
  * @param p_hwfn
- * @param tx_queue_id		- zero based within the VF
- * @param sb			- status block for this queue
- * @param sb_index		- index within the status block
+ * @param p_cid
  * @param bd_chain_phys_addr	- physical address of tx chain
  * @param pp_doorbell		- pointer to address to which to
  *				write the doorbell too..
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
-					   u16 tx_queue_id,
-					   u16 sb,
-					   u8 sb_index,
-					   dma_addr_t pbl_addr,
-					   u16 pbl_size,
-					   void OSAL_IOMEM **pp_doorbell);
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+		      struct ecore_queue_cid *p_cid,
+		      dma_addr_t pbl_addr, u16 pbl_size,
+		      void OSAL_IOMEM **pp_doorbell);
 
 /**
  * @brief VF - stop the RX queue by sending a message to the PF
  *
  * @param p_hwfn
- * @param rx_qid
+ * @param p_cid
  * @param cqe_completion
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn	*p_hwfn,
-					  u16			rx_qid,
-					  bool			cqe_completion);
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+					  struct ecore_queue_cid *p_cid,
+					  bool cqe_completion);
 
 /**
  * @brief VF - stop the TX queue by sending a message to the PF
  *
  * @param p_hwfn
- * @param tx_qid
+ * @param p_cid
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn	*p_hwfn,
-					  u16			tx_qid);
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+					  struct ecore_queue_cid *p_cid);
+
+/* TODO - fix all the !SRIOV prototypes */
 
 #ifndef LINUX_REMOVE
 /**
@@ -128,20 +121,18 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn	*p_hwfn,
  *        PF
  *
  * @param p_hwfn
- * @param rx_queue_id
+ * @param pp_cid - list of queue-cids which we want to update
  * @param num_rxqs
- * @param init_sge_ring
  * @param comp_cqe_flg
  * @param comp_event_flg
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_vf_pf_rxqs_update(
-			struct ecore_hwfn	*p_hwfn,
-			u16			rx_queue_id,
-			u8			num_rxqs,
-			u8			comp_cqe_flg,
-			u8			comp_event_flg);
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+					     struct ecore_queue_cid **pp_cid,
+					     u8 num_rxqs,
+					     u8 comp_cqe_flg,
+					     u8 comp_event_flg);
 #endif
 
 /**
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
index d0f6e87..936dd15 100644
--- a/drivers/net/qede/qede_eth_if.c
+++ b/drivers/net/qede/qede_eth_if.c
@@ -148,7 +148,8 @@ bool qed_update_rss_parm_cmt(struct ecore_dev *edev, uint16_t *p_tbl)
 	      uint16_t bd_max_bytes,
 	      dma_addr_t bd_chain_phys_addr,
 	      dma_addr_t cqe_pbl_addr,
-	      uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
+	      uint16_t cqe_pbl_size,
+	      struct ecore_rxq_start_ret_params *ret_params)
 {
 	struct ecore_hwfn *p_hwfn;
 	int rc, hwfn_index;
@@ -159,12 +160,14 @@ bool qed_update_rss_parm_cmt(struct ecore_dev *edev, uint16_t *p_tbl)
 	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
 	p_params->stats_id = p_params->vport_id;
 
-	rc = ecore_sp_eth_rx_queue_start(p_hwfn,
-					 p_hwfn->hw_info.opaque_fid,
-					 p_params,
-					 bd_max_bytes,
-					 bd_chain_phys_addr,
-					 cqe_pbl_addr, cqe_pbl_size, pp_prod);
+	rc = ecore_eth_rx_queue_start(p_hwfn,
+				      p_hwfn->hw_info.opaque_fid,
+				      p_params,
+				      bd_max_bytes,
+				      bd_chain_phys_addr,
+				      cqe_pbl_addr,
+				      cqe_pbl_size,
+				      ret_params);
 
 	if (rc) {
 		DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
@@ -180,19 +183,17 @@ bool qed_update_rss_parm_cmt(struct ecore_dev *edev, uint16_t *p_tbl)
 }
 
 static int
-qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
+qed_stop_rxq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
 {
 	int rc, hwfn_index;
 	struct ecore_hwfn *p_hwfn;
 
-	hwfn_index = params->rss_id % edev->num_hwfns;
+	hwfn_index = rss_id % edev->num_hwfns;
 	p_hwfn = &edev->hwfns[hwfn_index];
 
-	rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
-					params->rx_queue_id / edev->num_hwfns,
-					params->eq_completion_only, false);
+	rc = ecore_eth_rx_queue_stop(p_hwfn, handle, false, false);
 	if (rc) {
-		DP_ERR(edev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+		DP_ERR(edev, "Failed to stop RXQ#%02x\n", rss_id);
 		return rc;
 	}
 
@@ -204,7 +205,8 @@ bool qed_update_rss_parm_cmt(struct ecore_dev *edev, uint16_t *p_tbl)
 	      uint8_t rss_num,
 	      struct ecore_queue_start_common_params *p_params,
 	      dma_addr_t pbl_addr,
-	      uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
+	      uint16_t pbl_size,
+	      struct ecore_txq_start_ret_params *ret_params)
 {
 	struct ecore_hwfn *p_hwfn;
 	int rc, hwfn_index;
@@ -213,14 +215,13 @@ bool qed_update_rss_parm_cmt(struct ecore_dev *edev, uint16_t *p_tbl)
 	p_hwfn = &edev->hwfns[hwfn_index];
 
 	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
-	p_params->qzone_id = p_params->queue_id;
 	p_params->stats_id = p_params->vport_id;
 
-	rc = ecore_sp_eth_tx_queue_start(p_hwfn,
-					 p_hwfn->hw_info.opaque_fid,
-					 p_params,
-					 0 /* tc */,
-					 pbl_addr, pbl_size, pp_doorbell);
+	rc = ecore_eth_tx_queue_start(p_hwfn,
+				      p_hwfn->hw_info.opaque_fid,
+				      p_params, 0 /* tc */,
+				      pbl_addr, pbl_size,
+				      ret_params);
 
 	if (rc) {
 		DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
@@ -236,18 +237,17 @@ bool qed_update_rss_parm_cmt(struct ecore_dev *edev, uint16_t *p_tbl)
 }
 
 static int
-qed_stop_txq(struct ecore_dev *edev, struct qed_stop_txq_params *params)
+qed_stop_txq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
 {
 	struct ecore_hwfn *p_hwfn;
 	int rc, hwfn_index;
 
-	hwfn_index = params->rss_id % edev->num_hwfns;
+	hwfn_index = rss_id % edev->num_hwfns;
 	p_hwfn = &edev->hwfns[hwfn_index];
 
-	rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
-					params->tx_queue_id / edev->num_hwfns);
+	rc = ecore_eth_tx_queue_stop(p_hwfn, handle);
 	if (rc) {
-		DP_ERR(edev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+		DP_ERR(edev, "Failed to stop TXQ#%02x\n", rss_id);
 		return rc;
 	}
 
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
index 37b1b74..12dd828 100644
--- a/drivers/net/qede/qede_eth_if.h
+++ b/drivers/net/qede/qede_eth_if.h
@@ -47,13 +47,6 @@ struct qed_dev_eth_info {
 	bool is_legacy;
 };
 
-struct qed_stop_rxq_params {
-	uint8_t rss_id;
-	uint8_t rx_queue_id;
-	uint8_t vport_id;
-	bool eq_completion_only;
-};
-
 struct qed_update_vport_params {
 	uint8_t vport_id;
 	uint8_t update_vport_active_flg;
@@ -78,11 +71,6 @@ struct qed_start_vport_params {
 	bool clear_stats;
 };
 
-struct qed_stop_txq_params {
-	uint8_t rss_id;
-	uint8_t tx_queue_id;
-};
-
 struct qed_eth_ops {
 	const struct qed_common_ops *common;
 
@@ -103,19 +91,21 @@ struct qed_eth_ops {
 			  uint16_t bd_max_bytes,
 			  dma_addr_t bd_chain_phys_addr,
 			  dma_addr_t cqe_pbl_addr,
-			  uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod);
+			  uint16_t cqe_pbl_size,
+			  struct ecore_rxq_start_ret_params *ret_params);
 
 	int (*q_rx_stop)(struct ecore_dev *edev,
-			 struct qed_stop_rxq_params *params);
+			 uint8_t rss_id, void *handle);
 
 	int (*q_tx_start)(struct ecore_dev *edev,
 			  uint8_t rss_num,
 			  struct ecore_queue_start_common_params *p_params,
 			  dma_addr_t pbl_addr,
-			  uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell);
+			  uint16_t pbl_size,
+			  struct ecore_txq_start_ret_params *ret_params);
 
 	int (*q_tx_stop)(struct ecore_dev *edev,
-			 struct qed_stop_txq_params *params);
+			 uint8_t rss_id, void *handle);
 
 	int (*eth_cqe_completion)(struct ecore_dev *edev,
 				  uint8_t rss_id,
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 01ea9b4..85134fb 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -527,11 +527,14 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 	for_each_queue(i) {
 		fp = &qdev->fp_array[i];
 		if (fp->type & QEDE_FASTPATH_RX) {
+			struct ecore_rxq_start_ret_params ret_params;
+
 			p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
 								rx_comp_ring);
 			page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
 								rx_comp_ring);
 
+			memset(&ret_params, 0, sizeof(ret_params));
 			memset(&q_params, 0, sizeof(q_params));
 			q_params.queue_id = i;
 			q_params.vport_id = 0;
@@ -545,13 +548,17 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 					   fp->rxq->rx_bd_ring.p_phys_addr,
 					   p_phys_table,
 					   page_cnt,
-					   &fp->rxq->hw_rxq_prod_addr);
+					   &ret_params);
 			if (rc) {
 				DP_ERR(edev, "Start rxq #%d failed %d\n",
 				       fp->rxq->queue_id, rc);
 				return rc;
 			}
 
+			/* Use the return parameters */
+			fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
+			fp->rxq->handle = ret_params.p_handle;
+
 			fp->rxq->hw_cons_ptr =
 					&fp->sb_info->sb_virt->pi_array[RX_PI];
 
@@ -561,6 +568,8 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 		if (!(fp->type & QEDE_FASTPATH_TX))
 			continue;
 		for (tc = 0; tc < qdev->num_tc; tc++) {
+			struct ecore_txq_start_ret_params ret_params;
+
 			txq = fp->txqs[tc];
 			txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
 
@@ -568,6 +577,7 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 			page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
 
 			memset(&q_params, 0, sizeof(q_params));
+			memset(&ret_params, 0, sizeof(ret_params));
 			q_params.queue_id = txq->queue_id;
 			q_params.vport_id = 0;
 			q_params.sb = fp->sb_info->igu_sb_id;
@@ -576,13 +586,16 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 			rc = qdev->ops->q_tx_start(edev, i, &q_params,
 						   p_phys_table,
 						   page_cnt, /* **pp_doorbell */
-						   &txq->doorbell_addr);
+						   &ret_params);
 			if (rc) {
 				DP_ERR(edev, "Start txq %u failed %d\n",
 				       txq_index, rc);
 				return rc;
 			}
 
+			txq->doorbell_addr = ret_params.p_doorbell;
+			txq->handle = ret_params.p_handle;
+
 			txq->hw_cons_ptr =
 			    &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
 			SET_FIELD(txq->tx_db.data.params,
@@ -1399,6 +1412,7 @@ static int qede_stop_queues(struct qede_dev *qdev)
 {
 	struct qed_update_vport_params vport_update_params;
 	struct ecore_dev *edev = &qdev->edev;
+	struct qede_fastpath *fp;
 	int rc, tc, i;
 
 	/* Disable the vport */
@@ -1420,7 +1434,7 @@ static int qede_stop_queues(struct qede_dev *qdev)
 
 	/* Flush Tx queues. If needed, request drain from MCP */
 	for_each_queue(i) {
-		struct qede_fastpath *fp = &qdev->fp_array[i];
+		fp = &qdev->fp_array[i];
 
 		if (fp->type & QEDE_FASTPATH_TX) {
 			for (tc = 0; tc < qdev->num_tc; tc++) {
@@ -1435,23 +1449,17 @@ static int qede_stop_queues(struct qede_dev *qdev)
 
 	/* Stop all Queues in reverse order */
 	for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
-		struct qed_stop_rxq_params rx_params;
+		fp = &qdev->fp_array[i];
 
 		/* Stop the Tx Queue(s) */
 		if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
 			for (tc = 0; tc < qdev->num_tc; tc++) {
-				struct qed_stop_txq_params tx_params;
-				u8 val;
-
-				tx_params.rss_id = i;
-				val = qdev->fp_array[i].txqs[tc]->queue_id;
-				tx_params.tx_queue_id = val;
-
+				struct qede_tx_queue *txq = fp->txqs[tc];
 				DP_INFO(edev, "Stopping tx queues\n");
-				rc = qdev->ops->q_tx_stop(edev, &tx_params);
+				rc = qdev->ops->q_tx_stop(edev, i, txq->handle);
 				if (rc) {
 					DP_ERR(edev, "Failed to stop TXQ #%d\n",
-					       tx_params.tx_queue_id);
+					       i);
 					return rc;
 				}
 			}
@@ -1459,14 +1467,8 @@ static int qede_stop_queues(struct qede_dev *qdev)
 
 		/* Stop the Rx Queue */
 		if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
-			memset(&rx_params, 0, sizeof(rx_params));
-			rx_params.rss_id = i;
-			rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id;
-			rx_params.eq_completion_only = 1;
-
 			DP_INFO(edev, "Stopping rx queues\n");
-
-			rc = qdev->ops->q_rx_stop(edev, &rx_params);
+			rc = qdev->ops->q_rx_stop(edev, i, fp->rxq->handle);
 			if (rc) {
 				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
 				return rc;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 9a393e9..17a2f0c 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -156,6 +156,7 @@ struct qede_rx_queue {
 	uint64_t rx_hw_errors;
 	uint64_t rx_alloc_errors;
 	struct qede_dev *qdev;
+	void *handle;
 };
 
 /*
@@ -187,6 +188,7 @@ struct qede_tx_queue {
 	uint64_t xmit_pkts;
 	bool is_legacy;
 	struct qede_dev *qdev;
+	void *handle;
 };
 
 struct qede_fastpath {
-- 
1.7.10.3

  parent reply	other threads:[~2017-02-27  7:58 UTC|newest]

Thread overview: 329+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-02-27  7:56 [PATCH 00/61] net/qede/base: qede PMD enhancements Rasesh Mody
2017-02-27  7:56 ` [PATCH 01/61] net/qede/base: return an initialized return value Rasesh Mody
2017-02-27  7:56 ` [PATCH 02/61] send FW version driver state to MFW Rasesh Mody
2017-03-03 10:26   ` Ferruh Yigit
2017-02-27  7:56 ` [PATCH 03/61] net/qede/base: mask Rx buffer attention bits Rasesh Mody
2017-02-27  7:56 ` [PATCH 04/61] net/qede/base: print various indication on Tx-timeouts Rasesh Mody
2017-02-27  7:56 ` [PATCH 05/61] net/qede/base: utilize FW 8.18.9.0 Rasesh Mody
2017-02-27  7:56 ` [PATCH 06/61] drivers/net/qede: upgrade the FW to 8.18.9.0 Rasesh Mody
2017-02-27  7:56 ` [PATCH 07/61] net/qede/base: decrease MAX_HWFNS_PER_DEVICE from 4 to 2 Rasesh Mody
2017-02-27  7:56 ` [PATCH 08/61] net/qede/base: move mask constants defining NIC type Rasesh Mody
2017-02-27  7:56 ` [PATCH 09/61] net/qede/base: remove attribute field from update current config Rasesh Mody
2017-02-27  7:56 ` [PATCH 10/61] net/qede/base: add nvram options Rasesh Mody
2017-02-27  7:56 ` [PATCH 11/61] net/qede/base: add comment Rasesh Mody
2017-02-27  7:56 ` [PATCH 12/61] net/qede/base: use default mtu from shared memory Rasesh Mody
2017-02-27  7:56 ` [PATCH 13/61] net/qede/base: change queue/sb-id from 8 bit to 16 bit Rasesh Mody
2017-02-27  7:56 ` [PATCH 14/61] net/qede/base: update MFW when default mtu is changed Rasesh Mody
2017-02-27  7:56 ` [PATCH 15/61] net/qede/base: prevent device init failure Rasesh Mody
2017-02-27  7:56 ` [PATCH 16/61] net/qede/base: add support to read personality via MFW commands Rasesh Mody
2017-02-27  7:56 ` [PATCH 17/61] net/qede/base: allow probe to succeed with minor HW-issues Rasesh Mody
2017-02-27  7:56 ` [PATCH 18/61] net/qede/base: remove unneeded step in HW init Rasesh Mody
2017-02-27  7:56 ` [PATCH 19/61] net/qede/base: allow only trusted VFs to be promisc/multi-promisc Rasesh Mody
2017-02-27  7:56 ` [PATCH 20/61] net/qede/base: qm initialization revamp Rasesh Mody
2017-02-27  7:56 ` [PATCH 21/61] net/qede/base: add a printout of the FW, MFW and MBI versions Rasesh Mody
2017-02-27  7:56 ` [PATCH 22/61] net/qede/base: check active VF queues before stopping Rasesh Mody
2017-02-27  7:56 ` [PATCH 23/61] net/qede/base: set the drv_type before sending load request Rasesh Mody
2017-02-27  7:56 ` [PATCH 24/61] net/qede/base: prevent driver laod with invalid resources Rasesh Mody
2017-02-27  7:56 ` [PATCH 25/61] net/qede/base: add interfaces for MFW TLV request processing Rasesh Mody
2017-02-27  7:56 ` [PATCH 26/61] net/qede/base: fix to set pointers to NULL after freeing Rasesh Mody
2017-02-27  7:56 ` Rasesh Mody [this message]
2017-02-27  7:56 ` [PATCH 28/61] net/qede/base: add support for handling TLV request from MFW Rasesh Mody
2017-02-27  7:56 ` [PATCH 29/61] net/qede/base: optimize cache-line access Rasesh Mody
2017-02-27  7:56 ` [PATCH 30/61] net/qede/base: infrastructure changes for VF tunnelling Rasesh Mody
2017-02-27  7:56 ` [PATCH 31/61] net/qede/base: revise tunnel APIs/structs Rasesh Mody
2017-02-27  7:56 ` [PATCH 32/61] net/qede/base: add tunnelling support for VFs Rasesh Mody
2017-02-27  7:56 ` [PATCH 33/61] net/qede/base: formatting changes Rasesh Mody
2017-02-27  7:56 ` [PATCH 34/61] net/qede/base: prevent transmitter stuck condition Rasesh Mody
2017-02-27  7:56 ` [PATCH 35/61] net/qede/base: add mask/shift defines for resource command Rasesh Mody
2017-02-27  7:56 ` [PATCH 36/61] net/qede/base: add API for using MFW resource lock Rasesh Mody
2017-02-27  7:56 ` [PATCH 37/61] net/qede/base: remove clock slowdown option Rasesh Mody
2017-02-27  7:56 ` [PATCH 38/61] net/qede/base: add new image types Rasesh Mody
2017-02-27  7:56 ` [PATCH 39/61] net/qede/base: use L2-handles for RSS configuration Rasesh Mody
2017-02-27  7:56 ` [PATCH 40/61] net/qede/base: change valloc to vzalloc Rasesh Mody
2017-02-27  7:56 ` [PATCH 41/61] net/qede/base: add support for previous driver unload Rasesh Mody
2017-02-27  7:56 ` [PATCH 42/61] net/qede/base: add non-l2 dcbx tlv application support Rasesh Mody
2017-02-27  7:56 ` [PATCH 43/61] net/qede/base: update bulletin board with link state during init Rasesh Mody
2017-02-27  7:57 ` [PATCH 44/61] net/qede/base: add coalescing support for VFs Rasesh Mody
2017-02-27  7:57 ` [PATCH 45/61] net/qede/base: add macro got resource value message Rasesh Mody
2017-02-27  7:57 ` [PATCH 46/61] net/qede/base: add mailbox for resource allocation Rasesh Mody
2017-02-27  7:57 ` [PATCH 47/61] net/qede/base: add macro for unsupported command Rasesh Mody
2017-02-27  7:57 ` [PATCH 48/61] net/qede/base: Add support to set max values of soft resoruces Rasesh Mody
2017-02-27  7:57 ` [PATCH 49/61] net/qede/base: add return code check Rasesh Mody
2017-02-27  7:57 ` [PATCH 50/61] net/qede/base: zero out MFW mailbox data Rasesh Mody
2017-02-27  7:57 ` [PATCH 51/61] net/qede/base: move code bits Rasesh Mody
2017-02-27  7:57 ` [PATCH 52/61] net/qede/base: add PF parameter Rasesh Mody
2017-02-27  7:57 ` [PATCH 53/61] net/qede/base: allow PMD to control vport-id and rss-eng-id Rasesh Mody
2017-02-27  7:57 ` [PATCH 54/61] net/qede/base: add udp ports in bulletin board message Rasesh Mody
2017-02-27  7:57 ` [PATCH 55/61] net/qede/base: prevent DMAE transactions during recovery Rasesh Mody
2017-02-27  7:57 ` [PATCH 56/61] net/qede/base: add multi-Txq support on same queue-zone for VFs Rasesh Mody
2017-02-27  7:57 ` [PATCH 57/61] net/qede/base: fix race cond between MFW attentions and PF stop Rasesh Mody
2017-02-27  7:57 ` [PATCH 58/61] net/qede/base: semantic changes Rasesh Mody
2017-02-27  7:57 ` [PATCH 59/61] net/qede/base: add support for arfs mode Rasesh Mody
2017-02-27  7:57 ` [PATCH 60/61] net/qede: add ntuple and flow director filter support Rasesh Mody
2017-02-27  7:57 ` [PATCH 61/61] net/qede: add LRO/TSO offloads support Rasesh Mody
2017-03-03 10:25 ` [PATCH 00/61] net/qede/base: qede PMD enhancements Ferruh Yigit
2017-03-18  7:05   ` [PATCH v2 " Rasesh Mody
2017-03-20 16:59     ` Ferruh Yigit
2017-03-24  7:27       ` [PATCH v3 " Rasesh Mody
2017-03-24 11:08         ` Ferruh Yigit
2017-03-28  6:42           ` [PATCH 01/62] net/qede/base: return an initialized return value Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 00/62] net/qede/base: update PMD to 2.4.0.1 Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 " Rasesh Mody
2017-03-30 12:23               ` Ferruh Yigit
2017-03-29 20:36             ` [PATCH v5 01/62] net/qede/base: return an initialized return value Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 02/62] net/qede/base: send FW version driver state to MFW Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 03/62] net/qede/base: mask Rx buffer attention bits Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 04/62] net/qede/base: print various indication on Tx-timeouts Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 05/62] net/qede/base: utilize FW 8.18.9.0 Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 06/62] net/qede: upgrade the FW to 8.18.9.0 Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 07/62] net/qede/base: decrease maximum HW func per device Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 08/62] net/qede/base: move mask constants defining NIC type Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 09/62] net/qede/base: remove attribute from update current config Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 10/62] net/qede/base: add nvram options Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 11/62] net/qede/base: add comment Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 12/62] net/qede/base: use default MTU from shared memory Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 13/62] net/qede/base: change queue/sb-id from 8 bit to 16 bit Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 14/62] net/qede/base: update MFW when default MTU is changed Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 15/62] net/qede/base: prevent device init failure Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 16/62] net/qede/base: read card personality via MFW commands Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 17/62] net/qede/base: allow probe to succeed with minor HW-issues Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 18/62] net/qede/base: remove unneeded step in HW init Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 19/62] net/qede/base: allow only trusted VFs to be promisc Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 20/62] net/qede/base: qm initialization revamp Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 21/62] net/qede/base: print firmware MFW and MBI versions Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 22/62] net/qede/base: check active VF queues before stopping Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 23/62] net/qede/base: set driver type before sending load request Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 24/62] net/qede/base: prevent driver load with invalid resources Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 25/62] net/qede/base: add interfaces for MFW TLV request processing Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 26/62] net/qede/base: code refactoring of SP queues Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 27/62] net/qede/base: make L2 queues handle based Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 28/62] net/qede/base: add support for handling TLV request from MFW Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 29/62] net/qede/base: optimize cache-line access Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 30/62] net/qede/base: infrastructure changes for VF tunnelling Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 31/62] net/qede/base: revise tunnel APIs/structs Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 32/62] net/qede/base: add tunnelling support for VFs Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 33/62] net/qede/base: formatting changes Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 34/62] net/qede/base: prevent transmitter stuck condition Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 35/62] net/qede/base: add mask/shift defines for resource command Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 36/62] net/qede/base: add API for using MFW resource lock Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 37/62] net/qede/base: remove clock slowdown option Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 38/62] net/qede/base: add new image types Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 39/62] net/qede/base: use L2-handles for RSS configuration Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 40/62] net/qede/base: change valloc to vzalloc Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 41/62] net/qede/base: add support for previous driver unload Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 42/62] net/qede/base: add non-L2 dcbx tlv application support Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 43/62] net/qede/base: update bulletin board during VF init Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 44/62] net/qede/base: add coalescing support for VFs Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 45/62] net/qede/base: add macro got resource value message Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 46/62] net/qede/base: add mailbox for resource allocation Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 47/62] net/qede/base: add macro for unsupported command Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 48/62] net/qede/base: set max values for soft resources Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 49/62] net/qede/base: add return code check Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 50/62] net/qede/base: zero out MFW mailbox data Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 51/62] net/qede/base: move code bits Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 52/62] net/qede/base: add PF parameter Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 53/62] net/qede/base: allow PMD to control vport and RSS engine ids Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 54/62] net/qede/base: add udp ports in bulletin board message Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 55/62] net/qede/base: prevent DMAE transactions during recovery Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 56/62] net/qede/base: multi-Txq support on same queue-zone for VFs Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 57/62] net/qede/base: prevent race condition during unload Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 58/62] net/qede/base: semantic changes Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 59/62] net/qede/base: add support for arfs mode Rasesh Mody
2017-03-29 20:37             ` [PATCH v5 60/62] net/qede: add ntuple and flow director filter support Rasesh Mody
2017-03-29 20:37             ` [PATCH v5 61/62] net/qede: add LRO/TSO offloads support Rasesh Mody
2017-03-29 20:37             ` [PATCH v5 62/62] net/qede: update PMD version to 2.4.0.1 Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 01/62] net/qede/base: return an initialized return value Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 02/62] net/qede/base: send FW version driver state to MFW Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 03/62] net/qede/base: mask Rx buffer attention bits Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 04/62] net/qede/base: print various indication on Tx-timeouts Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 05/62] net/qede/base: utilize FW 8.18.9.0 Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 06/62] net/qede: upgrade the FW to 8.18.9.0 Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 07/62] net/qede/base: decrease maximum HW func per device Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 08/62] net/qede/base: move mask constants defining NIC type Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 09/62] net/qede/base: remove attribute from update current config Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 10/62] net/qede/base: add nvram options Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 11/62] net/qede/base: add comment Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 12/62] net/qede/base: use default MTU from shared memory Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 13/62] net/qede/base: change queue/sb-id from 8 bit to 16 bit Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 14/62] net/qede/base: update MFW when default MTU is changed Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 15/62] net/qede/base: prevent device init failure Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 16/62] net/qede/base: read card personality via MFW commands Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 17/62] net/qede/base: allow probe to succeed with minor HW-issues Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 18/62] net/qede/base: remove unneeded step in HW init Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 19/62] net/qede/base: allow only trusted VFs to be promisc Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 20/62] net/qede/base: qm initialization revamp Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 21/62] net/qede/base: print firmware MFW and MBI versions Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 22/62] net/qede/base: check active VF queues before stopping Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 23/62] net/qede/base: set driver type before sending load request Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 24/62] net/qede/base: prevent driver load with invalid resources Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 25/62] net/qede/base: add interfaces for MFW TLV request processing Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 26/62] net/qede/base: code refactoring of SP queues Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 27/62] net/qede/base: make L2 queues handle based Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 28/62] net/qede/base: add support for handling TLV request from MFW Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 29/62] net/qede/base: optimize cache-line access Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 30/62] net/qede/base: infrastructure changes for VF tunnelling Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 31/62] net/qede/base: revise tunnel APIs/structs Rasesh Mody
2017-03-28 11:22             ` Ferruh Yigit
2017-03-28 21:18               ` Mody, Rasesh
2017-03-29  9:23                 ` Ferruh Yigit
2017-03-29 20:48                   ` Mody, Rasesh
2017-03-28  6:52           ` [PATCH v4 32/62] net/qede/base: add tunnelling support for VFs Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 33/62] net/qede/base: formatting changes Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 34/62] net/qede/base: prevent transmitter stuck condition Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 35/62] net/qede/base: add mask/shift defines for resource command Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 36/62] net/qede/base: add API for using MFW resource lock Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 37/62] net/qede/base: remove clock slowdown option Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 38/62] net/qede/base: add new image types Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 39/62] net/qede/base: use L2-handles for RSS configuration Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 40/62] net/qede/base: change valloc to vzalloc Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 41/62] net/qede/base: add support for previous driver unload Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 42/62] net/qede/base: add non-L2 dcbx tlv application support Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 43/62] net/qede/base: update bulletin board during VF init Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 44/62] net/qede/base: add coalescing support for VFs Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 45/62] net/qede/base: add macro for resource value message Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 46/62] net/qede/base: add mailbox for resource allocation Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 47/62] net/qede/base: add macro for unsupported command Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 48/62] net/qede/base: set max values for soft resources Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 49/62] net/qede/base: add return code check Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 50/62] net/qede/base: zero out MFW mailbox data Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 51/62] net/qede/base: move code bits Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 52/62] net/qede/base: add PF parameter Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 53/62] net/qede/base: allow PMD to control vport and RSS engine ids Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 54/62] net/qede/base: add udp ports in bulletin board message Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 55/62] net/qede/base: prevent DMAE transactions during recovery Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 56/62] net/qede/base: multi-Txq support on same queue-zone for VFs Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 57/62] net/qede/base: prevent race condition during unload Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 58/62] net/qede/base: semantic changes Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 59/62] net/qede/base: add support for arfs mode Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 60/62] net/qede: add ntuple and flow director filter support Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 61/62] net/qede: add LRO/TSO offloads support Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 62/62] net/qede: update PMD version to 2.4.0.1 Rasesh Mody
     [not found]           ` <1490683278-23776-1-git-send-email-y>
2017-03-28  6:54             ` [PATCH 00/62] net/qede/base: update PMD " Mody, Rasesh
2017-03-24  7:27       ` [PATCH v3 01/61] net/qede/base: return an initialized return value Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 02/61] net/qede/base: send FW version driver state to MFW Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 03/61] net/qede/base: mask Rx buffer attention bits Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 04/61] net/qede/base: print various indication on Tx-timeouts Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 05/61] net/qede/base: utilize FW 8.18.9.0 Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 06/61] net/qede: upgrade the FW to 8.18.9.0 Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 07/61] net/qede/base: decrease maximum HW func per device Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 08/61] net/qede/base: move mask constants defining NIC type Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 09/61] net/qede/base: remove attribute from update current config Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 10/61] net/qede/base: add nvram options Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 11/61] net/qede/base: add comment Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 12/61] net/qede/base: use default MTU from shared memory Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 13/61] net/qede/base: change queue/sb-id from 8 bit to 16 bit Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 14/61] net/qede/base: update MFW when default MTU is changed Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 15/61] net/qede/base: prevent device init failure Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 16/61] net/qede/base: read card personality via MFW commands Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 17/61] net/qede/base: allow probe to succeed with minor HW-issues Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 18/61] net/qede/base: remove unneeded step in HW init Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 19/61] net/qede/base: allow only trusted VFs to be promisc Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 20/61] net/qede/base: qm initialization revamp Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 21/61] net/qede/base: print firmware MFW and MBI versions Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 22/61] net/qede/base: check active VF queues before stopping Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 23/61] net/qede/base: set driver type before sending load request Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 24/61] net/qede/base: prevent driver load with invalid resources Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 25/61] net/qede/base: add interfaces for MFW TLV request processing Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 26/61] net/qede/base: code refactoring of SP queues Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 27/61] net/qede/base: make L2 queues handle based Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 28/61] net/qede/base: add support for handling TLV request from MFW Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 29/61] net/qede/base: optimize cache-line access Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 30/61] net/qede/base: infrastructure changes for VF tunnelling Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 31/61] net/qede/base: revise tunnel APIs/structs Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 32/61] net/qede/base: add tunnelling support for VFs Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 33/61] net/qede/base: formatting changes Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 34/61] net/qede/base: prevent transmitter stuck condition Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 35/61] net/qede/base: add mask/shift defines for resource command Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 36/61] net/qede/base: add API for using MFW resource lock Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 37/61] net/qede/base: remove clock slowdown option Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 38/61] net/qede/base: add new image types Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 39/61] net/qede/base: use L2-handles for RSS configuration Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 40/61] net/qede/base: change valloc to vzalloc Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 41/61] net/qede/base: add support for previous driver unload Rasesh Mody
2017-03-24 11:00         ` Ferruh Yigit
2017-03-25  6:25           ` Mody, Rasesh
2017-03-24  7:28       ` [PATCH v3 42/61] net/qede/base: add non-L2 dcbx tlv application support Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 43/61] net/qede/base: update bulletin board during VF init Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 44/61] net/qede/base: add coalescing support for VFs Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 45/61] net/qede/base: add macro for resource value message Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 46/61] net/qede/base: add mailbox for resource allocation Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 47/61] net/qede/base: add macro for unsupported command Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 48/61] net/qede/base: set max values for soft resources Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 49/61] net/qede/base: add return code check Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 50/61] net/qede/base: zero out MFW mailbox data Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 51/61] net/qede/base: move code bits Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 52/61] net/qede/base: add PF parameter Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 53/61] net/qede/base: allow PMD to control vport and RSS engine ids Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 54/61] net/qede/base: add udp ports in bulletin board message Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 55/61] net/qede/base: prevent DMAE transactions during recovery Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 56/61] net/qede/base: multi-Txq support on same queue-zone for VFs Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 57/61] net/qede/base: prevent race condition during unload Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 58/61] net/qede/base: semantic changes Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 59/61] net/qede/base: add support for arfs mode Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 60/61] net/qede: add ntuple and flow director filter support Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 61/61] net/qede: add LRO/TSO offloads support Rasesh Mody
2017-03-24  7:45       ` [PATCH v2 00/61] net/qede/base: qede PMD enhancements Mody, Rasesh
2017-03-18  7:05   ` [PATCH v2 01/61] net/qede/base: return an initialized return value Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 02/61] net/qede/base: send FW version driver state to MFW Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 03/61] net/qede/base: mask Rx buffer attention bits Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 04/61] net/qede/base: print various indication on Tx-timeouts Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 05/61] net/qede/base: utilize FW 8.18.9.0 Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 06/61] net/qede: upgrade the FW to 8.18.9.0 Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 07/61] net/qede/base: decrease maximum HW func per device Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 08/61] net/qede/base: move mask constants defining NIC type Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 09/61] net/qede/base: remove attribute from update current config Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 10/61] net/qede/base: add nvram options Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 11/61] net/qede/base: add comment Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 12/61] net/qede/base: use default MTU from shared memory Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 13/61] net/qede/base: change queue/sb-id from 8 bit to 16 bit Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 14/61] net/qede/base: update MFW when default MTU is changed Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 15/61] net/qede/base: prevent device init failure Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 16/61] net/qede/base: read card personality via MFW commands Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 17/61] net/qede/base: allow probe to succeed with minor HW-issues Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 18/61] net/qede/base: remove unneeded step in HW init Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 19/61] net/qede/base: allow only trusted VFs to be promisc Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 20/61] net/qede/base: qm initialization revamp Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 21/61] net/qede/base: print firmware MFW and MBI versions Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 22/61] net/qede/base: check active VF queues before stopping Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 23/61] net/qede/base: set driver type before sending load request Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 24/61] net/qede/base: prevent driver load with invalid resources Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 25/61] net/qede/base: add interfaces for MFW TLV request processing Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 26/61] net/qede/base: code refactoring of SP queues Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 27/61] net/qede/base: make L2 queues handle based Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 28/61] net/qede/base: add support for handling TLV request from MFW Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 29/61] net/qede/base: optimize cache-line access Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 30/61] net/qede/base: infrastructure changes for VF tunnelling Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 31/61] net/qede/base: revise tunnel APIs/structs Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 32/61] net/qede/base: add tunnelling support for VFs Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 33/61] net/qede/base: formatting changes Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 34/61] net/qede/base: prevent transmitter stuck condition Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 35/61] net/qede/base: add mask/shift defines for resource command Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 36/61] net/qede/base: add API for using MFW resource lock Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 37/61] net/qede/base: remove clock slowdown option Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 38/61] net/qede/base: add new image types Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 39/61] net/qede/base: use L2-handles for RSS configuration Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 40/61] net/qede/base: change valloc to vzalloc Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 41/61] net/qede/base: add support for previous driver unload Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 42/61] net/qede/base: add non-L2 dcbx tlv application support Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 43/61] net/qede/base: update bulletin board during VF init Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 44/61] net/qede/base: add coalescing support for VFs Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 45/61] net/qede/base: add macro for resource value message Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 46/61] net/qede/base: add mailbox for resource allocation Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 47/61] net/qede/base: add macro for unsupported command Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 48/61] net/qede/base: set max values for soft resources Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 49/61] net/qede/base: add return code check Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 50/61] net/qede/base: zero out MFW mailbox data Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 51/61] net/qede/base: move code bits Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 52/61] net/qede/base: add PF parameter Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 53/61] net/qede/base: allow PMD to control vport and RSS engine ids Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 54/61] net/qede/base: add udp ports in bulletin board message Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 55/61] net/qede/base: prevent DMAE transactions during recovery Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 56/61] net/qede/base: multi-Txq support on same queue-zone for VFs Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 57/61] net/qede/base: prevent race condition during unload Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 58/61] net/qede/base: semantic changes Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 59/61] net/qede/base: add support for arfs mode Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 60/61] net/qede: add ntuple and flow director filter support Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 61/61] net/qede: add LRO/TSO offloads support Rasesh Mody
2017-03-24 11:58     ` Ferruh Yigit
2017-03-25  6:28       ` Mody, Rasesh
2017-03-18  7:18   ` [PATCH 00/61] net/qede/base: qede PMD enhancements Mody, Rasesh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1488182237-10247-28-git-send-email-rasesh.mody@cavium.com \
    --to=rasesh.mody@cavium.com \
    --cc=Dept-EngDPDKDev@cavium.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.