All of lore.kernel.org
 help / color / mirror / Atom feed
From: Rasesh Mody <rasesh.mody@cavium.com>
To: <ferruh.yigit@intel.com>, <dev@dpdk.org>
Cc: Harish Patil <harish.patil@qlogic.com>
Subject: [PATCH v4 61/62] net/qede: add LRO/TSO offloads support
Date: Mon, 27 Mar 2017 23:52:31 -0700	[thread overview]
Message-ID: <1490683952-24919-62-git-send-email-rasesh.mody@cavium.com> (raw)
In-Reply-To: <798af029-9a26-9065-350b-48781c1d3c55@intel.com>

From: Harish Patil <harish.patil@qlogic.com>

This patch includes slowpath configuration and fastpath changes
to support LRO and TSO. A bit of revamping is needed in order
to make use of existing packet classification schemes in Rx fastpath
and for SG element processing in Tx.

Signed-off-by: Harish Patil <harish.patil@qlogic.com>
---
 doc/guides/nics/features/qede.ini    |    2 +
 doc/guides/nics/features/qede_vf.ini |    2 +
 doc/guides/nics/qede.rst             |    2 +-
 drivers/net/qede/qede_eth_if.c       |    6 +-
 drivers/net/qede/qede_eth_if.h       |    3 +-
 drivers/net/qede/qede_ethdev.c       |   29 +-
 drivers/net/qede/qede_ethdev.h       |    3 +-
 drivers/net/qede/qede_rxtx.c         |  739 +++++++++++++++++++++++++---------
 drivers/net/qede/qede_rxtx.h         |   30 ++
 9 files changed, 605 insertions(+), 211 deletions(-)

diff --git a/doc/guides/nics/features/qede.ini b/doc/guides/nics/features/qede.ini
index b688914..fba5dc3 100644
--- a/doc/guides/nics/features/qede.ini
+++ b/doc/guides/nics/features/qede.ini
@@ -36,3 +36,5 @@ x86-64               = Y
 Usage doc            = Y
 N-tuple filter       = Y
 Flow director        = Y
+LRO                  = Y
+TSO                  = Y
diff --git a/doc/guides/nics/features/qede_vf.ini b/doc/guides/nics/features/qede_vf.ini
index acb1b99..21ec40f 100644
--- a/doc/guides/nics/features/qede_vf.ini
+++ b/doc/guides/nics/features/qede_vf.ini
@@ -31,4 +31,6 @@ Stats per queue      = Y
 Multiprocess aware   = Y
 Linux UIO            = Y
 x86-64               = Y
+LRO                  = Y
+TSO                  = Y
 Usage doc            = Y
diff --git a/doc/guides/nics/qede.rst b/doc/guides/nics/qede.rst
index df0aaec..eacb3da 100644
--- a/doc/guides/nics/qede.rst
+++ b/doc/guides/nics/qede.rst
@@ -61,13 +61,13 @@ Supported Features
 - Scatter-Gather
 - VXLAN tunneling offload
 - N-tuple filter and flow director (limited support)
+- LRO/TSO
 
 Non-supported Features
 ----------------------
 
 - SR-IOV PF
 - GENEVE and NVGRE Tunneling offloads
-- LRO/TSO
 - NPAR
 
 Supported QLogic Adapters
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
index 8e4290c..86bb129 100644
--- a/drivers/net/qede/qede_eth_if.c
+++ b/drivers/net/qede/qede_eth_if.c
@@ -18,8 +18,8 @@ qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
 		u8 tx_switching = 0;
 		struct ecore_sp_vport_start_params start = { 0 };
 
-		start.tpa_mode = p_params->gro_enable ? ECORE_TPA_MODE_GRO :
-		    ECORE_TPA_MODE_NONE;
+		start.tpa_mode = p_params->enable_lro ? ECORE_TPA_MODE_RSC :
+				ECORE_TPA_MODE_NONE;
 		start.remove_inner_vlan = p_params->remove_inner_vlan;
 		start.tx_switching = tx_switching;
 		start.only_untagged = false;	/* untagged only */
@@ -29,7 +29,6 @@ qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
 		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
 		start.handle_ptp_pkts = p_params->handle_ptp_pkts;
 		start.vport_id = p_params->vport_id;
-		start.max_buffers_per_cqe = 16;	/* TODO-is this right */
 		start.mtu = p_params->mtu;
 		/* @DPDK - Disable FW placement */
 		start.zero_placement_offset = 1;
@@ -120,6 +119,7 @@ qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
 	sp_params.update_accept_any_vlan_flg =
 	    params->update_accept_any_vlan_flg;
 	sp_params.mtu = params->mtu;
+	sp_params.sge_tpa_params = params->sge_tpa_params;
 
 	for_each_hwfn(edev, i) {
 		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
index 12dd828..d845bac 100644
--- a/drivers/net/qede/qede_eth_if.h
+++ b/drivers/net/qede/qede_eth_if.h
@@ -59,12 +59,13 @@ struct qed_update_vport_params {
 	uint8_t accept_any_vlan;
 	uint8_t update_rss_flg;
 	uint16_t mtu;
+	struct ecore_sge_tpa_params *sge_tpa_params;
 };
 
 struct qed_start_vport_params {
 	bool remove_inner_vlan;
 	bool handle_ptp_pkts;
-	bool gro_enable;
+	bool enable_lro;
 	bool drop_ttl0;
 	uint8_t vport_id;
 	uint16_t mtu;
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 22b528d..0762111 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -769,7 +769,7 @@ static int qede_init_vport(struct qede_dev *qdev)
 	int rc;
 
 	start.remove_inner_vlan = 1;
-	start.gro_enable = 0;
+	start.enable_lro = qdev->enable_lro;
 	start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
 	start.vport_id = 0;
 	start.drop_ttl0 = false;
@@ -866,11 +866,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	if (rxmode->enable_scatter == 1)
 		eth_dev->data->scattered_rx = 1;
 
-	if (rxmode->enable_lro == 1) {
-		DP_ERR(edev, "LRO is not supported\n");
-		return -EINVAL;
-	}
-
 	if (!rxmode->hw_strip_crc)
 		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
 
@@ -878,6 +873,13 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
 			      "in hw\n");
 
+	if (rxmode->enable_lro) {
+		qdev->enable_lro = true;
+		/* Enable scatter mode for LRO */
+		if (!rxmode->enable_scatter)
+			eth_dev->data->scattered_rx = 1;
+	}
+
 	/* Check for the port restart case */
 	if (qdev->state != QEDE_DEV_INIT) {
 		rc = qdev->ops->vport_stop(edev, 0);
@@ -957,13 +959,15 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
 	.nb_max = NUM_RX_BDS_MAX,
 	.nb_min = 128,
-	.nb_align = 128	/* lowest common multiple */
+	.nb_align = 128 /* lowest common multiple */
 };
 
 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
 	.nb_max = NUM_TX_BDS_MAX,
 	.nb_min = 256,
-	.nb_align = 256
+	.nb_align = 256,
+	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
+	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
 };
 
 static void
@@ -1005,12 +1009,16 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 				     DEV_RX_OFFLOAD_IPV4_CKSUM	|
 				     DEV_RX_OFFLOAD_UDP_CKSUM	|
 				     DEV_RX_OFFLOAD_TCP_CKSUM	|
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM);
+				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     DEV_RX_OFFLOAD_TCP_LRO);
+
 	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT	|
 				     DEV_TX_OFFLOAD_IPV4_CKSUM	|
 				     DEV_TX_OFFLOAD_UDP_CKSUM	|
 				     DEV_TX_OFFLOAD_TCP_CKSUM	|
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     DEV_TX_OFFLOAD_TCP_TSO |
+				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
 
 	memset(&link, 0, sizeof(struct qed_link_output));
 	qdev->ops->common->get_link(edev, &link);
@@ -2107,6 +2115,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
 	eth_dev->rx_pkt_burst = qede_recv_pkts;
 	eth_dev->tx_pkt_burst = qede_xmit_pkts;
+	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		DP_NOTICE(edev, false,
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 8342b99..799a3ba 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -193,8 +193,7 @@ struct qede_dev {
 	uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
 	uint64_t rss_hf;
 	uint8_t rss_key_len;
-	uint32_t flags;
-	bool gro_disable;
+	bool enable_lro;
 	uint16_t num_queues;
 	uint8_t fp_num_tx;
 	uint8_t fp_num_rx;
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 85134fb..e72a693 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -6,10 +6,9 @@
  * See LICENSE.qede_pmd for copyright and licensing details.
  */
 
+#include <rte_net.h>
 #include "qede_rxtx.h"
 
-static bool gro_disable = 1;	/* mod_param */
-
 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 {
 	struct rte_mbuf *new_mb = NULL;
@@ -352,7 +351,6 @@ static void qede_init_fp(struct qede_dev *qdev)
 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
 	}
 
-	qdev->gro_disable = gro_disable;
 }
 
 void qede_free_fp_arrays(struct qede_dev *qdev)
@@ -509,6 +507,30 @@ qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
 	PMD_RX_LOG(DEBUG, rxq, "bd_prod %u  cqe_prod %u", bd_prod, cqe_prod);
 }
 
+static void
+qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
+			   uint16_t mtu, bool enable)
+{
+	/* Enable LRO in split mode */
+	sge_tpa_params->tpa_ipv4_en_flg = enable;
+	sge_tpa_params->tpa_ipv6_en_flg = enable;
+	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
+	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
+	/* set if tpa enable changes */
+	sge_tpa_params->update_tpa_en_flg = 1;
+	/* set if tpa parameters should be handled */
+	sge_tpa_params->update_tpa_param_flg = enable;
+
+	sge_tpa_params->max_buffers_per_cqe = 20;
+	sge_tpa_params->tpa_pkt_split_flg = 1;
+	sge_tpa_params->tpa_hdr_data_split_flg = 0;
+	sge_tpa_params->tpa_gro_consistent_flg = 0;
+	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+	sge_tpa_params->tpa_max_size = 0x7FFF;
+	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
+	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
+}
+
 static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 {
 	struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -516,6 +538,7 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 	struct ecore_queue_start_common_params q_params;
 	struct qed_dev_info *qed_info = &qdev->dev_info.common;
 	struct qed_update_vport_params vport_update_params;
+	struct ecore_sge_tpa_params tpa_params;
 	struct qede_tx_queue *txq;
 	struct qede_fastpath *fp;
 	dma_addr_t p_phys_table;
@@ -529,10 +552,10 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 		if (fp->type & QEDE_FASTPATH_RX) {
 			struct ecore_rxq_start_ret_params ret_params;
 
-			p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
-								rx_comp_ring);
-			page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
-								rx_comp_ring);
+			p_phys_table =
+			    ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+			page_cnt =
+			    ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
 
 			memset(&ret_params, 0, sizeof(ret_params));
 			memset(&q_params, 0, sizeof(q_params));
@@ -625,6 +648,14 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 		vport_update_params.tx_switching_flg = 1;
 	}
 
+	/* TPA */
+	if (qdev->enable_lro) {
+		DP_INFO(edev, "Enabling LRO\n");
+		memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+		qede_update_sge_tpa_params(&tpa_params, qdev->mtu, true);
+		vport_update_params.sge_tpa_params = &tpa_params;
+	}
+
 	rc = qdev->ops->vport_update(edev, &vport_update_params);
 	if (rc) {
 		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
@@ -761,6 +792,94 @@ static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
 		return RTE_PTYPE_UNKNOWN;
 }
 
+static inline void
+qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
+			     struct qede_rx_queue *rxq,
+			     struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qede_agg_info *tpa_info;
+	struct rte_mbuf *temp_frag; /* Pointer to mbuf chain head */
+	struct rte_mbuf *curr_frag;
+	uint8_t list_count = 0;
+	uint16_t cons_idx;
+	uint8_t i;
+
+	PMD_RX_LOG(INFO, rxq, "TPA cont[%02x] - len_list [%04x %04x]\n",
+		   cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]),
+		   rte_le_to_cpu_16(cqe->len_list[1]));
+
+	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+	temp_frag = tpa_info->mbuf;
+	assert(temp_frag);
+
+	for (i = 0; cqe->len_list[i]; i++) {
+		cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+		curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+		qede_rx_bd_ring_consume(rxq);
+		curr_frag->data_len = rte_le_to_cpu_16(cqe->len_list[i]);
+		temp_frag->next = curr_frag;
+		temp_frag = curr_frag;
+		list_count++;
+	}
+
+	/* Allocate RX mbuf on the RX BD ring for those many consumed */
+	for (i = 0 ; i < list_count ; i++) {
+		if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+			DP_ERR(edev, "Failed to allocate mbuf for LRO cont\n");
+			tpa_info->state = QEDE_AGG_STATE_ERROR;
+		}
+	}
+}
+
+static inline void
+qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
+			    struct qede_rx_queue *rxq,
+			    struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qede_agg_info *tpa_info;
+	struct rte_mbuf *temp_frag; /* Pointer to mbuf chain head */
+	struct rte_mbuf *curr_frag;
+	struct rte_mbuf *rx_mb;
+	uint8_t list_count = 0;
+	uint16_t cons_idx;
+	uint8_t i;
+
+	PMD_RX_LOG(INFO, rxq, "TPA End[%02x] - len_list [%04x %04x]\n",
+		   cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]),
+		   rte_le_to_cpu_16(cqe->len_list[1]));
+
+	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+	temp_frag = tpa_info->mbuf;
+	assert(temp_frag);
+
+	for (i = 0; cqe->len_list[i]; i++) {
+		cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+		curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+		qede_rx_bd_ring_consume(rxq);
+		curr_frag->data_len = rte_le_to_cpu_16(cqe->len_list[i]);
+		temp_frag->next = curr_frag;
+		temp_frag = curr_frag;
+		list_count++;
+	}
+
+	/* Allocate RX mbuf on the RX BD ring for those many consumed */
+	for (i = 0 ; i < list_count ; i++) {
+		if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+			DP_ERR(edev, "Failed to allocate mbuf for lro end\n");
+			tpa_info->state = QEDE_AGG_STATE_ERROR;
+		}
+	}
+
+	/* Update total length and frags based on end TPA */
+	rx_mb = rxq->tpa_info[cqe->tpa_agg_index].mbuf;
+	/* TBD: Add sanity checks here */
+	rx_mb->nb_segs = cqe->num_of_bds;
+	rx_mb->pkt_len = cqe->total_packet_len;
+	tpa_info->state = QEDE_AGG_STATE_NONE;
+}
+
 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 {
 	uint32_t val;
@@ -875,13 +994,20 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	uint16_t pkt_len; /* Sum of all BD segments */
 	uint16_t len; /* Length of first BD */
 	uint8_t num_segs = 1;
-	uint16_t pad;
 	uint16_t preload_idx;
 	uint8_t csum_flag;
 	uint16_t parse_flag;
 	enum rss_hash_type htype;
 	uint8_t tunn_parse_flag;
 	uint8_t j;
+	struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
+	uint64_t ol_flags;
+	uint32_t packet_type;
+	uint16_t vlan_tci;
+	bool tpa_start_flg;
+	uint8_t bitfield_val;
+	uint8_t offset, tpa_agg_idx, flags;
+	struct qede_agg_info *tpa_info;
 
 	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
 	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -892,16 +1018,53 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		return 0;
 
 	while (sw_comp_cons != hw_comp_cons) {
+		ol_flags = 0;
+		packet_type = RTE_PTYPE_UNKNOWN;
+		vlan_tci = 0;
+		tpa_start_flg = false;
+
 		/* Get the CQE from the completion ring */
 		cqe =
 		    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
 		cqe_type = cqe->fast_path_regular.type;
-
-		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
-			PMD_RX_LOG(DEBUG, rxq, "Got a slowath CQE");
-
+		PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
+
+		switch (cqe_type) {
+		case ETH_RX_CQE_TYPE_REGULAR:
+			fp_cqe = &cqe->fast_path_regular;
+		break;
+		case ETH_RX_CQE_TYPE_TPA_START:
+			cqe_start_tpa = &cqe->fast_path_tpa_start;
+			tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
+			tpa_start_flg = true;
+			PMD_RX_LOG(INFO, rxq,
+			    "TPA start[%u] - len %04x [header %02x]"
+			    " [bd_list[0] %04x], [seg_len %04x]\n",
+			    cqe_start_tpa->tpa_agg_index,
+			    rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
+			    cqe_start_tpa->header_len,
+			    rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
+			    rte_le_to_cpu_16(cqe_start_tpa->seg_len));
+
+		break;
+		case ETH_RX_CQE_TYPE_TPA_CONT:
+			qede_rx_process_tpa_cont_cqe(qdev, rxq,
+						     &cqe->fast_path_tpa_cont);
+			continue;
+		case ETH_RX_CQE_TYPE_TPA_END:
+			qede_rx_process_tpa_end_cqe(qdev, rxq,
+						    &cqe->fast_path_tpa_end);
+			tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
+			rx_mb = rxq->tpa_info[tpa_agg_idx].mbuf;
+			PMD_RX_LOG(INFO, rxq, "TPA end reason %d\n",
+				   cqe->fast_path_tpa_end.end_reason);
+			goto tpa_end;
+		case ETH_RX_CQE_TYPE_SLOW_PATH:
+			PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
 			qdev->ops->eth_cqe_completion(edev, fp->id,
 				(struct eth_slow_path_rx_cqe *)cqe);
+			/* fall-thru */
+		default:
 			goto next_cqe;
 		}
 
@@ -910,69 +1073,93 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
 		assert(rx_mb != NULL);
 
-		/* non GRO */
-		fp_cqe = &cqe->fast_path_regular;
-
-		len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
-		pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
-		pad = fp_cqe->placement_offset;
-		assert((len + pad) <= rx_mb->buf_len);
-
-		PMD_RX_LOG(DEBUG, rxq,
-			   "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
-			   " len = %u, parsing_flags = %d",
-			   cqe_type, fp_cqe->bitfields,
-			   rte_le_to_cpu_16(fp_cqe->vlan_tag),
-			   len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));
-
-		/* If this is an error packet then drop it */
-		parse_flag =
-		    rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
-
-		rx_mb->ol_flags = 0;
-
+		/* Handle regular CQE or TPA start CQE */
+		if (!tpa_start_flg) {
+			parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
+			bitfield_val = fp_cqe->bitfields;
+			offset = fp_cqe->placement_offset;
+			len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+			pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+		} else {
+			parse_flag =
+			    rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
+			bitfield_val = cqe_start_tpa->bitfields;
+			offset = cqe_start_tpa->placement_offset;
+			/* seg_len = len_on_first_bd */
+			len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
+			tpa_info->start_cqe_bd_len = len +
+						cqe_start_tpa->header_len;
+			tpa_info->mbuf = rx_mb;
+		}
 		if (qede_tunn_exist(parse_flag)) {
-			PMD_RX_LOG(DEBUG, rxq, "Rx tunneled packet");
+			PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
 			if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
 				PMD_RX_LOG(ERR, rxq,
-					    "L4 csum failed, flags = 0x%x",
+					    "L4 csum failed, flags = 0x%x\n",
 					    parse_flag);
 				rxq->rx_hw_errors++;
-				rx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+				ol_flags |= PKT_RX_L4_CKSUM_BAD;
 			} else {
-				tunn_parse_flag =
-						fp_cqe->tunnel_pars_flags.flags;
-				rx_mb->packet_type =
-					qede_rx_cqe_to_tunn_pkt_type(
-							tunn_parse_flag);
+				ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+				if (tpa_start_flg)
+					flags =
+					 cqe_start_tpa->tunnel_pars_flags.flags;
+				else
+					flags = fp_cqe->tunnel_pars_flags.flags;
+				tunn_parse_flag = flags;
+				packet_type =
+				qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
 			}
 		} else {
-			PMD_RX_LOG(DEBUG, rxq, "Rx non-tunneled packet");
+			PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
 			if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
 				PMD_RX_LOG(ERR, rxq,
-					    "L4 csum failed, flags = 0x%x",
+					    "L4 csum failed, flags = 0x%x\n",
 					    parse_flag);
 				rxq->rx_hw_errors++;
-				rx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-			} else if (unlikely(qede_check_notunn_csum_l3(rx_mb,
+				ol_flags |= PKT_RX_L4_CKSUM_BAD;
+			} else {
+				ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+			}
+			if (unlikely(qede_check_notunn_csum_l3(rx_mb,
 							parse_flag))) {
 				PMD_RX_LOG(ERR, rxq,
-					   "IP csum failed, flags = 0x%x",
+					   "IP csum failed, flags = 0x%x\n",
 					   parse_flag);
 				rxq->rx_hw_errors++;
-				rx_mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+				ol_flags |= PKT_RX_IP_CKSUM_BAD;
 			} else {
-				rx_mb->packet_type =
+				ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+				packet_type =
 					qede_rx_cqe_to_pkt_type(parse_flag);
 			}
 		}
 
-		PMD_RX_LOG(INFO, rxq, "packet_type 0x%x", rx_mb->packet_type);
+		if (CQE_HAS_VLAN(parse_flag)) {
+			vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+			ol_flags |= PKT_RX_VLAN_PKT;
+		}
+
+		if (CQE_HAS_OUTER_VLAN(parse_flag)) {
+			vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+			ol_flags |= PKT_RX_QINQ_PKT;
+			rx_mb->vlan_tci_outer = 0;
+		}
+
+		/* RSS Hash */
+		htype = (uint8_t)GET_FIELD(bitfield_val,
+					ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+		if (qdev->rss_enable && htype) {
+			ol_flags |= PKT_RX_RSS_HASH;
+			rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
+			PMD_RX_LOG(INFO, rxq, "Hash result 0x%x\n",
+				   rx_mb->hash.rss);
+		}
 
 		if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
 			PMD_RX_LOG(ERR, rxq,
 				   "New buffer allocation failed,"
-				   "dropping incoming packet");
+				   "dropping incoming packet\n");
 			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
 			rte_eth_devices[rxq->port_id].
 			    data->rx_mbuf_alloc_failed++;
@@ -980,7 +1167,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			break;
 		}
 		qede_rx_bd_ring_consume(rxq);
-		if (fp_cqe->bd_num > 1) {
+
+		if (!tpa_start_flg && fp_cqe->bd_num > 1) {
 			PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
 				   " len on first: %04x Total Len: %04x",
 				   fp_cqe->bd_num, len, pkt_len);
@@ -1008,40 +1196,24 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
 
 		/* Update rest of the MBUF fields */
-		rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
-		rx_mb->nb_segs = fp_cqe->bd_num;
-		rx_mb->data_len = len;
-		rx_mb->pkt_len = pkt_len;
+		rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
 		rx_mb->port = rxq->port_id;
-
-		htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
-				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
-		if (qdev->rss_enable && htype) {
-			rx_mb->ol_flags |= PKT_RX_RSS_HASH;
-			rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
-			PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x",
-				   rx_mb->hash.rss);
+		rx_mb->ol_flags = ol_flags;
+		rx_mb->data_len = len;
+		rx_mb->vlan_tci = vlan_tci;
+		rx_mb->packet_type = packet_type;
+		PMD_RX_LOG(INFO, rxq, "pkt_type %04x len %04x flags %04lx\n",
+			   packet_type, len, (unsigned long)ol_flags);
+		if (!tpa_start_flg) {
+			rx_mb->nb_segs = fp_cqe->bd_num;
+			rx_mb->pkt_len = pkt_len;
 		}
-
 		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
-
-		if (CQE_HAS_VLAN(parse_flag)) {
-			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
-			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
-		}
-
-		if (CQE_HAS_OUTER_VLAN(parse_flag)) {
-			/* FW does not provide indication of Outer VLAN tag,
-			 * which is always stripped, so vlan_tci_outer is set
-			 * to 0. Here vlan_tag represents inner VLAN tag.
-			 */
-			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
-			rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
-			rx_mb->vlan_tci_outer = 0;
+tpa_end:
+		if (!tpa_start_flg) {
+			rx_pkts[rx_pkt] = rx_mb;
+			rx_pkt++;
 		}
-
-		rx_pkts[rx_pkt] = rx_mb;
-		rx_pkt++;
 next_cqe:
 		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
 		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1062,101 +1234,91 @@ next_cqe:
 	return rx_pkt;
 }
 
-static inline int
-qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
+static inline void
+qede_free_tx_pkt(struct qede_tx_queue *txq)
 {
-	uint16_t nb_segs, idx = TX_CONS(txq);
-	struct eth_tx_bd *tx_data_bd;
-	struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
-
-	if (unlikely(!mbuf)) {
-		PMD_TX_LOG(ERR, txq, "null mbuf");
-		PMD_TX_LOG(ERR, txq,
-			   "tx_desc %u tx_avail %u tx_cons %u tx_prod %u",
-			   txq->nb_tx_desc, txq->nb_tx_avail, idx,
-			   TX_PROD(txq));
-		return -1;
-	}
-
-	nb_segs = mbuf->nb_segs;
-	while (nb_segs) {
-		/* It's like consuming rxbuf in recv() */
+	struct rte_mbuf *mbuf;
+	uint16_t nb_segs;
+	uint16_t idx;
+	uint8_t nbds;
+
+	idx = TX_CONS(txq);
+	mbuf = txq->sw_tx_ring[idx].mbuf;
+	if (mbuf) {
+		nb_segs = mbuf->nb_segs;
+		PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
+		while (nb_segs) {
+			/* It's like consuming rxbuf in recv() */
+			ecore_chain_consume(&txq->tx_pbl);
+			txq->nb_tx_avail++;
+			nb_segs--;
+		}
+		rte_pktmbuf_free(mbuf);
+		txq->sw_tx_ring[idx].mbuf = NULL;
+		txq->sw_tx_cons++;
+		PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+	} else {
 		ecore_chain_consume(&txq->tx_pbl);
 		txq->nb_tx_avail++;
-		nb_segs--;
 	}
-	rte_pktmbuf_free(mbuf);
-	txq->sw_tx_ring[idx].mbuf = NULL;
-
-	return 0;
 }
 
-static inline uint16_t
+static inline void
 qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
 {
-	uint16_t tx_compl = 0;
 	uint16_t hw_bd_cons;
+	uint16_t sw_tx_cons;
 
-	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
 	rte_compiler_barrier();
-
-	while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
-		if (qede_free_tx_pkt(edev, txq)) {
-			PMD_TX_LOG(ERR, txq,
-				   "hw_bd_cons = %u, chain_cons = %u",
-				   hw_bd_cons,
-				   ecore_chain_get_cons_idx(&txq->tx_pbl));
-			break;
-		}
-		txq->sw_tx_cons++;	/* Making TXD available */
-		tx_compl++;
-	}
-
-	PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u",
-		   tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
-	return tx_compl;
+	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+	sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
+	PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
+		   abs(hw_bd_cons - sw_tx_cons));
+	while (hw_bd_cons !=  ecore_chain_get_cons_idx(&txq->tx_pbl))
+		qede_free_tx_pkt(txq);
 }
 
 /* Populate scatter gather buffer descriptor fields */
 static inline uint8_t
 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
-		  struct eth_tx_1st_bd *bd1)
+		  struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
 {
 	struct qede_tx_queue *txq = p_txq;
-	struct eth_tx_2nd_bd *bd2 = NULL;
-	struct eth_tx_3rd_bd *bd3 = NULL;
 	struct eth_tx_bd *tx_bd = NULL;
 	dma_addr_t mapping;
-	uint8_t nb_segs = 1; /* min one segment per packet */
+	uint8_t nb_segs = 0;
 
 	/* Check for scattered buffers */
 	while (m_seg) {
-		if (nb_segs == 1) {
-			bd2 = (struct eth_tx_2nd_bd *)
-				ecore_chain_produce(&txq->tx_pbl);
-			memset(bd2, 0, sizeof(*bd2));
+		if (nb_segs == 0) {
+			if (!*bd2) {
+				*bd2 = (struct eth_tx_2nd_bd *)
+					ecore_chain_produce(&txq->tx_pbl);
+				memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
+				nb_segs++;
+			}
 			mapping = rte_mbuf_data_dma_addr(m_seg);
-			QEDE_BD_SET_ADDR_LEN(bd2, mapping, m_seg->data_len);
-			PMD_TX_LOG(DEBUG, txq, "BD2 len %04x",
-				   m_seg->data_len);
-		} else if (nb_segs == 2) {
-			bd3 = (struct eth_tx_3rd_bd *)
-				ecore_chain_produce(&txq->tx_pbl);
-			memset(bd3, 0, sizeof(*bd3));
+			QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
+			PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
+		} else if (nb_segs == 1) {
+			if (!*bd3) {
+				*bd3 = (struct eth_tx_3rd_bd *)
+					ecore_chain_produce(&txq->tx_pbl);
+				memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
+				nb_segs++;
+			}
 			mapping = rte_mbuf_data_dma_addr(m_seg);
-			QEDE_BD_SET_ADDR_LEN(bd3, mapping, m_seg->data_len);
-			PMD_TX_LOG(DEBUG, txq, "BD3 len %04x",
-				   m_seg->data_len);
+			QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
+			PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
 		} else {
 			tx_bd = (struct eth_tx_bd *)
 				ecore_chain_produce(&txq->tx_pbl);
 			memset(tx_bd, 0, sizeof(*tx_bd));
+			nb_segs++;
 			mapping = rte_mbuf_data_dma_addr(m_seg);
 			QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
-			PMD_TX_LOG(DEBUG, txq, "BD len %04x",
-				   m_seg->data_len);
+			PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
 		}
-		nb_segs++;
 		m_seg = m_seg->next;
 	}
 
@@ -1164,59 +1326,209 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
 	return nb_segs;
 }
 
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+static inline void
+print_tx_bd_info(struct qede_tx_queue *txq,
+		 struct eth_tx_1st_bd *bd1,
+		 struct eth_tx_2nd_bd *bd2,
+		 struct eth_tx_3rd_bd *bd3,
+		 uint64_t tx_ol_flags)
+{
+	char ol_buf[256] = { 0 }; /* for verbose prints */
+
+	if (bd1)
+		PMD_TX_LOG(INFO, txq,
+			   "BD1: nbytes=%u nbds=%u bd_flags=04%x bf=%04x",
+			   rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
+			   bd1->data.bd_flags.bitfields,
+			   rte_cpu_to_le_16(bd1->data.bitfields));
+	if (bd2)
+		PMD_TX_LOG(INFO, txq,
+			   "BD2: nbytes=%u bf=%04x\n",
+			   rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1);
+	if (bd3)
+		PMD_TX_LOG(INFO, txq,
+			   "BD3: nbytes=%u bf=%04x mss=%u\n",
+			   rte_cpu_to_le_16(bd3->nbytes),
+			   rte_cpu_to_le_16(bd3->data.bitfields),
+			   rte_cpu_to_le_16(bd3->data.lso_mss));
+
+	rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
+	PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
+}
+#endif
+
+/* TX prepare to check packets meets TX conditions */
+uint16_t
+qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+		    uint16_t nb_pkts)
+{
+	struct qede_tx_queue *txq = p_txq;
+	uint64_t ol_flags;
+	struct rte_mbuf *m;
+	uint16_t i;
+	int ret;
+
+	for (i = 0; i < nb_pkts; i++) {
+		m = tx_pkts[i];
+		ol_flags = m->ol_flags;
+		if (ol_flags & PKT_TX_TCP_SEG) {
+			if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
+				rte_errno = -EINVAL;
+				break;
+			}
+			/* TBD: confirm it's ~9700B for both ? */
+			if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
+				rte_errno = -EINVAL;
+				break;
+			}
+		} else {
+			if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
+				rte_errno = -EINVAL;
+				break;
+			}
+		}
+		if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = -ENOTSUP;
+			break;
+		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			break;
+		}
+#endif
+		/* TBD: pseudo csum calculation required iff
+		 * ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE not set?
+		 */
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			break;
+		}
+	}
+
+	if (unlikely(i != nb_pkts))
+		PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
+			   nb_pkts - i);
+	return i;
+}
+
 uint16_t
 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct qede_tx_queue *txq = p_txq;
 	struct qede_dev *qdev = txq->qdev;
 	struct ecore_dev *edev = &qdev->edev;
-	struct qede_fastpath *fp;
-	struct eth_tx_1st_bd *bd1;
 	struct rte_mbuf *mbuf;
 	struct rte_mbuf *m_seg = NULL;
 	uint16_t nb_tx_pkts;
 	uint16_t bd_prod;
 	uint16_t idx;
-	uint16_t tx_count;
 	uint16_t nb_frags;
 	uint16_t nb_pkt_sent = 0;
-
-	fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];
+	uint8_t nbds;
+	bool ipv6_ext_flg;
+	bool lso_flg;
+	bool tunn_flg;
+	struct eth_tx_1st_bd *bd1;
+	struct eth_tx_2nd_bd *bd2;
+	struct eth_tx_3rd_bd *bd3;
+	uint64_t tx_ol_flags;
+	uint16_t hdr_size;
 
 	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
 		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
 			   nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
-		(void)qede_process_tx_compl(edev, txq);
-	}
-
-	nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail /
-			ETH_TX_MAX_BDS_PER_NON_LSO_PACKET));
-	if (unlikely(nb_tx_pkts == 0)) {
-		PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u",
-			   nb_pkts, txq->nb_tx_avail);
-		return 0;
+		qede_process_tx_compl(edev, txq);
 	}
 
-	tx_count = nb_tx_pkts;
+	nb_tx_pkts  = nb_pkts;
+	bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
 	while (nb_tx_pkts--) {
+		/* Init flags/values */
+		ipv6_ext_flg = false;
+		tunn_flg = false;
+		lso_flg = false;
+		nbds = 0;
+		bd1 = NULL;
+		bd2 = NULL;
+		bd3 = NULL;
+		hdr_size = 0;
+
+		mbuf = *tx_pkts;
+		assert(mbuf);
+
+		/* Check minimum TX BDS availability against available BDs */
+		if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
+			break;
+
+		tx_ol_flags = mbuf->ol_flags;
+
+#define RTE_ETH_IS_IPV6_HDR_EXT(ptype) ((ptype) & RTE_PTYPE_L3_IPV6_EXT)
+		if (RTE_ETH_IS_IPV6_HDR_EXT(mbuf->packet_type))
+			ipv6_ext_flg = true;
+
+		if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type))
+			tunn_flg = true;
+
+		if (tx_ol_flags & PKT_TX_TCP_SEG)
+			lso_flg = true;
+
+		if (lso_flg) {
+			if (unlikely(txq->nb_tx_avail <
+						ETH_TX_MIN_BDS_PER_LSO_PKT))
+				break;
+		} else {
+			if (unlikely(txq->nb_tx_avail <
+					ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
+				break;
+		}
+
+		if (tunn_flg && ipv6_ext_flg) {
+			if (unlikely(txq->nb_tx_avail <
+				ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
+				break;
+		}
+		if (ipv6_ext_flg) {
+			if (unlikely(txq->nb_tx_avail <
+					ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT))
+				break;
+		}
+
 		/* Fill the entry in the SW ring and the BDs in the FW ring */
 		idx = TX_PROD(txq);
-		mbuf = *tx_pkts++;
+		*tx_pkts++;
 		txq->sw_tx_ring[idx].mbuf = mbuf;
+
+		/* BD1 */
 		bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
-		bd1->data.bd_flags.bitfields =
+		memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
+		nbds++;
+
+		bd1->data.bd_flags.bitfields |=
 			1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
 		/* FW 8.10.x specific change */
-		bd1->data.bitfields =
+		if (!lso_flg) {
+			bd1->data.bitfields |=
 			(mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
 				<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
-		/* Map MBUF linear data for DMA and set in the first BD */
-		QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
-				     mbuf->data_len);
-		PMD_TX_LOG(INFO, txq, "BD1 len %04x", mbuf->data_len);
+			/* Map MBUF linear data for DMA and set in the BD1 */
+			QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+					     mbuf->data_len);
+		} else {
+			/* For LSO, packet header and payload must reside on
+			 * buffers pointed by different BDs. Using BD1 for HDR
+			 * and BD2 onwards for data.
+			 */
+			hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+			QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+					     hdr_size);
+		}
 
-		if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type)) {
-			PMD_TX_LOG(INFO, txq, "Tx tunnel packet");
+		if (tunn_flg) {
 			/* First indicate its a tunnel pkt */
 			bd1->data.bd_flags.bitfields |=
 				ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
@@ -1231,8 +1543,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 					1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
 
 			/* Outer IP checksum offload */
-			if (mbuf->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
-				PMD_TX_LOG(INFO, txq, "OuterIP csum offload");
+			if (tx_ol_flags & PKT_TX_OUTER_IP_CKSUM) {
 				bd1->data.bd_flags.bitfields |=
 					ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
 					ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
@@ -1245,43 +1556,79 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		}
 
 		/* Descriptor based VLAN insertion */
-		if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
-			PMD_TX_LOG(INFO, txq, "Insert VLAN 0x%x",
-				   mbuf->vlan_tci);
+		if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
 			bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
 			bd1->data.bd_flags.bitfields |=
 			    1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
 		}
 
+		if (lso_flg)
+			bd1->data.bd_flags.bitfields |=
+				1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
+
 		/* Offload the IP checksum in the hardware */
-		if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
-			PMD_TX_LOG(INFO, txq, "IP csum offload");
+		if ((lso_flg) || (tx_ol_flags & PKT_TX_IP_CKSUM))
 			bd1->data.bd_flags.bitfields |=
 			    1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-		}
 
 		/* L4 checksum offload (tcp or udp) */
-		if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-			PMD_TX_LOG(INFO, txq, "L4 csum offload");
+		if ((lso_flg) || (tx_ol_flags & (PKT_TX_TCP_CKSUM |
+						PKT_TX_UDP_CKSUM)))
+			/* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
 			bd1->data.bd_flags.bitfields |=
 			    1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
-			/* IPv6 + extn. -> later */
+
+		/* BD2 */
+		if (lso_flg || ipv6_ext_flg) {
+			bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
+							(&txq->tx_pbl);
+			memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
+			nbds++;
+			QEDE_BD_SET_ADDR_LEN(bd2,
+					    (hdr_size +
+					    rte_mbuf_data_dma_addr(mbuf)),
+					    mbuf->data_len - hdr_size);
+			/* TBD: check pseudo csum iff tx_prepare not called? */
+			if (ipv6_ext_flg) {
+				bd2->data.bitfields1 |=
+				ETH_L4_PSEUDO_CSUM_ZERO_LENGTH <<
+				ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
+			}
+		}
+
+		/* BD3 */
+		if (lso_flg || ipv6_ext_flg) {
+			bd3 = (struct eth_tx_3rd_bd *)ecore_chain_produce
+							(&txq->tx_pbl);
+			memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
+			nbds++;
+			if (lso_flg) {
+				bd3->data.lso_mss =
+					rte_cpu_to_le_16(mbuf->tso_segsz);
+				/* Using one header BD */
+				bd3->data.bitfields |=
+					rte_cpu_to_le_16(1 <<
+					ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+			}
 		}
 
 		/* Handle fragmented MBUF */
 		m_seg = mbuf->next;
 		/* Encode scatter gather buffer descriptors if required */
-		nb_frags = qede_encode_sg_bd(txq, m_seg, bd1);
-		bd1->data.nbds = nb_frags;
-		txq->nb_tx_avail -= nb_frags;
+		nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3);
+		bd1->data.nbds = nbds + nb_frags;
+		txq->nb_tx_avail -= bd1->data.nbds;
 		txq->sw_tx_prod++;
 		rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
 		bd_prod =
 		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+		print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
+		PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d ipv6_ext=%d\n",
+			   lso_flg, tunn_flg, ipv6_ext_flg);
+#endif
 		nb_pkt_sent++;
 		txq->xmit_pkts++;
-		PMD_TX_LOG(INFO, txq, "nbds = %d pkt_len = %04x",
-			   bd1->data.nbds, mbuf->pkt_len);
 	}
 
 	/* Write value of prod idx into bd_prod */
@@ -1292,10 +1639,10 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	rte_wmb();
 
 	/* Check again for Tx completions */
-	(void)qede_process_tx_compl(edev, txq);
+	qede_process_tx_compl(edev, txq);
 
-	PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d",
-		   nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
+	PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
+		   nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
 
 	return nb_pkt_sent;
 }
@@ -1380,8 +1727,7 @@ static int qede_drain_txq(struct qede_dev *qdev,
 		qede_process_tx_compl(edev, txq);
 		if (!cnt) {
 			if (allow_drain) {
-				DP_NOTICE(edev, false,
-					  "Tx queue[%u] is stuck,"
+				DP_ERR(edev, "Tx queue[%u] is stuck,"
 					  "requesting MCP to drain\n",
 					  txq->queue_id);
 				rc = qdev->ops->common->drain(edev);
@@ -1389,13 +1735,11 @@ static int qede_drain_txq(struct qede_dev *qdev,
 					return rc;
 				return qede_drain_txq(qdev, txq, false);
 			}
-
-			DP_NOTICE(edev, false,
-				  "Timeout waiting for tx queue[%d]:"
+			DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
 				  "PROD=%d, CONS=%d\n",
 				  txq->queue_id, txq->sw_tx_prod,
 				  txq->sw_tx_cons);
-			return -ENODEV;
+			return -1;
 		}
 		cnt--;
 		DELAY(1000);
@@ -1412,6 +1756,7 @@ static int qede_stop_queues(struct qede_dev *qdev)
 {
 	struct qed_update_vport_params vport_update_params;
 	struct ecore_dev *edev = &qdev->edev;
+	struct ecore_sge_tpa_params tpa_params;
 	struct qede_fastpath *fp;
 	int rc, tc, i;
 
@@ -1421,9 +1766,15 @@ static int qede_stop_queues(struct qede_dev *qdev)
 	vport_update_params.update_vport_active_flg = 1;
 	vport_update_params.vport_active_flg = 0;
 	vport_update_params.update_rss_flg = 0;
+	/* Disable TPA */
+	if (qdev->enable_lro) {
+		DP_INFO(edev, "Disabling LRO\n");
+		memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+		qede_update_sge_tpa_params(&tpa_params, qdev->mtu, false);
+		vport_update_params.sge_tpa_params = &tpa_params;
+	}
 
 	DP_INFO(edev, "Deactivate vport\n");
-
 	rc = qdev->ops->vport_update(edev, &vport_update_params);
 	if (rc) {
 		DP_ERR(edev, "Failed to update vport\n");
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 17a2f0c..c27632e 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -126,6 +126,19 @@
 
 #define QEDE_PKT_TYPE_TUNN_MAX_TYPE			0x20 /* 2^5 */
 
+#define QEDE_TX_CSUM_OFFLOAD_MASK (PKT_TX_IP_CKSUM              | \
+				   PKT_TX_TCP_CKSUM             | \
+				   PKT_TX_UDP_CKSUM             | \
+				   PKT_TX_OUTER_IP_CKSUM        | \
+				   PKT_TX_TCP_SEG)
+
+#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
+			      PKT_TX_QINQ_PKT           | \
+			      PKT_TX_VLAN_PKT)
+
+#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
+	(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
+
 /*
  * RX BD descriptor ring
  */
@@ -135,6 +148,19 @@ struct qede_rx_entry {
 	/* allows expansion .. */
 };
 
+/* TPA related structures */
+enum qede_agg_state {
+	QEDE_AGG_STATE_NONE  = 0,
+	QEDE_AGG_STATE_START = 1,
+	QEDE_AGG_STATE_ERROR = 2
+};
+
+struct qede_agg_info {
+	struct rte_mbuf *mbuf;
+	uint16_t start_cqe_bd_len;
+	uint8_t state; /* for sanity check */
+};
+
 /*
  * Structure associated with each RX queue.
  */
@@ -155,6 +181,7 @@ struct qede_rx_queue {
 	uint64_t rx_segs;
 	uint64_t rx_hw_errors;
 	uint64_t rx_alloc_errors;
+	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
 	struct qede_dev *qdev;
 	void *handle;
 };
@@ -232,6 +259,9 @@ void qede_free_mem_load(struct rte_eth_dev *eth_dev);
 uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
 
+uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+			     uint16_t nb_pkts);
+
 uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
 
-- 
1.7.10.3

  parent reply	other threads:[~2017-03-28  6:55 UTC|newest]

Thread overview: 329+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-02-27  7:56 [PATCH 00/61] net/qede/base: qede PMD enhancements Rasesh Mody
2017-02-27  7:56 ` [PATCH 01/61] net/qede/base: return an initialized return value Rasesh Mody
2017-02-27  7:56 ` [PATCH 02/61] send FW version driver state to MFW Rasesh Mody
2017-03-03 10:26   ` Ferruh Yigit
2017-02-27  7:56 ` [PATCH 03/61] net/qede/base: mask Rx buffer attention bits Rasesh Mody
2017-02-27  7:56 ` [PATCH 04/61] net/qede/base: print various indication on Tx-timeouts Rasesh Mody
2017-02-27  7:56 ` [PATCH 05/61] net/qede/base: utilize FW 8.18.9.0 Rasesh Mody
2017-02-27  7:56 ` [PATCH 06/61] drivers/net/qede: upgrade the FW to 8.18.9.0 Rasesh Mody
2017-02-27  7:56 ` [PATCH 07/61] net/qede/base: decrease MAX_HWFNS_PER_DEVICE from 4 to 2 Rasesh Mody
2017-02-27  7:56 ` [PATCH 08/61] net/qede/base: move mask constants defining NIC type Rasesh Mody
2017-02-27  7:56 ` [PATCH 09/61] net/qede/base: remove attribute field from update current config Rasesh Mody
2017-02-27  7:56 ` [PATCH 10/61] net/qede/base: add nvram options Rasesh Mody
2017-02-27  7:56 ` [PATCH 11/61] net/qede/base: add comment Rasesh Mody
2017-02-27  7:56 ` [PATCH 12/61] net/qede/base: use default mtu from shared memory Rasesh Mody
2017-02-27  7:56 ` [PATCH 13/61] net/qede/base: change queue/sb-id from 8 bit to 16 bit Rasesh Mody
2017-02-27  7:56 ` [PATCH 14/61] net/qede/base: update MFW when default mtu is changed Rasesh Mody
2017-02-27  7:56 ` [PATCH 15/61] net/qede/base: prevent device init failure Rasesh Mody
2017-02-27  7:56 ` [PATCH 16/61] net/qede/base: add support to read personality via MFW commands Rasesh Mody
2017-02-27  7:56 ` [PATCH 17/61] net/qede/base: allow probe to succeed with minor HW-issues Rasesh Mody
2017-02-27  7:56 ` [PATCH 18/61] net/qede/base: remove unneeded step in HW init Rasesh Mody
2017-02-27  7:56 ` [PATCH 19/61] net/qede/base: allow only trusted VFs to be promisc/multi-promisc Rasesh Mody
2017-02-27  7:56 ` [PATCH 20/61] net/qede/base: qm initialization revamp Rasesh Mody
2017-02-27  7:56 ` [PATCH 21/61] net/qede/base: add a printout of the FW, MFW and MBI versions Rasesh Mody
2017-02-27  7:56 ` [PATCH 22/61] net/qede/base: check active VF queues before stopping Rasesh Mody
2017-02-27  7:56 ` [PATCH 23/61] net/qede/base: set the drv_type before sending load request Rasesh Mody
2017-02-27  7:56 ` [PATCH 24/61] net/qede/base: prevent driver load with invalid resources Rasesh Mody
2017-02-27  7:56 ` [PATCH 25/61] net/qede/base: add interfaces for MFW TLV request processing Rasesh Mody
2017-02-27  7:56 ` [PATCH 26/61] net/qede/base: fix to set pointers to NULL after freeing Rasesh Mody
2017-02-27  7:56 ` [PATCH 27/61] net/qede/base: L2 handler changes Rasesh Mody
2017-02-27  7:56 ` [PATCH 28/61] net/qede/base: add support for handling TLV request from MFW Rasesh Mody
2017-02-27  7:56 ` [PATCH 29/61] net/qede/base: optimize cache-line access Rasesh Mody
2017-02-27  7:56 ` [PATCH 30/61] net/qede/base: infrastructure changes for VF tunnelling Rasesh Mody
2017-02-27  7:56 ` [PATCH 31/61] net/qede/base: revise tunnel APIs/structs Rasesh Mody
2017-02-27  7:56 ` [PATCH 32/61] net/qede/base: add tunnelling support for VFs Rasesh Mody
2017-02-27  7:56 ` [PATCH 33/61] net/qede/base: formatting changes Rasesh Mody
2017-02-27  7:56 ` [PATCH 34/61] net/qede/base: prevent transmitter stuck condition Rasesh Mody
2017-02-27  7:56 ` [PATCH 35/61] net/qede/base: add mask/shift defines for resource command Rasesh Mody
2017-02-27  7:56 ` [PATCH 36/61] net/qede/base: add API for using MFW resource lock Rasesh Mody
2017-02-27  7:56 ` [PATCH 37/61] net/qede/base: remove clock slowdown option Rasesh Mody
2017-02-27  7:56 ` [PATCH 38/61] net/qede/base: add new image types Rasesh Mody
2017-02-27  7:56 ` [PATCH 39/61] net/qede/base: use L2-handles for RSS configuration Rasesh Mody
2017-02-27  7:56 ` [PATCH 40/61] net/qede/base: change valloc to vzalloc Rasesh Mody
2017-02-27  7:56 ` [PATCH 41/61] net/qede/base: add support for previous driver unload Rasesh Mody
2017-02-27  7:56 ` [PATCH 42/61] net/qede/base: add non-l2 dcbx tlv application support Rasesh Mody
2017-02-27  7:56 ` [PATCH 43/61] net/qede/base: update bulletin board with link state during init Rasesh Mody
2017-02-27  7:57 ` [PATCH 44/61] net/qede/base: add coalescing support for VFs Rasesh Mody
2017-02-27  7:57 ` [PATCH 45/61] net/qede/base: add macro got resource value message Rasesh Mody
2017-02-27  7:57 ` [PATCH 46/61] net/qede/base: add mailbox for resource allocation Rasesh Mody
2017-02-27  7:57 ` [PATCH 47/61] net/qede/base: add macro for unsupported command Rasesh Mody
2017-02-27  7:57 ` [PATCH 48/61] net/qede/base: Add support to set max values of soft resources Rasesh Mody
2017-02-27  7:57 ` [PATCH 49/61] net/qede/base: add return code check Rasesh Mody
2017-02-27  7:57 ` [PATCH 50/61] net/qede/base: zero out MFW mailbox data Rasesh Mody
2017-02-27  7:57 ` [PATCH 51/61] net/qede/base: move code bits Rasesh Mody
2017-02-27  7:57 ` [PATCH 52/61] net/qede/base: add PF parameter Rasesh Mody
2017-02-27  7:57 ` [PATCH 53/61] net/qede/base: allow PMD to control vport-id and rss-eng-id Rasesh Mody
2017-02-27  7:57 ` [PATCH 54/61] net/qede/base: add udp ports in bulletin board message Rasesh Mody
2017-02-27  7:57 ` [PATCH 55/61] net/qede/base: prevent DMAE transactions during recovery Rasesh Mody
2017-02-27  7:57 ` [PATCH 56/61] net/qede/base: add multi-Txq support on same queue-zone for VFs Rasesh Mody
2017-02-27  7:57 ` [PATCH 57/61] net/qede/base: fix race cond between MFW attentions and PF stop Rasesh Mody
2017-02-27  7:57 ` [PATCH 58/61] net/qede/base: semantic changes Rasesh Mody
2017-02-27  7:57 ` [PATCH 59/61] net/qede/base: add support for arfs mode Rasesh Mody
2017-02-27  7:57 ` [PATCH 60/61] net/qede: add ntuple and flow director filter support Rasesh Mody
2017-02-27  7:57 ` [PATCH 61/61] net/qede: add LRO/TSO offloads support Rasesh Mody
2017-03-03 10:25 ` [PATCH 00/61] net/qede/base: qede PMD enhancements Ferruh Yigit
2017-03-18  7:05   ` [PATCH v2 " Rasesh Mody
2017-03-20 16:59     ` Ferruh Yigit
2017-03-24  7:27       ` [PATCH v3 " Rasesh Mody
2017-03-24 11:08         ` Ferruh Yigit
2017-03-28  6:42           ` [PATCH 01/62] net/qede/base: return an initialized return value Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 00/62] net/qede/base: update PMD to 2.4.0.1 Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 " Rasesh Mody
2017-03-30 12:23               ` Ferruh Yigit
2017-03-29 20:36             ` [PATCH v5 01/62] net/qede/base: return an initialized return value Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 02/62] net/qede/base: send FW version driver state to MFW Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 03/62] net/qede/base: mask Rx buffer attention bits Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 04/62] net/qede/base: print various indication on Tx-timeouts Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 05/62] net/qede/base: utilize FW 8.18.9.0 Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 06/62] net/qede: upgrade the FW to 8.18.9.0 Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 07/62] net/qede/base: decrease maximum HW func per device Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 08/62] net/qede/base: move mask constants defining NIC type Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 09/62] net/qede/base: remove attribute from update current config Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 10/62] net/qede/base: add nvram options Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 11/62] net/qede/base: add comment Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 12/62] net/qede/base: use default MTU from shared memory Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 13/62] net/qede/base: change queue/sb-id from 8 bit to 16 bit Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 14/62] net/qede/base: update MFW when default MTU is changed Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 15/62] net/qede/base: prevent device init failure Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 16/62] net/qede/base: read card personality via MFW commands Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 17/62] net/qede/base: allow probe to succeed with minor HW-issues Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 18/62] net/qede/base: remove unneeded step in HW init Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 19/62] net/qede/base: allow only trusted VFs to be promisc Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 20/62] net/qede/base: qm initialization revamp Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 21/62] net/qede/base: print firmware MFW and MBI versions Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 22/62] net/qede/base: check active VF queues before stopping Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 23/62] net/qede/base: set driver type before sending load request Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 24/62] net/qede/base: prevent driver load with invalid resources Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 25/62] net/qede/base: add interfaces for MFW TLV request processing Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 26/62] net/qede/base: code refactoring of SP queues Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 27/62] net/qede/base: make L2 queues handle based Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 28/62] net/qede/base: add support for handling TLV request from MFW Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 29/62] net/qede/base: optimize cache-line access Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 30/62] net/qede/base: infrastructure changes for VF tunnelling Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 31/62] net/qede/base: revise tunnel APIs/structs Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 32/62] net/qede/base: add tunnelling support for VFs Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 33/62] net/qede/base: formatting changes Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 34/62] net/qede/base: prevent transmitter stuck condition Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 35/62] net/qede/base: add mask/shift defines for resource command Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 36/62] net/qede/base: add API for using MFW resource lock Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 37/62] net/qede/base: remove clock slowdown option Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 38/62] net/qede/base: add new image types Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 39/62] net/qede/base: use L2-handles for RSS configuration Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 40/62] net/qede/base: change valloc to vzalloc Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 41/62] net/qede/base: add support for previous driver unload Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 42/62] net/qede/base: add non-L2 dcbx tlv application support Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 43/62] net/qede/base: update bulletin board during VF init Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 44/62] net/qede/base: add coalescing support for VFs Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 45/62] net/qede/base: add macro got resource value message Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 46/62] net/qede/base: add mailbox for resource allocation Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 47/62] net/qede/base: add macro for unsupported command Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 48/62] net/qede/base: set max values for soft resources Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 49/62] net/qede/base: add return code check Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 50/62] net/qede/base: zero out MFW mailbox data Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 51/62] net/qede/base: move code bits Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 52/62] net/qede/base: add PF parameter Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 53/62] net/qede/base: allow PMD to control vport and RSS engine ids Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 54/62] net/qede/base: add udp ports in bulletin board message Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 55/62] net/qede/base: prevent DMAE transactions during recovery Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 56/62] net/qede/base: multi-Txq support on same queue-zone for VFs Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 57/62] net/qede/base: prevent race condition during unload Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 58/62] net/qede/base: semantic changes Rasesh Mody
2017-03-29 20:36             ` [PATCH v5 59/62] net/qede/base: add support for arfs mode Rasesh Mody
2017-03-29 20:37             ` [PATCH v5 60/62] net/qede: add ntuple and flow director filter support Rasesh Mody
2017-03-29 20:37             ` [PATCH v5 61/62] net/qede: add LRO/TSO offloads support Rasesh Mody
2017-03-29 20:37             ` [PATCH v5 62/62] net/qede: update PMD version to 2.4.0.1 Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 01/62] net/qede/base: return an initialized return value Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 02/62] net/qede/base: send FW version driver state to MFW Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 03/62] net/qede/base: mask Rx buffer attention bits Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 04/62] net/qede/base: print various indication on Tx-timeouts Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 05/62] net/qede/base: utilize FW 8.18.9.0 Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 06/62] net/qede: upgrade the FW to 8.18.9.0 Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 07/62] net/qede/base: decrease maximum HW func per device Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 08/62] net/qede/base: move mask constants defining NIC type Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 09/62] net/qede/base: remove attribute from update current config Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 10/62] net/qede/base: add nvram options Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 11/62] net/qede/base: add comment Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 12/62] net/qede/base: use default MTU from shared memory Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 13/62] net/qede/base: change queue/sb-id from 8 bit to 16 bit Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 14/62] net/qede/base: update MFW when default MTU is changed Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 15/62] net/qede/base: prevent device init failure Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 16/62] net/qede/base: read card personality via MFW commands Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 17/62] net/qede/base: allow probe to succeed with minor HW-issues Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 18/62] net/qede/base: remove unneeded step in HW init Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 19/62] net/qede/base: allow only trusted VFs to be promisc Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 20/62] net/qede/base: qm initialization revamp Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 21/62] net/qede/base: print firmware MFW and MBI versions Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 22/62] net/qede/base: check active VF queues before stopping Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 23/62] net/qede/base: set driver type before sending load request Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 24/62] net/qede/base: prevent driver load with invalid resources Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 25/62] net/qede/base: add interfaces for MFW TLV request processing Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 26/62] net/qede/base: code refactoring of SP queues Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 27/62] net/qede/base: make L2 queues handle based Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 28/62] net/qede/base: add support for handling TLV request from MFW Rasesh Mody
2017-03-28  6:51           ` [PATCH v4 29/62] net/qede/base: optimize cache-line access Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 30/62] net/qede/base: infrastructure changes for VF tunnelling Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 31/62] net/qede/base: revise tunnel APIs/structs Rasesh Mody
2017-03-28 11:22             ` Ferruh Yigit
2017-03-28 21:18               ` Mody, Rasesh
2017-03-29  9:23                 ` Ferruh Yigit
2017-03-29 20:48                   ` Mody, Rasesh
2017-03-28  6:52           ` [PATCH v4 32/62] net/qede/base: add tunnelling support for VFs Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 33/62] net/qede/base: formatting changes Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 34/62] net/qede/base: prevent transmitter stuck condition Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 35/62] net/qede/base: add mask/shift defines for resource command Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 36/62] net/qede/base: add API for using MFW resource lock Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 37/62] net/qede/base: remove clock slowdown option Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 38/62] net/qede/base: add new image types Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 39/62] net/qede/base: use L2-handles for RSS configuration Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 40/62] net/qede/base: change valloc to vzalloc Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 41/62] net/qede/base: add support for previous driver unload Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 42/62] net/qede/base: add non-L2 dcbx tlv application support Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 43/62] net/qede/base: update bulletin board during VF init Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 44/62] net/qede/base: add coalescing support for VFs Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 45/62] net/qede/base: add macro got resource value message Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 46/62] net/qede/base: add mailbox for resource allocation Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 47/62] net/qede/base: add macro for unsupported command Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 48/62] net/qede/base: set max values for soft resources Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 49/62] net/qede/base: add return code check Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 50/62] net/qede/base: zero out MFW mailbox data Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 51/62] net/qede/base: move code bits Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 52/62] net/qede/base: add PF parameter Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 53/62] net/qede/base: allow PMD to control vport and RSS engine ids Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 54/62] net/qede/base: add udp ports in bulletin board message Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 55/62] net/qede/base: prevent DMAE transactions during recovery Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 56/62] net/qede/base: multi-Txq support on same queue-zone for VFs Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 57/62] net/qede/base: prevent race condition during unload Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 58/62] net/qede/base: semantic changes Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 59/62] net/qede/base: add support for arfs mode Rasesh Mody
2017-03-28  6:52           ` [PATCH v4 60/62] net/qede: add ntuple and flow director filter support Rasesh Mody
2017-03-28  6:52           ` Rasesh Mody [this message]
2017-03-28  6:52           ` [PATCH v4 62/62] net/qede: update PMD version to 2.4.0.1 Rasesh Mody
     [not found]           ` <1490683278-23776-1-git-send-email-y>
2017-03-28  6:54             ` [PATCH 00/62] net/qede/base: update PMD " Mody, Rasesh
2017-03-24  7:27       ` [PATCH v3 01/61] net/qede/base: return an initialized return value Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 02/61] net/qede/base: send FW version driver state to MFW Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 03/61] net/qede/base: mask Rx buffer attention bits Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 04/61] net/qede/base: print various indication on Tx-timeouts Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 05/61] net/qede/base: utilize FW 8.18.9.0 Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 06/61] net/qede: upgrade the FW to 8.18.9.0 Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 07/61] net/qede/base: decrease maximum HW func per device Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 08/61] net/qede/base: move mask constants defining NIC type Rasesh Mody
2017-03-24  7:27       ` [PATCH v3 09/61] net/qede/base: remove attribute from update current config Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 10/61] net/qede/base: add nvram options Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 11/61] net/qede/base: add comment Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 12/61] net/qede/base: use default MTU from shared memory Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 13/61] net/qede/base: change queue/sb-id from 8 bit to 16 bit Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 14/61] net/qede/base: update MFW when default MTU is changed Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 15/61] net/qede/base: prevent device init failure Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 16/61] net/qede/base: read card personality via MFW commands Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 17/61] net/qede/base: allow probe to succeed with minor HW-issues Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 18/61] net/qede/base: remove unneeded step in HW init Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 19/61] net/qede/base: allow only trusted VFs to be promisc Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 20/61] net/qede/base: qm initialization revamp Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 21/61] net/qede/base: print firmware MFW and MBI versions Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 22/61] net/qede/base: check active VF queues before stopping Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 23/61] net/qede/base: set driver type before sending load request Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 24/61] net/qede/base: prevent driver load with invalid resources Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 25/61] net/qede/base: add interfaces for MFW TLV request processing Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 26/61] net/qede/base: code refactoring of SP queues Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 27/61] net/qede/base: make L2 queues handle based Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 28/61] net/qede/base: add support for handling TLV request from MFW Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 29/61] net/qede/base: optimize cache-line access Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 30/61] net/qede/base: infrastructure changes for VF tunnelling Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 31/61] net/qede/base: revise tunnel APIs/structs Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 32/61] net/qede/base: add tunnelling support for VFs Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 33/61] net/qede/base: formatting changes Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 34/61] net/qede/base: prevent transmitter stuck condition Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 35/61] net/qede/base: add mask/shift defines for resource command Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 36/61] net/qede/base: add API for using MFW resource lock Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 37/61] net/qede/base: remove clock slowdown option Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 38/61] net/qede/base: add new image types Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 39/61] net/qede/base: use L2-handles for RSS configuration Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 40/61] net/qede/base: change valloc to vzalloc Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 41/61] net/qede/base: add support for previous driver unload Rasesh Mody
2017-03-24 11:00         ` Ferruh Yigit
2017-03-25  6:25           ` Mody, Rasesh
2017-03-24  7:28       ` [PATCH v3 42/61] net/qede/base: add non-L2 dcbx tlv application support Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 43/61] net/qede/base: update bulletin board during VF init Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 44/61] net/qede/base: add coalescing support for VFs Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 45/61] net/qede/base: add macro for resource value message Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 46/61] net/qede/base: add mailbox for resource allocation Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 47/61] net/qede/base: add macro for unsupported command Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 48/61] net/qede/base: set max values for soft resources Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 49/61] net/qede/base: add return code check Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 50/61] net/qede/base: zero out MFW mailbox data Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 51/61] net/qede/base: move code bits Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 52/61] net/qede/base: add PF parameter Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 53/61] net/qede/base: allow PMD to control vport and RSS engine ids Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 54/61] net/qede/base: add udp ports in bulletin board message Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 55/61] net/qede/base: prevent DMAE transactions during recovery Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 56/61] net/qede/base: multi-Txq support on same queue-zone for VFs Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 57/61] net/qede/base: prevent race condition during unload Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 58/61] net/qede/base: semantic changes Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 59/61] net/qede/base: add support for arfs mode Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 60/61] net/qede: add ntuple and flow director filter support Rasesh Mody
2017-03-24  7:28       ` [PATCH v3 61/61] net/qede: add LRO/TSO offloads support Rasesh Mody
2017-03-24  7:45       ` [PATCH v2 00/61] net/qede/base: qede PMD enhancements Mody, Rasesh
2017-03-18  7:05   ` [PATCH v2 01/61] net/qede/base: return an initialized return value Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 02/61] net/qede/base: send FW version driver state to MFW Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 03/61] net/qede/base: mask Rx buffer attention bits Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 04/61] net/qede/base: print various indication on Tx-timeouts Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 05/61] net/qede/base: utilize FW 8.18.9.0 Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 06/61] net/qede: upgrade the FW to 8.18.9.0 Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 07/61] net/qede/base: decrease maximum HW func per device Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 08/61] net/qede/base: move mask constants defining NIC type Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 09/61] net/qede/base: remove attribute from update current config Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 10/61] net/qede/base: add nvram options Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 11/61] net/qede/base: add comment Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 12/61] net/qede/base: use default MTU from shared memory Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 13/61] net/qede/base: change queue/sb-id from 8 bit to 16 bit Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 14/61] net/qede/base: update MFW when default MTU is changed Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 15/61] net/qede/base: prevent device init failure Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 16/61] net/qede/base: read card personality via MFW commands Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 17/61] net/qede/base: allow probe to succeed with minor HW-issues Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 18/61] net/qede/base: remove unneeded step in HW init Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 19/61] net/qede/base: allow only trusted VFs to be promisc Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 20/61] net/qede/base: qm initialization revamp Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 21/61] net/qede/base: print firmware MFW and MBI versions Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 22/61] net/qede/base: check active VF queues before stopping Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 23/61] net/qede/base: set driver type before sending load request Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 24/61] net/qede/base: prevent driver load with invalid resources Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 25/61] net/qede/base: add interfaces for MFW TLV request processing Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 26/61] net/qede/base: code refactoring of SP queues Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 27/61] net/qede/base: make L2 queues handle based Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 28/61] net/qede/base: add support for handling TLV request from MFW Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 29/61] net/qede/base: optimize cache-line access Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 30/61] net/qede/base: infrastructure changes for VF tunnelling Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 31/61] net/qede/base: revise tunnel APIs/structs Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 32/61] net/qede/base: add tunnelling support for VFs Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 33/61] net/qede/base: formatting changes Rasesh Mody
2017-03-18  7:05   ` [PATCH v2 34/61] net/qede/base: prevent transmitter stuck condition Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 35/61] net/qede/base: add mask/shift defines for resource command Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 36/61] net/qede/base: add API for using MFW resource lock Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 37/61] net/qede/base: remove clock slowdown option Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 38/61] net/qede/base: add new image types Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 39/61] net/qede/base: use L2-handles for RSS configuration Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 40/61] net/qede/base: change valloc to vzalloc Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 41/61] net/qede/base: add support for previous driver unload Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 42/61] net/qede/base: add non-L2 dcbx tlv application support Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 43/61] net/qede/base: update bulletin board during VF init Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 44/61] net/qede/base: add coalescing support for VFs Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 45/61] net/qede/base: add macro for resource value message Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 46/61] net/qede/base: add mailbox for resource allocation Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 47/61] net/qede/base: add macro for unsupported command Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 48/61] net/qede/base: set max values for soft resources Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 49/61] net/qede/base: add return code check Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 50/61] net/qede/base: zero out MFW mailbox data Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 51/61] net/qede/base: move code bits Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 52/61] net/qede/base: add PF parameter Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 53/61] net/qede/base: allow PMD to control vport and RSS engine ids Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 54/61] net/qede/base: add udp ports in bulletin board message Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 55/61] net/qede/base: prevent DMAE transactions during recovery Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 56/61] net/qede/base: multi-Txq support on same queue-zone for VFs Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 57/61] net/qede/base: prevent race condition during unload Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 58/61] net/qede/base: semantic changes Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 59/61] net/qede/base: add support for arfs mode Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 60/61] net/qede: add ntuple and flow director filter support Rasesh Mody
2017-03-18  7:06   ` [PATCH v2 61/61] net/qede: add LRO/TSO offloads support Rasesh Mody
2017-03-24 11:58     ` Ferruh Yigit
2017-03-25  6:28       ` Mody, Rasesh
2017-03-18  7:18   ` [PATCH 00/61] net/qede/base: qede PMD enhancements Mody, Rasesh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1490683952-24919-62-git-send-email-rasesh.mody@cavium.com \
    --to=rasesh.mody@cavium.com \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@intel.com \
    --cc=harish.patil@qlogic.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.