From: <manish.rangankar@cavium.com>
To: <lduncan@suse.com>, <cleech@redhat.com>
Cc: <martin.petersen@oracle.com>, <jejb@linux.vnet.ibm.com>,
	<linux-scsi@vger.kernel.org>, <netdev@vger.kernel.org>,
	<Yuval.Mintz@cavium.com>, <QLogic-Storage-Upstream@cavium.com>,
	Yuval Mintz <Yuval.Mintz@qlogic.com>,
	Arun Easi <arun.easi@cavium.com>,
	Yuval Mintz <yuval.mintz@cavium.com>
Subject: [RFC 2/6] qed: Add iSCSI out of order packet handling.
Date: Wed, 19 Oct 2016 01:01:09 -0400
Message-ID: <1476853273-22960-3-git-send-email-manish.rangankar@cavium.com>
In-Reply-To: <1476853273-22960-1-git-send-email-manish.rangankar@cavium.com>

From: Yuval Mintz <Yuval.Mintz@qlogic.com>

This patch adds out-of-order packet handling for hardware-offloaded
iSCSI. Out-of-order handling requires the driver to allocate buffers for
the out-of-order segments and to assist the device in tracking them and
replaying them once the missing segments arrive.

Signed-off-by: Arun Easi <arun.easi@cavium.com>
Signed-off-by: Yuval Mintz <yuval.mintz@cavium.com>
---
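
A simplified, stand-alone sketch of the isle/archipelago bookkeeping that
qed_ooo.c below implements: out-of-order data is buffered per connection
as "isles" (contiguous runs of segments) grouped into one "archipelago"
per CID. The types, names and the seq-number parameter in this sketch are
invented for illustration only and are not the driver's API.

/* Illustrative model only -- see qed_ooo.c for the real implementation. */
#include <stdio.h>
#include <stdlib.h>

struct seg { int seq; struct seg *next; };            /* one buffered segment */
struct isle { struct seg *segs; struct isle *next; }; /* contiguous run */
struct archipelago { int cid; struct isle *isles; };  /* all isles of one CID */

/* Models TCP_EVENT_ADD_NEW_ISLE: an out-of-order segment that adjoins no
 * existing isle starts a new one.
 */
static void add_new_isle(struct archipelago *a, int seq)
{
	struct isle *i = calloc(1, sizeof(*i));
	struct seg *s = calloc(1, sizeof(*s));

	if (!i || !s)
		exit(1);
	s->seq = seq;
	i->segs = s;
	i->next = a->isles;	/* simplified: the driver keeps isles ordered */
	a->isles = i;
}

int main(void)
{
	struct archipelago a = { .cid = 5, .isles = NULL };

	add_new_isle(&a, 1000);		/* out-of-order segment for cid 5 */
	printf("cid %d: first isle starts at seq %d\n",
	       a.cid, a.isles->segs->seq);

	free(a.isles->segs);
	free(a.isles);
	return 0;
}
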
 drivers/net/ethernet/qlogic/qed/Makefile   |   2 +-
 drivers/net/ethernet/qlogic/qed/qed.h      |   1 +
 drivers/net/ethernet/qlogic/qed/qed_dev.c  |  14 +-
 drivers/net/ethernet/qlogic/qed/qed_ll2.c  | 559 +++++++++++++++++++++++++++--
 drivers/net/ethernet/qlogic/qed/qed_ll2.h  |   9 +
 drivers/net/ethernet/qlogic/qed/qed_ooo.c  | 510 ++++++++++++++++++++++++++
 drivers/net/ethernet/qlogic/qed/qed_ooo.h  | 116 ++++++
 drivers/net/ethernet/qlogic/qed/qed_roce.c |   1 +
 drivers/net/ethernet/qlogic/qed/qed_spq.c  |   9 +
 9 files changed, 1195 insertions(+), 26 deletions(-)
 create mode 100644 drivers/net/ethernet/qlogic/qed/qed_ooo.c
 create mode 100644 drivers/net/ethernet/qlogic/qed/qed_ooo.h

diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index b76669c..9121bf0 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -6,4 +6,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
 qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
 qed-$(CONFIG_QED_LL2) += qed_ll2.o
 qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
-qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
+qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index a61b1c0..e5626ae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -380,6 +380,7 @@ struct qed_hwfn {
 	/* Protocol related */
 	bool				using_ll2;
 	struct qed_ll2_info		*p_ll2_info;
+	struct qed_ooo_info		*p_ooo_info;
 	struct qed_rdma_info		*p_rdma_info;
 	struct qed_iscsi_info		*p_iscsi_info;
 	struct qed_pf_params		pf_params;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index a4234c0..060e9a4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -32,6 +32,7 @@
 #include "qed_iscsi.h"
 #include "qed_ll2.h"
 #include "qed_mcp.h"
+#include "qed_ooo.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
@@ -157,8 +158,10 @@ void qed_resc_free(struct qed_dev *cdev)
 		qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
 #endif
 		if (IS_ENABLED(CONFIG_QEDI) &&
-				p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+				p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
 			qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
+			qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
+		}
 		qed_iov_free(p_hwfn);
 		qed_dmae_info_free(p_hwfn);
 		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -416,6 +419,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 int qed_resc_alloc(struct qed_dev *cdev)
 {
 	struct qed_iscsi_info *p_iscsi_info;
+	struct qed_ooo_info *p_ooo_info;
 #ifdef CONFIG_QED_LL2
 	struct qed_ll2_info *p_ll2_info;
 #endif
@@ -543,6 +547,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
 			if (!p_iscsi_info)
 				goto alloc_no_mem;
 			p_hwfn->p_iscsi_info = p_iscsi_info;
+			p_ooo_info = qed_ooo_alloc(p_hwfn);
+			if (!p_ooo_info)
+				goto alloc_no_mem;
+			p_hwfn->p_ooo_info = p_ooo_info;
 		}
 
 		/* DMA info initialization */
@@ -598,8 +606,10 @@ void qed_resc_setup(struct qed_dev *cdev)
 			qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
 #endif
 		if (IS_ENABLED(CONFIG_QEDI) &&
-				p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+				p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
 			qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
+			qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
+		}
 	}
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index e67f3c9..4ce12e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -36,6 +36,7 @@
 #include "qed_int.h"
 #include "qed_ll2.h"
 #include "qed_mcp.h"
+#include "qed_ooo.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 
@@ -295,27 +296,36 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		list_del(&p_pkt->list_entry);
 		b_last_packet = list_empty(&p_tx->active_descq);
 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
-		p_tx->cur_completing_packet = *p_pkt;
-		p_tx->cur_completing_bd_idx = 1;
-		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
-		tx_frag = p_pkt->bds_set[0].tx_frag;
+		if (IS_ENABLED(CONFIG_QEDI) &&
+			p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+			struct qed_ooo_buffer *p_buffer;
+
+			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						p_buffer);
+		} else {
+			p_tx->cur_completing_packet = *p_pkt;
+			p_tx->cur_completing_bd_idx = 1;
+			b_last_frag = p_tx->cur_completing_bd_idx ==
+				p_pkt->bd_used;
+			tx_frag = p_pkt->bds_set[0].tx_frag;
 #if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-		if (p_ll2_conn->gsi_enable)
-			qed_ll2b_release_tx_gsi_packet(p_hwfn,
-						       p_ll2_conn->my_id,
-						       p_pkt->cookie,
-						       tx_frag,
-						       b_last_frag,
-						       b_last_packet);
-		else
+			if (p_ll2_conn->gsi_enable)
+				qed_ll2b_release_tx_gsi_packet(p_hwfn,
+					       p_ll2_conn->my_id,
+					       p_pkt->cookie,
+					       tx_frag,
+					       b_last_frag,
+					       b_last_packet);
+			else
 #endif
-			qed_ll2b_complete_tx_packet(p_hwfn,
+				qed_ll2b_complete_tx_packet(p_hwfn,
 						    p_ll2_conn->my_id,
 						    p_pkt->cookie,
 						    tx_frag,
 						    b_last_frag,
 						    b_last_packet);
-
+		}
 	}
 }
 
@@ -546,13 +556,466 @@ void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		list_del(&p_pkt->list_entry);
 		list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-		rx_buf_addr = p_pkt->rx_buf_addr;
-		cookie = p_pkt->cookie;
+		if (IS_ENABLED(CONFIG_QEDI) &&
+			p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+			struct qed_ooo_buffer *p_buffer;
+
+			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						p_buffer);
+		} else {
+			rx_buf_addr = p_pkt->rx_buf_addr;
+			cookie = p_pkt->cookie;
+
+			b_last = list_empty(&p_rx->active_descq);
+		}
+	}
+}
+
+#if IS_ENABLED(CONFIG_QEDI)
+static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
+{
+	u8 bd_flags = 0;
+
+	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
+		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+
+	return bd_flags;
+}
+
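+/* Rx completion handler for the iSCSI OOO loopback connection: classify the
+ * TCP event reported in each CQE, update the isle state for that CID, then
+ * re-post free Rx buffers and send any now-in-order ("ready") buffers back
+ * out through the loopback Tx queue.
+ */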
+static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+	u16 packet_length = 0, parse_flags = 0, vlan = 0;
+	struct qed_ll2_rx_packet *p_pkt = NULL;
+	u32 num_ooo_add_to_peninsula = 0, cid;
+	union core_rx_cqe_union *cqe = NULL;
+	u16 cq_new_idx = 0, cq_old_idx = 0;
+	struct qed_ooo_buffer *p_buffer;
+	struct ooo_opaque *iscsi_ooo;
+	u8 placement_offset = 0;
+	u8 cqe_type;
+	int rc;
+
+	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+	if (cq_new_idx == cq_old_idx)
+		return 0;
+
+	while (cq_new_idx != cq_old_idx) {
+		struct core_rx_fast_path_cqe *p_cqe_fp;
+
+		cqe = qed_chain_consume(&p_rx->rcq_chain);
+		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+		cqe_type = cqe->rx_cqe_sp.type;
+
+		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+			DP_NOTICE(p_hwfn,
+				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
+				  cqe_type);
+			return -EINVAL;
+		}
+		p_cqe_fp = &cqe->rx_cqe_fp;
+
+		placement_offset = p_cqe_fp->placement_offset;
+		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
+		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
+		vlan = le16_to_cpu(p_cqe_fp->vlan);
+		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
+		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
+					   iscsi_ooo);
+		cid = le32_to_cpu(iscsi_ooo->cid);
+
+		/* Process delete isle first */
+		if (iscsi_ooo->drop_size)
+			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
+					     iscsi_ooo->drop_isle,
+					     iscsi_ooo->drop_size);
+
+		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
+			continue;
+
+		/* Now process create/add/join isles */
+		if (list_empty(&p_rx->active_descq)) {
+			DP_NOTICE(p_hwfn,
+				  "LL2 OOO RX chain has no submitted buffers\n");
+			return -EIO;
+		}
+
+		p_pkt = list_first_entry(&p_rx->active_descq,
+					 struct qed_ll2_rx_packet, list_entry);
+
+		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
+		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
+		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
+		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
+		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
+			if (!p_pkt) {
+				DP_NOTICE(p_hwfn,
+					  "LL2 OOO RX packet is not valid\n");
+				return -EIO;
+			}
+			list_del(&p_pkt->list_entry);
+			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+			p_buffer->packet_length = packet_length;
+			p_buffer->parse_flags = parse_flags;
+			p_buffer->vlan = vlan;
+			p_buffer->placement_offset = placement_offset;
+			qed_chain_consume(&p_rx->rxq_chain);
+			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+			switch (iscsi_ooo->ooo_opcode) {
+			case TCP_EVENT_ADD_NEW_ISLE:
+				qed_ooo_add_new_isle(p_hwfn,
+						     p_hwfn->p_ooo_info,
+						     cid,
+						     iscsi_ooo->ooo_isle,
+						     p_buffer);
+				break;
+			case TCP_EVENT_ADD_ISLE_RIGHT:
+				qed_ooo_add_new_buffer(p_hwfn,
+						       p_hwfn->p_ooo_info,
+						       cid,
+						       iscsi_ooo->ooo_isle,
+						       p_buffer,
+						       QED_OOO_RIGHT_BUF);
+				break;
+			case TCP_EVENT_ADD_ISLE_LEFT:
+				qed_ooo_add_new_buffer(p_hwfn,
+						       p_hwfn->p_ooo_info,
+						       cid,
+						       iscsi_ooo->ooo_isle,
+						       p_buffer,
+						       QED_OOO_LEFT_BUF);
+				break;
+			case TCP_EVENT_JOIN:
+				qed_ooo_add_new_buffer(p_hwfn,
+						       p_hwfn->p_ooo_info,
+						       cid,
+						       iscsi_ooo->ooo_isle +
+						       1,
+						       p_buffer,
+						       QED_OOO_LEFT_BUF);
+				qed_ooo_join_isles(p_hwfn,
+						   p_hwfn->p_ooo_info,
+						   cid, iscsi_ooo->ooo_isle);
+				break;
+			case TCP_EVENT_ADD_PEN:
+				num_ooo_add_to_peninsula++;
+				qed_ooo_put_ready_buffer(p_hwfn,
+							 p_hwfn->p_ooo_info,
+							 p_buffer, true);
+				break;
+			}
+		} else {
+			DP_NOTICE(p_hwfn,
+				  "Unexpected event (%d) TX OOO completion\n",
+				  iscsi_ooo->ooo_opcode);
+		}
+	}
 
-		b_last = list_empty(&p_rx->active_descq);
+	/* Submit RX buffer here */
+	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+						   p_hwfn->p_ooo_info))) {
+		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
+					    p_buffer->rx_buffer_phys_addr,
+					    0, p_buffer, true);
+		if (rc) {
+			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						p_buffer);
+			break;
+		}
 	}
+
+	/* Submit Tx buffers here */
+	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
+						    p_hwfn->p_ooo_info))) {
+		u16 l4_hdr_offset_w = 0;
+		dma_addr_t first_frag;
+		u8 bd_flags = 0;
+
+		first_frag = p_buffer->rx_buffer_phys_addr +
+			     p_buffer->placement_offset;
+		parse_flags = p_buffer->parse_flags;
+		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
+		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+
+		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
+					       p_buffer->vlan, bd_flags,
+					       l4_hdr_offset_w,
+					       p_ll2_conn->tx_dest, 0,
+					       first_frag,
+					       p_buffer->packet_length,
+					       p_buffer, true);
+		if (rc) {
+			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						 p_buffer, false);
+			break;
+		}
+	}
+
+	return 0;
 }
 
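+/* Tx completion on the loopback queue: each completed BD returns its OOO
+ * buffer, which is immediately re-posted to the Rx chain (or parked on the
+ * free list if re-posting fails); any buffers that have become in-order are
+ * then transmitted.
+ */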
+static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+	struct qed_ll2_tx_packet *p_pkt = NULL;
+	struct qed_ooo_buffer *p_buffer;
+	bool b_dont_submit_rx = false;
+	u16 new_idx = 0, num_bds = 0;
+	int rc;
+
+	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+
+	if (!num_bds)
+		return 0;
+
+	while (num_bds) {
+		if (list_empty(&p_tx->active_descq))
+			return -EINVAL;
+
+		p_pkt = list_first_entry(&p_tx->active_descq,
+					 struct qed_ll2_tx_packet, list_entry);
+		if (!p_pkt)
+			return -EINVAL;
+
+		if (p_pkt->bd_used != 1) {
+			DP_NOTICE(p_hwfn,
+				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
+				  p_pkt->bd_used);
+			return -EINVAL;
+		}
+
+		list_del(&p_pkt->list_entry);
+
+		num_bds--;
+		p_tx->bds_idx++;
+		qed_chain_consume(&p_tx->txq_chain);
+
+		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
+		if (b_dont_submit_rx) {
+			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						p_buffer);
+			continue;
+		}
+
+		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
+					    p_buffer->rx_buffer_phys_addr, 0,
+					    p_buffer, true);
+		if (rc != 0) {
+			qed_ooo_put_free_buffer(p_hwfn,
+						p_hwfn->p_ooo_info, p_buffer);
+			b_dont_submit_rx = true;
+		}
+	}
+
+	/* Submit Tx buffers here */
+	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
+						    p_hwfn->p_ooo_info))) {
+		u16 l4_hdr_offset_w = 0, parse_flags = p_buffer->parse_flags;
+		dma_addr_t first_frag;
+		u8 bd_flags = 0;
+
+		first_frag = p_buffer->rx_buffer_phys_addr +
+		    p_buffer->placement_offset;
+		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
+		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
+					       p_buffer->vlan, bd_flags,
+					       l4_hdr_offset_w,
+					       p_ll2_conn->tx_dest, 0,
+					       first_frag,
+					       p_buffer->packet_length,
+					       p_buffer, true);
+		if (rc != 0) {
+			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						 p_buffer, false);
+			break;
+		}
+	}
+
+	return 0;
+}
+
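+/* Pre-allocate rx_num_ooo_buffers DMA-coherent buffers, sized from the MTU
+ * plus header room and rounded up to a cache-line multiple, and place them
+ * on the OOO free-buffer list.
+ */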
+static int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+			       struct qed_ll2_info *p_ll2_info,
+			       u16 rx_num_ooo_buffers, u16 mtu)
+{
+	struct qed_ooo_buffer *p_buf = NULL;
+	void *p_virt;
+	u16 buf_idx;
+	int rc = 0;
+
+	if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+		return rc;
+
+	if (!rx_num_ooo_buffers)
+		return -EINVAL;
+
+	for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
+		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
+		if (!p_buf) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to allocate ooo descriptor\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
+		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
+					 ETH_CACHE_LINE_SIZE - 1) &
+					~(ETH_CACHE_LINE_SIZE - 1);
+		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+					    p_buf->rx_buffer_size,
+					    &p_buf->rx_buffer_phys_addr,
+					    GFP_KERNEL);
+		if (!p_virt) {
+			DP_NOTICE(p_hwfn, "Failed to allocate ooo buffer\n");
+			kfree(p_buf);
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		p_buf->rx_buffer_virt_addr = p_virt;
+		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
+		   rx_num_ooo_buffers, p_buf->rx_buffer_size);
+
+out:
+	return rc;
+}
+
+static void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+				 struct qed_ll2_info *p_ll2_conn)
+{
+	struct qed_ooo_buffer *p_buffer;
+	int rc;
+
+	if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+		return;
+
+	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+						   p_hwfn->p_ooo_info))) {
+		rc = qed_ll2_post_rx_buffer(p_hwfn,
+					    p_ll2_conn->my_id,
+					    p_buffer->rx_buffer_phys_addr,
+					    0, p_buffer, true);
+		if (rc) {
+			qed_ooo_put_free_buffer(p_hwfn,
+						p_hwfn->p_ooo_info, p_buffer);
+			break;
+		}
+	}
+}
+
+static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+					   struct qed_ll2_info *p_ll2_conn)
+{
+	struct qed_ooo_buffer *p_buffer;
+
+	if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+		return;
+
+	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+						   p_hwfn->p_ooo_info))) {
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  p_buffer->rx_buffer_size,
+				  p_buffer->rx_buffer_virt_addr,
+				  p_buffer->rx_buffer_phys_addr);
+		kfree(p_buffer);
+	}
+}
+
+static void qed_ll2_stop_ooo(struct qed_dev *cdev)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+
+	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
+		   *handle);
+
+	qed_ll2_terminate_connection(hwfn, *handle);
+	qed_ll2_release_connection(hwfn, *handle);
+	*handle = QED_LL2_UNUSED_HANDLE;
+}
+
+static int qed_ll2_start_ooo(struct qed_dev *cdev,
+			     struct qed_ll2_params *params)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+	struct qed_ll2_info *ll2_info;
+	int rc;
+
+	ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
+	if (!ll2_info) {
+		DP_INFO(cdev, "Failed to allocate LL2 info buffer\n");
+		return -ENOMEM;
+	}
+	ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
+	ll2_info->mtu = params->mtu;
+	ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
+	ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
+	ll2_info->tx_tc = OOO_LB_TC;
+	ll2_info->tx_dest = CORE_TX_DEST_LB;
+
+	rc = qed_ll2_acquire_connection(hwfn, ll2_info,
+					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
+					handle);
+	kfree(ll2_info);
+	if (rc) {
+		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
+		goto out;
+	}
+
+	rc = qed_ll2_establish_connection(hwfn, *handle);
+	if (rc) {
+		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	qed_ll2_release_connection(hwfn, *handle);
+out:
+	*handle = QED_LL2_UNUSED_HANDLE;
+	return rc;
+}
+#else /* IS_ENABLED(CONFIG_QEDI) */
+static inline int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
+		void *p_cookie) { return -EINVAL; }
+static inline int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
+		void *p_cookie) { return -EINVAL; }
+static inline int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+			struct qed_ll2_info *p_ll2_info,
+			u16 rx_num_ooo_buffers, u16 mtu) { return -EINVAL; }
+static inline void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+			struct qed_ll2_info *p_ll2_conn) { return; }
+static inline void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+			struct qed_ll2_info *p_ll2_conn) { return; }
+static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
+static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
+			struct qed_ll2_params *params) { return -EINVAL; }
+#endif /* IS_ENABLED(CONFIG_QEDI) */
+
 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 				     struct qed_ll2_info *p_ll2_conn,
 				     u8 action_on_error)
@@ -594,7 +1057,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
 	p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
 	p_ramrod->queue_id = p_ll2_conn->queue_id;
-	p_ramrod->main_func_queue = 1;
+	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
+									  : 1;
 
 	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
 	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
@@ -625,6 +1089,11 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
 		return 0;
 
+	if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+		p_ll2_conn->tx_stats_en = 0;
+	else
+		p_ll2_conn->tx_stats_en = 1;
+
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
 	init_data.cid = p_ll2_conn->cid;
@@ -642,7 +1111,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
 	p_ramrod->sb_index = p_tx->tx_sb_index;
 	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
-	p_ll2_conn->tx_stats_en = 1;
 	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
 	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
 
@@ -866,9 +1334,22 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
 	if (rc)
 		goto q_allocate_fail;
 
+	if (IS_ENABLED(CONFIG_QEDI)) {
+		rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
+					    rx_num_desc * 2, p_params->mtu);
+		if (rc)
+			goto q_allocate_fail;
+	}
+
 	/* Register callbacks for the Rx/Tx queues */
-	comp_rx_cb = qed_ll2_rxq_completion;
-	comp_tx_cb = qed_ll2_txq_completion;
+	if (IS_ENABLED(CONFIG_QEDI) &&
+			p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+		comp_rx_cb = qed_ll2_lb_rxq_completion;
+		comp_tx_cb = qed_ll2_lb_txq_completion;
+	} else {
+		comp_rx_cb = qed_ll2_rxq_completion;
+		comp_tx_cb = qed_ll2_txq_completion;
+	}
 
 	if (rx_num_desc) {
 		qed_int_register_cb(p_hwfn, comp_rx_cb,
@@ -981,6 +1462,9 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
 
+	if (IS_ENABLED(CONFIG_QEDI))
+		qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
+
 	return rc;
 }
 
@@ -1223,6 +1707,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 			      u16 vlan,
 			      u8 bd_flags,
 			      u16 l4_hdr_offset_w,
+			      enum qed_ll2_tx_dest e_tx_dest,
 			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
 			      dma_addr_t first_frag,
 			      u16 first_frag_len, void *cookie, u8 notify_fw)
@@ -1232,6 +1717,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 	enum core_roce_flavor_type roce_flavor;
 	struct qed_ll2_tx_queue *p_tx;
 	struct qed_chain *p_tx_chain;
+	enum core_tx_dest tx_dest;
 	unsigned long flags;
 	int rc = 0;
 
@@ -1262,6 +1748,8 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 		goto out;
 	}
 
+	tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
+						    CORE_TX_DEST_LB;
 	if (qed_roce_flavor == QED_LL2_ROCE) {
 		roce_flavor = CORE_ROCE;
 	} else if (qed_roce_flavor == QED_LL2_RROCE) {
@@ -1276,7 +1764,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 				      num_of_bds, first_frag,
 				      first_frag_len, cookie, notify_fw);
 	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
-					 num_of_bds, CORE_TX_DEST_NW,
+					 num_of_bds, tx_dest,
 					 vlan, bd_flags, l4_hdr_offset_w,
 					 roce_flavor,
 					 first_frag, first_frag_len);
@@ -1351,6 +1839,10 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
 	}
 
+	if (IS_ENABLED(CONFIG_QEDI) &&
+			p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+
 	return rc;
 }
 
@@ -1381,6 +1873,9 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
 	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
 
+	if (IS_ENABLED(CONFIG_QEDI))
+		qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
+
 	mutex_lock(&p_ll2_conn->mutex);
 	p_ll2_conn->b_active = false;
 	mutex_unlock(&p_ll2_conn->mutex);
@@ -1628,6 +2123,18 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 		goto release_terminate;
 	}
 
+	if (IS_ENABLED(CONFIG_QEDI) &&
+		(cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI) &&
+		cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
+		rc = qed_ll2_start_ooo(cdev, params);
+		if (rc) {
+			DP_INFO(cdev,
+				"Failed to initialize the OOO LL2 queue\n");
+			goto release_terminate;
+		}
+	}
+
 	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
 	if (!p_ptt) {
 		DP_INFO(cdev, "Failed to acquire PTT\n");
@@ -1677,6 +2184,11 @@ static int qed_ll2_stop(struct qed_dev *cdev)
 	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
 	eth_zero_addr(cdev->ll2_mac_address);
 
+	if (IS_ENABLED(CONFIG_QEDI) &&
+		(cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI) &&
+		cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+		qed_ll2_stop_ooo(cdev);
+
 	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
 					  cdev->ll2->handle);
 	if (rc)
@@ -1731,7 +2243,8 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
 				       cdev->ll2->handle,
 				       1 + skb_shinfo(skb)->nr_frags,
-				       vlan, flags, 0, 0 /* RoCE FLAVOR */,
+				       vlan, flags, 0, QED_LL2_TX_DEST_NW,
+				       0 /* RoCE FLAVOR */,
 				       mapping, skb->len, skb, 1);
 	if (rc)
 		goto err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 80a5dc2..2b31d30 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -41,6 +41,12 @@ enum qed_ll2_conn_type {
 	MAX_QED_LL2_RX_CONN_TYPE
 };
 
+enum qed_ll2_tx_dest {
+	QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
+	QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
+	QED_LL2_TX_DEST_MAX
+};
+
 struct qed_ll2_rx_packet {
 	struct list_head list_entry;
 	struct core_rx_bd_with_buff_len *rxq_bd;
@@ -192,6 +198,8 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
  * @param l4_hdr_offset_w	L4 Header Offset from start of packet
  *				(in words). This is needed if both l4_csum
  *				and ipv6_ext are set
+ * @param e_tx_dest             indicates if the packet is to be transmitted via
+ *                              loopback or to the network
  * @param first_frag
  * @param first_frag_len
  * @param cookie
@@ -206,6 +214,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 			      u16 vlan,
 			      u8 bd_flags,
 			      u16 l4_hdr_offset_w,
+			      enum qed_ll2_tx_dest e_tx_dest,
 			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
 			      dma_addr_t first_frag,
 			      u16 first_frag_len, void *cookie, u8 notify_fw);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
new file mode 100644
index 0000000..a037a6f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -0,0 +1,510 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_iscsi.h"
+#include "qed_ll2.h"
+#include "qed_ooo.h"
+
+static struct qed_ooo_archipelago
+*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
+			  struct qed_ooo_info
+			  *p_ooo_info,
+			  u32 cid)
+{
+	struct qed_ooo_archipelago *p_archipelago = NULL;
+
+	list_for_each_entry(p_archipelago,
+			    &p_ooo_info->archipelagos_list, list_entry) {
+		if (p_archipelago->cid == cid)
+			return p_archipelago;
+	}
+
+	return NULL;
+}
+
+static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
+					      struct qed_ooo_info *p_ooo_info,
+					      u32 cid, u8 isle)
+{
+	struct qed_ooo_archipelago *p_archipelago = NULL;
+	struct qed_ooo_isle *p_isle = NULL;
+	u8 the_num_of_isle = 1;
+
+	p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+	if (!p_archipelago) {
+		DP_NOTICE(p_hwfn,
+			  "Connection %d is not found in OOO list\n", cid);
+		return NULL;
+	}
+
+	list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) {
+		if (the_num_of_isle == isle)
+			return p_isle;
+		the_num_of_isle++;
+	}
+
+	return NULL;
+}
+
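+/* Record the CQE in the OOO history ring; head_idx wraps to zero once it
+ * reaches num_of_cqes, so only the most recent entries are kept.
+ */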
+void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+				struct qed_ooo_info *p_ooo_info,
+				struct ooo_opaque *p_cqe)
+{
+	struct qed_ooo_history *p_history = &p_ooo_info->ooo_history;
+
+	if (p_history->head_idx == p_history->num_of_cqes)
+		p_history->head_idx = 0;
+	p_history->p_cqes[p_history->head_idx] = *p_cqe;
+	p_history->head_idx++;
+}
+
+struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ooo_info *p_ooo_info;
+	u16 max_num_archipelagos = 0;
+	u16 max_num_isles = 0;
+	u32 i;
+
+	if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to allocate qed_ooo_info: unknown personality\n");
+		return NULL;
+	}
+
+	max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
+	max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
+
+	if (!max_num_archipelagos) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to allocate qed_ooo_info: unknown amount of connections\n");
+		return NULL;
+	}
+
+	p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
+	if (!p_ooo_info) {
+		DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
+	INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
+	INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
+	INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
+	INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
+
+	p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
+					  sizeof(struct qed_ooo_isle),
+					  GFP_KERNEL);
+	if (!p_ooo_info->p_isles_mem) {
+		DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info(isles)\n");
+		goto no_isles_mem;
+	}
+
+	for (i = 0; i < max_num_isles; i++) {
+		INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list);
+		list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry,
+			      &p_ooo_info->free_isles_list);
+	}
+
+	p_ooo_info->p_archipelagos_mem =
+				kcalloc(max_num_archipelagos,
+					sizeof(struct qed_ooo_archipelago),
+					GFP_KERNEL);
+	if (!p_ooo_info->p_archipelagos_mem) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to allocate qed_ooo_info(archipelagos)\n");
+		goto no_archipelagos_mem;
+	}
+
+	for (i = 0; i < max_num_archipelagos; i++) {
+		INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
+		list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
+			      &p_ooo_info->free_archipelagos_list);
+	}
+
+	p_ooo_info->ooo_history.p_cqes =
+				kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
+					sizeof(struct ooo_opaque),
+					GFP_KERNEL);
+	if (!p_ooo_info->ooo_history.p_cqes) {
+		DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info(history)\n");
+		goto no_history_mem;
+	}
+	p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
+
+	return p_ooo_info;
+
+no_history_mem:
+	kfree(p_ooo_info->p_archipelagos_mem);
+no_archipelagos_mem:
+	kfree(p_ooo_info->p_isles_mem);
+no_isles_mem:
+	kfree(p_ooo_info);
+	return NULL;
+}
+
+void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+				      struct qed_ooo_info *p_ooo_info, u32 cid)
+{
+	struct qed_ooo_archipelago *p_archipelago;
+	struct qed_ooo_buffer *p_buffer;
+	struct qed_ooo_isle *p_isle;
+	bool b_found = false;
+
+	if (list_empty(&p_ooo_info->archipelagos_list))
+		return;
+
+	list_for_each_entry(p_archipelago,
+			    &p_ooo_info->archipelagos_list, list_entry) {
+		if (p_archipelago->cid == cid) {
+			list_del(&p_archipelago->list_entry);
+			b_found = true;
+			break;
+		}
+	}
+
+	if (!b_found)
+		return;
+
+	while (!list_empty(&p_archipelago->isles_list)) {
+		p_isle = list_first_entry(&p_archipelago->isles_list,
+					  struct qed_ooo_isle, list_entry);
+
+		list_del(&p_isle->list_entry);
+
+		while (!list_empty(&p_isle->buffers_list)) {
+			p_buffer = list_first_entry(&p_isle->buffers_list,
+						    struct qed_ooo_buffer,
+						    list_entry);
+
+			if (!p_buffer)
+				break;
+
+			list_del(&p_buffer->list_entry);
+			list_add_tail(&p_buffer->list_entry,
+				      &p_ooo_info->free_buffers_list);
+		}
+		list_add_tail(&p_isle->list_entry,
+			      &p_ooo_info->free_isles_list);
+	}
+
+	list_add_tail(&p_archipelago->list_entry,
+		      &p_ooo_info->free_archipelagos_list);
+}
+
+void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+			       struct qed_ooo_info *p_ooo_info)
+{
+	struct qed_ooo_archipelago *p_arch;
+	struct qed_ooo_buffer *p_buffer;
+	struct qed_ooo_isle *p_isle;
+
+	while (!list_empty(&p_ooo_info->archipelagos_list)) {
+		p_arch = list_first_entry(&p_ooo_info->archipelagos_list,
+					  struct qed_ooo_archipelago,
+					  list_entry);
+
+		list_del(&p_arch->list_entry);
+
+		while (!list_empty(&p_arch->isles_list)) {
+			p_isle = list_first_entry(&p_arch->isles_list,
+						  struct qed_ooo_isle,
+						  list_entry);
+
+			list_del(&p_isle->list_entry);
+
+			while (!list_empty(&p_isle->buffers_list)) {
+				p_buffer =
+				    list_first_entry(&p_isle->buffers_list,
+						     struct qed_ooo_buffer,
+						     list_entry);
+
+				if (!p_buffer)
+					break;
+
+				list_del(&p_buffer->list_entry);
+				list_add_tail(&p_buffer->list_entry,
+					      &p_ooo_info->free_buffers_list);
+			}
+			list_add_tail(&p_isle->list_entry,
+				      &p_ooo_info->free_isles_list);
+		}
+		list_add_tail(&p_arch->list_entry,
+			      &p_ooo_info->free_archipelagos_list);
+	}
+	if (!list_empty(&p_ooo_info->ready_buffers_list))
+		list_splice_tail_init(&p_ooo_info->ready_buffers_list,
+				      &p_ooo_info->free_buffers_list);
+}
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+{
+	qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
+	memset(p_ooo_info->ooo_history.p_cqes, 0,
+	       p_ooo_info->ooo_history.num_of_cqes *
+	       sizeof(struct ooo_opaque));
+	p_ooo_info->ooo_history.head_idx = 0;
+}
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+{
+	struct qed_ooo_buffer *p_buffer;
+
+	qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
+	while (!list_empty(&p_ooo_info->free_buffers_list)) {
+		p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
+					    struct qed_ooo_buffer, list_entry);
+
+		if (!p_buffer)
+			break;
+
+		list_del(&p_buffer->list_entry);
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  p_buffer->rx_buffer_size,
+				  p_buffer->rx_buffer_virt_addr,
+				  p_buffer->rx_buffer_phys_addr);
+		kfree(p_buffer);
+	}
+
+	kfree(p_ooo_info->p_isles_mem);
+	kfree(p_ooo_info->p_archipelagos_mem);
+	kfree(p_ooo_info->ooo_history.p_cqes);
+	kfree(p_ooo_info);
+}
+
+void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+			     struct qed_ooo_info *p_ooo_info,
+			     struct qed_ooo_buffer *p_buffer)
+{
+	list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list);
+}
+
+struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+					       struct qed_ooo_info *p_ooo_info)
+{
+	struct qed_ooo_buffer *p_buffer = NULL;
+
+	if (!list_empty(&p_ooo_info->free_buffers_list)) {
+		p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
+					    struct qed_ooo_buffer, list_entry);
+
+		list_del(&p_buffer->list_entry);
+	}
+
+	return p_buffer;
+}
+
+void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+			      struct qed_ooo_info *p_ooo_info,
+			      struct qed_ooo_buffer *p_buffer, u8 on_tail)
+{
+	if (on_tail)
+		list_add_tail(&p_buffer->list_entry,
+			      &p_ooo_info->ready_buffers_list);
+	else
+		list_add(&p_buffer->list_entry,
+			 &p_ooo_info->ready_buffers_list);
+}
+
+struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+						struct qed_ooo_info *p_ooo_info)
+{
+	struct qed_ooo_buffer *p_buffer = NULL;
+
+	if (!list_empty(&p_ooo_info->ready_buffers_list)) {
+		p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list,
+					    struct qed_ooo_buffer, list_entry);
+
+		list_del(&p_buffer->list_entry);
+	}
+
+	return p_buffer;
+}
+
+void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+			  struct qed_ooo_info *p_ooo_info,
+			  u32 cid, u8 drop_isle, u8 drop_size)
+{
+	struct qed_ooo_archipelago *p_archipelago = NULL;
+	struct qed_ooo_isle *p_isle = NULL;
+	u8 isle_idx;
+
+	p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+	for (isle_idx = 0; isle_idx < drop_size; isle_idx++) {
+		p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle);
+		if (!p_isle) {
+			DP_NOTICE(p_hwfn,
+				  "Isle %d is not found(cid %d)\n",
+				  drop_isle, cid);
+			return;
+		}
+		if (list_empty(&p_isle->buffers_list))
+			DP_NOTICE(p_hwfn,
+				  "Isle %d is empty(cid %d)\n", drop_isle, cid);
+		else
+			list_splice_tail_init(&p_isle->buffers_list,
+					      &p_ooo_info->free_buffers_list);
+
+		list_del(&p_isle->list_entry);
+		p_ooo_info->cur_isles_number--;
+		list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
+	}
+
+	if (list_empty(&p_archipelago->isles_list)) {
+		list_del(&p_archipelago->list_entry);
+		list_add(&p_archipelago->list_entry,
+			 &p_ooo_info->free_archipelagos_list);
+	}
+}
+
+void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+			  struct qed_ooo_info *p_ooo_info,
+			  u32 cid, u8 ooo_isle,
+			  struct qed_ooo_buffer *p_buffer)
+{
+	struct qed_ooo_archipelago *p_archipelago = NULL;
+	struct qed_ooo_isle *p_prev_isle = NULL;
+	struct qed_ooo_isle *p_isle = NULL;
+
+	if (ooo_isle > 1) {
+		p_prev_isle = qed_ooo_seek_isle(p_hwfn,
+						p_ooo_info, cid, ooo_isle - 1);
+		if (!p_prev_isle) {
+			DP_NOTICE(p_hwfn,
+				  "Isle %d is not found(cid %d)\n",
+				  ooo_isle - 1, cid);
+			return;
+		}
+	}
+	p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+	if (!p_archipelago && (ooo_isle != 1)) {
+		DP_NOTICE(p_hwfn,
+			  "Connection %d is not found in OOO list\n", cid);
+		return;
+	}
+
+	if (!list_empty(&p_ooo_info->free_isles_list)) {
+		p_isle = list_first_entry(&p_ooo_info->free_isles_list,
+					  struct qed_ooo_isle, list_entry);
+
+		list_del(&p_isle->list_entry);
+		if (!list_empty(&p_isle->buffers_list)) {
+			DP_NOTICE(p_hwfn, "Free isle is not empty\n");
+			INIT_LIST_HEAD(&p_isle->buffers_list);
+		}
+	} else {
+		DP_NOTICE(p_hwfn, "No more free isles\n");
+		return;
+	}
+
+	if (!p_archipelago &&
+	    !list_empty(&p_ooo_info->free_archipelagos_list)) {
+		p_archipelago =
+		    list_first_entry(&p_ooo_info->free_archipelagos_list,
+				     struct qed_ooo_archipelago, list_entry);
+
+		list_del(&p_archipelago->list_entry);
+		if (!list_empty(&p_archipelago->isles_list)) {
+			DP_NOTICE(p_hwfn,
+				  "Free OOO connection is not empty\n");
+			INIT_LIST_HEAD(&p_archipelago->isles_list);
+		}
+		p_archipelago->cid = cid;
+		list_add(&p_archipelago->list_entry,
+			 &p_ooo_info->archipelagos_list);
+	} else if (!p_archipelago) {
+		DP_NOTICE(p_hwfn, "No more free OOO connections\n");
+		list_add(&p_isle->list_entry,
+			 &p_ooo_info->free_isles_list);
+		list_add(&p_buffer->list_entry,
+			 &p_ooo_info->free_buffers_list);
+		return;
+	}
+
+	list_add(&p_buffer->list_entry, &p_isle->buffers_list);
+	p_ooo_info->cur_isles_number++;
+	p_ooo_info->gen_isles_number++;
+
+	if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number)
+		p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number;
+
+	if (!p_prev_isle)
+		list_add(&p_isle->list_entry, &p_archipelago->isles_list);
+	else
+		list_add(&p_isle->list_entry, &p_prev_isle->list_entry);
+}
+
+void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+			    struct qed_ooo_info *p_ooo_info,
+			    u32 cid,
+			    u8 ooo_isle,
+			    struct qed_ooo_buffer *p_buffer, u8 buffer_side)
+{
+	struct qed_ooo_isle *p_isle = NULL;
+
+	p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
+	if (!p_isle) {
+		DP_NOTICE(p_hwfn,
+			  "Isle %d is not found(cid %d)\n", ooo_isle, cid);
+		return;
+	}
+
+	if (buffer_side == QED_OOO_LEFT_BUF)
+		list_add(&p_buffer->list_entry, &p_isle->buffers_list);
+	else
+		list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list);
+}
+
+void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+			struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle)
+{
+	struct qed_ooo_archipelago *p_archipelago = NULL;
+	struct qed_ooo_isle *p_right_isle = NULL;
+	struct qed_ooo_isle *p_left_isle = NULL;
+
+	p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
+					 left_isle + 1);
+	if (!p_right_isle) {
+		DP_NOTICE(p_hwfn,
+			  "Right isle %d is not found(cid %d)\n",
+			  left_isle + 1, cid);
+		return;
+	}
+
+	p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+	list_del(&p_right_isle->list_entry);
+	p_ooo_info->cur_isles_number--;
+	if (left_isle) {
+		p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
+						left_isle);
+		if (!p_left_isle) {
+			DP_NOTICE(p_hwfn,
+				  "Left isle %d is not found(cid %d)\n",
+				  left_isle, cid);
+			return;
+		}
+		list_splice_tail_init(&p_right_isle->buffers_list,
+				      &p_left_isle->buffers_list);
+	} else {
+		list_splice_tail_init(&p_right_isle->buffers_list,
+				      &p_ooo_info->ready_buffers_list);
+		if (list_empty(&p_archipelago->isles_list)) {
+			list_del(&p_archipelago->list_entry);
+			list_add(&p_archipelago->list_entry,
+				 &p_ooo_info->free_archipelagos_list);
+		}
+	}
+	list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
new file mode 100644
index 0000000..75c6e48
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -0,0 +1,116 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_OOO_H
+#define _QED_OOO_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+#define QED_MAX_NUM_ISLES	256
+#define QED_MAX_NUM_OOO_HISTORY_ENTRIES	512
+
+#define QED_OOO_LEFT_BUF	0
+#define QED_OOO_RIGHT_BUF	1
+
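+/* Terminology: an "isle" is a contiguous run of buffered out-of-order
+ * segments; an "archipelago" groups all isles belonging to one
+ * connection (cid).
+ */
+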
+struct qed_ooo_buffer {
+	struct list_head list_entry;
+	void *rx_buffer_virt_addr;
+	dma_addr_t rx_buffer_phys_addr;
+	u32 rx_buffer_size;
+	u16 packet_length;
+	u16 parse_flags;
+	u16 vlan;
+	u8 placement_offset;
+};
+
+struct qed_ooo_isle {
+	struct list_head list_entry;
+	struct list_head buffers_list;
+};
+
+struct qed_ooo_archipelago {
+	struct list_head list_entry;
+	struct list_head isles_list;
+	u32 cid;
+};
+
+struct qed_ooo_history {
+	struct ooo_opaque *p_cqes;
+	u32 head_idx;
+	u32 num_of_cqes;
+};
+
+struct qed_ooo_info {
+	struct list_head free_buffers_list;
+	struct list_head ready_buffers_list;
+	struct list_head free_isles_list;
+	struct list_head free_archipelagos_list;
+	struct list_head archipelagos_list;
+	struct qed_ooo_archipelago *p_archipelagos_mem;
+	struct qed_ooo_isle *p_isles_mem;
+	struct qed_ooo_history ooo_history;
+	u32 cur_isles_number;
+	u32 max_isles_number;
+	u32 gen_isles_number;
+};
+
+void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+				struct qed_ooo_info *p_ooo_info,
+				struct ooo_opaque *p_cqe);
+
+struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+				      struct qed_ooo_info *p_ooo_info,
+				      u32 cid);
+
+void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+			       struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+			     struct qed_ooo_info *p_ooo_info,
+			     struct qed_ooo_buffer *p_buffer);
+
+struct qed_ooo_buffer *
+qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+			struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+			      struct qed_ooo_info *p_ooo_info,
+			      struct qed_ooo_buffer *p_buffer, u8 on_tail);
+
+struct qed_ooo_buffer *
+qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+			 struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+			  struct qed_ooo_info *p_ooo_info,
+			  u32 cid, u8 drop_isle, u8 drop_size);
+
+void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+			  struct qed_ooo_info *p_ooo_info,
+			  u32 cid,
+			  u8 ooo_isle, struct qed_ooo_buffer *p_buffer);
+
+void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+			    struct qed_ooo_info *p_ooo_info,
+			    u32 cid,
+			    u8 ooo_isle,
+			    struct qed_ooo_buffer *p_buffer, u8 buffer_side);
+
+void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+			struct qed_ooo_info *p_ooo_info, u32 cid,
+			u8 left_isle);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 2343005..1768cdb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -2866,6 +2866,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
 	/* Tx header */
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
 				       1 + pkt->n_seg, 0, flags, 0,
+				       QED_LL2_TX_DEST_NW,
 				       qed_roce_flavor, pkt->header.baddr,
 				       pkt->header.len, pkt, 1);
 	if (rc) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index d3fa578..b44fd4c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -26,6 +26,7 @@
 #include "qed_int.h"
 #include "qed_iscsi.h"
 #include "qed_mcp.h"
+#include "qed_ooo.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
@@ -253,6 +254,14 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 	case PROTOCOLID_ISCSI:
 		if (!IS_ENABLED(CONFIG_QEDI))
 			return -EINVAL;
+		if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
+			u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
+
+			qed_ooo_release_connection_isles(p_hwfn,
+							 p_hwfn->p_ooo_info,
+							 cid);
+			return 0;
+		}
 
 		if (p_hwfn->p_iscsi_info->event_cb) {
 			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-- 
1.8.3.1

WARNING: multiple messages have this Message-ID (diff)
From: <manish.rangankar@cavium.com>
To: lduncan@suse.com, cleech@redhat.com
Cc: martin.petersen@oracle.com, jejb@linux.vnet.ibm.com,
	linux-scsi@vger.kernel.org, netdev@vger.kernel.org,
	Yuval.Mintz@cavium.com, QLogic-Storage-Upstream@cavium.com,
	Yuval Mintz <Yuval.Mintz@qlogic.com>,
	Arun Easi <arun.easi@cavium.com>,
	Yuval Mintz <yuval.mintz@cavium.com>
Subject: [RFC 2/6] qed: Add iSCSI out of order packet handling.
Date: Wed, 19 Oct 2016 01:01:09 -0400	[thread overview]
Message-ID: <1476853273-22960-3-git-send-email-manish.rangankar@cavium.com> (raw)
In-Reply-To: <1476853273-22960-1-git-send-email-manish.rangankar@cavium.com>

From: Yuval Mintz <Yuval.Mintz@qlogic.com>

This patch adds out of order packet handling for hardware offloaded
iSCSI. Out of order packet handling requires driver buffer allocation
and assistance.

Signed-off-by: Arun Easi <arun.easi@cavium.com>
Signed-off-by: Yuval Mintz <yuval.mintz@cavium.com>
---
 drivers/net/ethernet/qlogic/qed/Makefile   |   2 +-
 drivers/net/ethernet/qlogic/qed/qed.h      |   1 +
 drivers/net/ethernet/qlogic/qed/qed_dev.c  |  14 +-
 drivers/net/ethernet/qlogic/qed/qed_ll2.c  | 559 +++++++++++++++++++++++++++--
 drivers/net/ethernet/qlogic/qed/qed_ll2.h  |   9 +
 drivers/net/ethernet/qlogic/qed/qed_ooo.c  | 510 ++++++++++++++++++++++++++
 drivers/net/ethernet/qlogic/qed/qed_ooo.h  | 116 ++++++
 drivers/net/ethernet/qlogic/qed/qed_roce.c |   1 +
 drivers/net/ethernet/qlogic/qed/qed_spq.c  |   9 +
 9 files changed, 1195 insertions(+), 26 deletions(-)
 create mode 100644 drivers/net/ethernet/qlogic/qed/qed_ooo.c
 create mode 100644 drivers/net/ethernet/qlogic/qed/qed_ooo.h

diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index b76669c..9121bf0 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -6,4 +6,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
 qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
 qed-$(CONFIG_QED_LL2) += qed_ll2.o
 qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
-qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
+qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index a61b1c0..e5626ae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -380,6 +380,7 @@ struct qed_hwfn {
 	/* Protocol related */
 	bool				using_ll2;
 	struct qed_ll2_info		*p_ll2_info;
+	struct qed_ooo_info		*p_ooo_info;
 	struct qed_rdma_info		*p_rdma_info;
 	struct qed_iscsi_info		*p_iscsi_info;
 	struct qed_pf_params		pf_params;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index a4234c0..060e9a4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -32,6 +32,7 @@
 #include "qed_iscsi.h"
 #include "qed_ll2.h"
 #include "qed_mcp.h"
+#include "qed_ooo.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
@@ -157,8 +158,10 @@ void qed_resc_free(struct qed_dev *cdev)
 		qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
 #endif
 		if (IS_ENABLED(CONFIG_QEDI) &&
-				p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+				p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
 			qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
+			qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
+		}
 		qed_iov_free(p_hwfn);
 		qed_dmae_info_free(p_hwfn);
 		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -416,6 +419,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 int qed_resc_alloc(struct qed_dev *cdev)
 {
 	struct qed_iscsi_info *p_iscsi_info;
+	struct qed_ooo_info *p_ooo_info;
 #ifdef CONFIG_QED_LL2
 	struct qed_ll2_info *p_ll2_info;
 #endif
@@ -543,6 +547,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
 			if (!p_iscsi_info)
 				goto alloc_no_mem;
 			p_hwfn->p_iscsi_info = p_iscsi_info;
+			p_ooo_info = qed_ooo_alloc(p_hwfn);
+			if (!p_ooo_info)
+				goto alloc_no_mem;
+			p_hwfn->p_ooo_info = p_ooo_info;
 		}
 
 		/* DMA info initialization */
@@ -598,8 +606,10 @@ void qed_resc_setup(struct qed_dev *cdev)
 			qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
 #endif
 		if (IS_ENABLED(CONFIG_QEDI) &&
-				p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+				p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
 			qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
+			qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
+		}
 	}
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index e67f3c9..4ce12e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -36,6 +36,7 @@
 #include "qed_int.h"
 #include "qed_ll2.h"
 #include "qed_mcp.h"
+#include "qed_ooo.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 
@@ -295,27 +296,36 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		list_del(&p_pkt->list_entry);
 		b_last_packet = list_empty(&p_tx->active_descq);
 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
-		p_tx->cur_completing_packet = *p_pkt;
-		p_tx->cur_completing_bd_idx = 1;
-		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
-		tx_frag = p_pkt->bds_set[0].tx_frag;
+		if (IS_ENABLED(CONFIG_QEDI) &&
+			p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+			struct qed_ooo_buffer *p_buffer;
+
+			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						p_buffer);
+		} else {
+			p_tx->cur_completing_packet = *p_pkt;
+			p_tx->cur_completing_bd_idx = 1;
+			b_last_frag = p_tx->cur_completing_bd_idx ==
+				p_pkt->bd_used;
+			tx_frag = p_pkt->bds_set[0].tx_frag;
 #if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-		if (p_ll2_conn->gsi_enable)
-			qed_ll2b_release_tx_gsi_packet(p_hwfn,
-						       p_ll2_conn->my_id,
-						       p_pkt->cookie,
-						       tx_frag,
-						       b_last_frag,
-						       b_last_packet);
-		else
+			if (p_ll2_conn->gsi_enable)
+				qed_ll2b_release_tx_gsi_packet(p_hwfn,
+					       p_ll2_conn->my_id,
+					       p_pkt->cookie,
+					       tx_frag,
+					       b_last_frag,
+					       b_last_packet);
+			else
 #endif
-			qed_ll2b_complete_tx_packet(p_hwfn,
+				qed_ll2b_complete_tx_packet(p_hwfn,
 						    p_ll2_conn->my_id,
 						    p_pkt->cookie,
 						    tx_frag,
 						    b_last_frag,
 						    b_last_packet);
-
+		}
 	}
 }
 
@@ -546,13 +556,466 @@ void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		list_del(&p_pkt->list_entry);
 		list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-		rx_buf_addr = p_pkt->rx_buf_addr;
-		cookie = p_pkt->cookie;
+		if (IS_ENABLED(CONFIG_QEDI) &&
+			p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+			struct qed_ooo_buffer *p_buffer;
+
+			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						p_buffer);
+		} else {
+			rx_buf_addr = p_pkt->rx_buf_addr;
+			cookie = p_pkt->cookie;
+
+			b_last = list_empty(&p_rx->active_descq);
+		}
+	}
+}
+
+#if IS_ENABLED(CONFIG_QEDI)
+static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
+{
+	u8 bd_flags = 0;
+
+	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
+		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+
+	return bd_flags;
+}
+
+static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+	u16 packet_length = 0, parse_flags = 0, vlan = 0;
+	struct qed_ll2_rx_packet *p_pkt = NULL;
+	u32 num_ooo_add_to_peninsula = 0, cid;
+	union core_rx_cqe_union *cqe = NULL;
+	u16 cq_new_idx = 0, cq_old_idx = 0;
+	struct qed_ooo_buffer *p_buffer;
+	struct ooo_opaque *iscsi_ooo;
+	u8 placement_offset = 0;
+	u8 cqe_type;
+	int rc;
+
+	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+	if (cq_new_idx == cq_old_idx)
+		return 0;
+
+	while (cq_new_idx != cq_old_idx) {
+		struct core_rx_fast_path_cqe *p_cqe_fp;
+
+		cqe = qed_chain_consume(&p_rx->rcq_chain);
+		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+		cqe_type = cqe->rx_cqe_sp.type;
+
+		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+			DP_NOTICE(p_hwfn,
+				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
+				  cqe_type);
+			return -EINVAL;
+		}
+		p_cqe_fp = &cqe->rx_cqe_fp;
+
+		placement_offset = p_cqe_fp->placement_offset;
+		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
+		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
+		vlan = le16_to_cpu(p_cqe_fp->vlan);
+		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
+		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
+					   iscsi_ooo);
+		cid = le32_to_cpu(iscsi_ooo->cid);
+
+		/* Process delete isle first */
+		if (iscsi_ooo->drop_size)
+			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
+					     iscsi_ooo->drop_isle,
+					     iscsi_ooo->drop_size);
+
+		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
+			continue;
+
+		/* Now process create/add/join isles */
+		if (list_empty(&p_rx->active_descq)) {
+			DP_NOTICE(p_hwfn,
+				  "LL2 OOO RX chain has no submitted buffers\n");
+			return -EIO;
+		}
+
+		p_pkt = list_first_entry(&p_rx->active_descq,
+					 struct qed_ll2_rx_packet, list_entry);
+
+		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
+		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
+		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
+		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
+		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
+			if (!p_pkt) {
+				DP_NOTICE(p_hwfn,
+					  "LL2 OOO RX packet is not valid\n");
+				return -EIO;
+			}
+			list_del(&p_pkt->list_entry);
+			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+			p_buffer->packet_length = packet_length;
+			p_buffer->parse_flags = parse_flags;
+			p_buffer->vlan = vlan;
+			p_buffer->placement_offset = placement_offset;
+			qed_chain_consume(&p_rx->rxq_chain);
+			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+			switch (iscsi_ooo->ooo_opcode) {
+			case TCP_EVENT_ADD_NEW_ISLE:
+				qed_ooo_add_new_isle(p_hwfn,
+						     p_hwfn->p_ooo_info,
+						     cid,
+						     iscsi_ooo->ooo_isle,
+						     p_buffer);
+				break;
+			case TCP_EVENT_ADD_ISLE_RIGHT:
+				qed_ooo_add_new_buffer(p_hwfn,
+						       p_hwfn->p_ooo_info,
+						       cid,
+						       iscsi_ooo->ooo_isle,
+						       p_buffer,
+						       QED_OOO_RIGHT_BUF);
+				break;
+			case TCP_EVENT_ADD_ISLE_LEFT:
+				qed_ooo_add_new_buffer(p_hwfn,
+						       p_hwfn->p_ooo_info,
+						       cid,
+						       iscsi_ooo->ooo_isle,
+						       p_buffer,
+						       QED_OOO_LEFT_BUF);
+				break;
+			case TCP_EVENT_JOIN:
+				qed_ooo_add_new_buffer(p_hwfn,
+						       p_hwfn->p_ooo_info,
+						       cid,
+						       iscsi_ooo->ooo_isle +
+						       1,
+						       p_buffer,
+						       QED_OOO_LEFT_BUF);
+				qed_ooo_join_isles(p_hwfn,
+						   p_hwfn->p_ooo_info,
+						   cid, iscsi_ooo->ooo_isle);
+				break;
+			case TCP_EVENT_ADD_PEN:
+				num_ooo_add_to_peninsula++;
+				qed_ooo_put_ready_buffer(p_hwfn,
+							 p_hwfn->p_ooo_info,
+							 p_buffer, true);
+				break;
+			}
+		} else {
+			DP_NOTICE(p_hwfn,
+				  "Unexpected OOO event (%d) on LB RX queue\n",
+				  iscsi_ooo->ooo_opcode);
+		}
+	}
 
-		b_last = list_empty(&p_rx->active_descq);
+	/* Submit RX buffer here */
+	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+						   p_hwfn->p_ooo_info))) {
+		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
+					    p_buffer->rx_buffer_phys_addr,
+					    0, p_buffer, true);
+		if (rc) {
+			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						p_buffer);
+			break;
+		}
 	}
+
+	/* Submit Tx buffers here */
+	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
+						    p_hwfn->p_ooo_info))) {
+		u16 l4_hdr_offset_w = 0;
+		dma_addr_t first_frag;
+		u8 bd_flags = 0;
+
+		first_frag = p_buffer->rx_buffer_phys_addr +
+			     p_buffer->placement_offset;
+		parse_flags = p_buffer->parse_flags;
+		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
+		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+
+		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
+					       p_buffer->vlan, bd_flags,
+					       l4_hdr_offset_w,
+					       p_ll2_conn->tx_dest, 0,
+					       first_frag,
+					       p_buffer->packet_length,
+					       p_buffer, true);
+		if (rc) {
+			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						 p_buffer, false);
+			break;
+		}
+	}
+
+	return 0;
 }
 
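+/*
+ * Completion handler for the OOO loopback TX queue: each completed BD hands
+ * its buffer back for re-posting to the RX queue (falling back to the free
+ * list once a re-post fails), after which any buffers that became ready are
+ * transmitted on the loopback queue.
+ */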
+static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+	struct qed_ll2_tx_packet *p_pkt = NULL;
+	struct qed_ooo_buffer *p_buffer;
+	bool b_dont_submit_rx = false;
+	u16 new_idx = 0, num_bds = 0;
+	int rc;
+
+	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+
+	if (!num_bds)
+		return 0;
+
+	while (num_bds) {
+		if (list_empty(&p_tx->active_descq))
+			return -EINVAL;
+
+		p_pkt = list_first_entry(&p_tx->active_descq,
+					 struct qed_ll2_tx_packet, list_entry);
+		if (!p_pkt)
+			return -EINVAL;
+
+		if (p_pkt->bd_used != 1) {
+			DP_NOTICE(p_hwfn,
+				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
+				  p_pkt->bd_used);
+			return -EINVAL;
+		}
+
+		list_del(&p_pkt->list_entry);
+
+		num_bds--;
+		p_tx->bds_idx++;
+		qed_chain_consume(&p_tx->txq_chain);
+
+		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
+		if (b_dont_submit_rx) {
+			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						p_buffer);
+			continue;
+		}
+
+		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
+					    p_buffer->rx_buffer_phys_addr, 0,
+					    p_buffer, true);
+		if (rc != 0) {
+			qed_ooo_put_free_buffer(p_hwfn,
+						p_hwfn->p_ooo_info, p_buffer);
+			b_dont_submit_rx = true;
+		}
+	}
+
+	/* Submit Tx buffers here */
+	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
+						    p_hwfn->p_ooo_info))) {
+		u16 l4_hdr_offset_w = 0, parse_flags = p_buffer->parse_flags;
+		dma_addr_t first_frag;
+		u8 bd_flags = 0;
+
+		first_frag = p_buffer->rx_buffer_phys_addr +
+		    p_buffer->placement_offset;
+		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
+		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
+					       p_buffer->vlan, bd_flags,
+					       l4_hdr_offset_w,
+					       p_ll2_conn->tx_dest, 0,
+					       first_frag,
+					       p_buffer->packet_length,
+					       p_buffer, true);
+		if (rc != 0) {
+			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
+						 p_buffer, false);
+			break;
+		}
+	}
+
+	return 0;
+}
+
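+/*
+ * For an ISCSI_OOO connection, pre-allocate rx_num_ooo_buffers DMA-coherent
+ * receive buffers sized for the MTU plus headroom and rounded up to the
+ * cache line, and park them on the free-buffers list. Any other connection
+ * type needs no driver-side OOO buffers.
+ */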
+static int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+			       struct qed_ll2_info *p_ll2_info,
+			       u16 rx_num_ooo_buffers, u16 mtu)
+{
+	struct qed_ooo_buffer *p_buf = NULL;
+	void *p_virt;
+	u16 buf_idx;
+	int rc = 0;
+
+	if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+		return rc;
+
+	if (!rx_num_ooo_buffers)
+		return -EINVAL;
+
+	for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
+		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
+		if (!p_buf) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to allocate ooo descriptor\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
+		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
+					 ETH_CACHE_LINE_SIZE - 1) &
+					~(ETH_CACHE_LINE_SIZE - 1);
+		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+					    p_buf->rx_buffer_size,
+					    &p_buf->rx_buffer_phys_addr,
+					    GFP_KERNEL);
+		if (!p_virt) {
+			DP_NOTICE(p_hwfn, "Failed to allocate ooo buffer\n");
+			kfree(p_buf);
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		p_buf->rx_buffer_virt_addr = p_virt;
+		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
+		   rx_num_ooo_buffers, p_buf->rx_buffer_size);
+
+out:
+	return rc;
+}
+
+static void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+				 struct qed_ll2_info *p_ll2_conn)
+{
+	struct qed_ooo_buffer *p_buffer;
+	int rc;
+
+	if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+		return;
+
+	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+						   p_hwfn->p_ooo_info))) {
+		rc = qed_ll2_post_rx_buffer(p_hwfn,
+					    p_ll2_conn->my_id,
+					    p_buffer->rx_buffer_phys_addr,
+					    0, p_buffer, true);
+		if (rc) {
+			qed_ooo_put_free_buffer(p_hwfn,
+						p_hwfn->p_ooo_info, p_buffer);
+			break;
+		}
+	}
+}
+
+static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+					   struct qed_ll2_info *p_ll2_conn)
+{
+	struct qed_ooo_buffer *p_buffer;
+
+	if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+		return;
+
+	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+						   p_hwfn->p_ooo_info))) {
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  p_buffer->rx_buffer_size,
+				  p_buffer->rx_buffer_virt_addr,
+				  p_buffer->rx_buffer_phys_addr);
+		kfree(p_buffer);
+	}
+}
+
+static void qed_ll2_stop_ooo(struct qed_dev *cdev)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+
+	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
+		   *handle);
+
+	qed_ll2_terminate_connection(hwfn, *handle);
+	qed_ll2_release_connection(hwfn, *handle);
+	*handle = QED_LL2_UNUSED_HANDLE;
+}
+
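+/*
+ * Bring up the dedicated OOO LL2 connection: acquire a connection of type
+ * QED_LL2_TYPE_ISCSI_OOO with its TX destination set to loopback, establish
+ * it, and keep its handle in the iSCSI PF params so qed_ll2_stop_ooo() can
+ * tear it down later.
+ */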
+static int qed_ll2_start_ooo(struct qed_dev *cdev,
+			     struct qed_ll2_params *params)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+	struct qed_ll2_info *ll2_info;
+	int rc;
+
+	ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
+	if (!ll2_info) {
+		DP_INFO(cdev, "Failed to allocate LL2 info buffer\n");
+		return -ENOMEM;
+	}
+	ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
+	ll2_info->mtu = params->mtu;
+	ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
+	ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
+	ll2_info->tx_tc = OOO_LB_TC;
+	ll2_info->tx_dest = CORE_TX_DEST_LB;
+
+	rc = qed_ll2_acquire_connection(hwfn, ll2_info,
+					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
+					handle);
+	kfree(ll2_info);
+	if (rc) {
+		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
+		goto out;
+	}
+
+	rc = qed_ll2_establish_connection(hwfn, *handle);
+	if (rc) {
+		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	qed_ll2_release_connection(hwfn, *handle);
+out:
+	*handle = QED_LL2_UNUSED_HANDLE;
+	return rc;
+}
+#else /* IS_ENABLED(CONFIG_QEDI) */
+static inline int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
+		void *p_cookie) { return -EINVAL; }
+static inline int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
+		void *p_cookie) { return -EINVAL; }
+static inline int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+			struct qed_ll2_info *p_ll2_info,
+			u16 rx_num_ooo_buffers, u16 mtu) { return -EINVAL; }
+static inline void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+			struct qed_ll2_info *p_ll2_conn) { return; }
+static inline void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+			struct qed_ll2_info *p_ll2_conn) { return; }
+static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
+static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
+			struct qed_ll2_params *params) { return -EINVAL; }
+#endif /* IS_ENABLED(CONFIG_QEDI) */
+
 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 				     struct qed_ll2_info *p_ll2_conn,
 				     u8 action_on_error)
@@ -594,7 +1057,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
 	p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
 	p_ramrod->queue_id = p_ll2_conn->queue_id;
-	p_ramrod->main_func_queue = 1;
+	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
+									  : 1;
 
 	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
 	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
@@ -625,6 +1089,11 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
 		return 0;
 
+	if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+		p_ll2_conn->tx_stats_en = 0;
+	else
+		p_ll2_conn->tx_stats_en = 1;
+
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
 	init_data.cid = p_ll2_conn->cid;
@@ -642,7 +1111,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
 	p_ramrod->sb_index = p_tx->tx_sb_index;
 	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
-	p_ll2_conn->tx_stats_en = 1;
 	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
 	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
 
@@ -866,9 +1334,22 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
 	if (rc)
 		goto q_allocate_fail;
 
+	if (IS_ENABLED(CONFIG_QEDI)) {
+		rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
+					    rx_num_desc * 2, p_params->mtu);
+		if (rc)
+			goto q_allocate_fail;
+	}
+
 	/* Register callbacks for the Rx/Tx queues */
-	comp_rx_cb = qed_ll2_rxq_completion;
-	comp_tx_cb = qed_ll2_txq_completion;
+	if (IS_ENABLED(CONFIG_QEDI) &&
+			p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+		comp_rx_cb = qed_ll2_lb_rxq_completion;
+		comp_tx_cb = qed_ll2_lb_txq_completion;
+	} else {
+		comp_rx_cb = qed_ll2_rxq_completion;
+		comp_tx_cb = qed_ll2_txq_completion;
+	}
 
 	if (rx_num_desc) {
 		qed_int_register_cb(p_hwfn, comp_rx_cb,
@@ -981,6 +1462,9 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
 
+	if (IS_ENABLED(CONFIG_QEDI))
+		qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
+
 	return rc;
 }
 
@@ -1223,6 +1707,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 			      u16 vlan,
 			      u8 bd_flags,
 			      u16 l4_hdr_offset_w,
+			      enum qed_ll2_tx_dest e_tx_dest,
 			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
 			      dma_addr_t first_frag,
 			      u16 first_frag_len, void *cookie, u8 notify_fw)
@@ -1232,6 +1717,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 	enum core_roce_flavor_type roce_flavor;
 	struct qed_ll2_tx_queue *p_tx;
 	struct qed_chain *p_tx_chain;
+	enum core_tx_dest tx_dest;
 	unsigned long flags;
 	int rc = 0;
 
@@ -1262,6 +1748,8 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 		goto out;
 	}
 
+	tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
+						    CORE_TX_DEST_LB;
 	if (qed_roce_flavor == QED_LL2_ROCE) {
 		roce_flavor = CORE_ROCE;
 	} else if (qed_roce_flavor == QED_LL2_RROCE) {
@@ -1276,7 +1764,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 				      num_of_bds, first_frag,
 				      first_frag_len, cookie, notify_fw);
 	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
-					 num_of_bds, CORE_TX_DEST_NW,
+					 num_of_bds, tx_dest,
 					 vlan, bd_flags, l4_hdr_offset_w,
 					 roce_flavor,
 					 first_frag, first_frag_len);
@@ -1351,6 +1839,10 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
 	}
 
+	if (IS_ENABLED(CONFIG_QEDI) &&
+			p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+
 	return rc;
 }
 
@@ -1381,6 +1873,9 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
 	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
 
+	if (IS_ENABLED(CONFIG_QEDI))
+		qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
+
 	mutex_lock(&p_ll2_conn->mutex);
 	p_ll2_conn->b_active = false;
 	mutex_unlock(&p_ll2_conn->mutex);
@@ -1628,6 +2123,18 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 		goto release_terminate;
 	}
 
+	if (IS_ENABLED(CONFIG_QEDI) &&
+		(cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI) &&
+		cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
+		rc = qed_ll2_start_ooo(cdev, params);
+		if (rc) {
+			DP_INFO(cdev,
+				"Failed to initialize the OOO LL2 queue\n");
+			goto release_terminate;
+		}
+	}
+
 	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
 	if (!p_ptt) {
 		DP_INFO(cdev, "Failed to acquire PTT\n");
@@ -1677,6 +2184,11 @@ static int qed_ll2_stop(struct qed_dev *cdev)
 	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
 	eth_zero_addr(cdev->ll2_mac_address);
 
+	if (IS_ENABLED(CONFIG_QEDI) &&
+		(cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI) &&
+		cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+		qed_ll2_stop_ooo(cdev);
+
 	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
 					  cdev->ll2->handle);
 	if (rc)
@@ -1731,7 +2243,8 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
 				       cdev->ll2->handle,
 				       1 + skb_shinfo(skb)->nr_frags,
-				       vlan, flags, 0, 0 /* RoCE FLAVOR */,
+				       vlan, flags, 0, QED_LL2_TX_DEST_NW,
+				       0 /* RoCE FLAVOR */,
 				       mapping, skb->len, skb, 1);
 	if (rc)
 		goto err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 80a5dc2..2b31d30 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -41,6 +41,12 @@ enum qed_ll2_conn_type {
 	MAX_QED_LL2_RX_CONN_TYPE
 };
 
+enum qed_ll2_tx_dest {
+	QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
+	QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
+	QED_LL2_TX_DEST_MAX
+};
+
 struct qed_ll2_rx_packet {
 	struct list_head list_entry;
 	struct core_rx_bd_with_buff_len *rxq_bd;
@@ -192,6 +198,8 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
  * @param l4_hdr_offset_w	L4 Header Offset from start of packet
  *				(in words). This is needed if both l4_csum
  *				and ipv6_ext are set
+ * @param e_tx_dest             indicates if the packet is to be transmitted via
+ *                              loopback or to the network
  * @param first_frag
  * @param first_frag_len
  * @param cookie
@@ -206,6 +214,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 			      u16 vlan,
 			      u8 bd_flags,
 			      u16 l4_hdr_offset_w,
+			      enum qed_ll2_tx_dest e_tx_dest,
 			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
 			      dma_addr_t first_frag,
 			      u16 first_frag_len, void *cookie, u8 notify_fw);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
new file mode 100644
index 0000000..a037a6f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -0,0 +1,510 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_iscsi.h"
+#include "qed_ll2.h"
+#include "qed_ooo.h"
+
+static struct qed_ooo_archipelago *
+qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
+			 struct qed_ooo_info *p_ooo_info, u32 cid)
+{
+	struct qed_ooo_archipelago *p_archipelago = NULL;
+
+	list_for_each_entry(p_archipelago,
+			    &p_ooo_info->archipelagos_list, list_entry) {
+		if (p_archipelago->cid == cid)
+			return p_archipelago;
+	}
+
+	return NULL;
+}
+
+static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
+					      struct qed_ooo_info *p_ooo_info,
+					      u32 cid, u8 isle)
+{
+	struct qed_ooo_archipelago *p_archipelago = NULL;
+	struct qed_ooo_isle *p_isle = NULL;
+	u8 the_num_of_isle = 1;
+
+	p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+	if (!p_archipelago) {
+		DP_NOTICE(p_hwfn,
+			  "Connection %d is not found in OOO list\n", cid);
+		return NULL;
+	}
+
+	list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) {
+		if (the_num_of_isle == isle)
+			return p_isle;
+		the_num_of_isle++;
+	}
+
+	return NULL;
+}
+
+void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+				struct qed_ooo_info *p_ooo_info,
+				struct ooo_opaque *p_cqe)
+{
+	struct qed_ooo_history *p_history = &p_ooo_info->ooo_history;
+
+	if (p_history->head_idx == p_history->num_of_cqes)
+		p_history->head_idx = 0;
+	p_history->p_cqes[p_history->head_idx] = *p_cqe;
+	p_history->head_idx++;
+}
+
+struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ooo_info *p_ooo_info;
+	u16 max_num_archipelagos = 0;
+	u16 max_num_isles = 0;
+	u32 i;
+
+	if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to allocate qed_ooo_info: unknown personality\n");
+		return NULL;
+	}
+
+	max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
+	max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
+
+	if (!max_num_archipelagos) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to allocate qed_ooo_info: unknown amount of connections\n");
+		return NULL;
+	}
+
+	p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
+	if (!p_ooo_info) {
+		DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
+	INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
+	INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
+	INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
+	INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
+
+	p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
+					  sizeof(struct qed_ooo_isle),
+					  GFP_KERNEL);
+	if (!p_ooo_info->p_isles_mem) {
+		DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info(isles)\n");
+		goto no_isles_mem;
+	}
+
+	for (i = 0; i < max_num_isles; i++) {
+		INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list);
+		list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry,
+			      &p_ooo_info->free_isles_list);
+	}
+
+	p_ooo_info->p_archipelagos_mem =
+				kcalloc(max_num_archipelagos,
+					sizeof(struct qed_ooo_archipelago),
+					GFP_KERNEL);
+	if (!p_ooo_info->p_archipelagos_mem) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to allocate qed_ooo_info(archipelagos)\n");
+		goto no_archipelagos_mem;
+	}
+
+	for (i = 0; i < max_num_archipelagos; i++) {
+		INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
+		list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
+			      &p_ooo_info->free_archipelagos_list);
+	}
+
+	p_ooo_info->ooo_history.p_cqes =
+				kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
+					sizeof(struct ooo_opaque),
+					GFP_KERNEL);
+	if (!p_ooo_info->ooo_history.p_cqes) {
+		DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info(history)\n");
+		goto no_history_mem;
+	}
+
+	return p_ooo_info;
+
+no_history_mem:
+	kfree(p_ooo_info->p_archipelagos_mem);
+no_archipelagos_mem:
+	kfree(p_ooo_info->p_isles_mem);
+no_isles_mem:
+	kfree(p_ooo_info);
+	return NULL;
+}
+
+void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+				      struct qed_ooo_info *p_ooo_info, u32 cid)
+{
+	struct qed_ooo_archipelago *p_archipelago;
+	struct qed_ooo_buffer *p_buffer;
+	struct qed_ooo_isle *p_isle;
+	bool b_found = false;
+
+	if (list_empty(&p_ooo_info->archipelagos_list))
+		return;
+
+	list_for_each_entry(p_archipelago,
+			    &p_ooo_info->archipelagos_list, list_entry) {
+		if (p_archipelago->cid == cid) {
+			list_del(&p_archipelago->list_entry);
+			b_found = true;
+			break;
+		}
+	}
+
+	if (!b_found)
+		return;
+
+	while (!list_empty(&p_archipelago->isles_list)) {
+		p_isle = list_first_entry(&p_archipelago->isles_list,
+					  struct qed_ooo_isle, list_entry);
+
+		list_del(&p_isle->list_entry);
+
+		while (!list_empty(&p_isle->buffers_list)) {
+			p_buffer = list_first_entry(&p_isle->buffers_list,
+						    struct qed_ooo_buffer,
+						    list_entry);
+
+			if (!p_buffer)
+				break;
+
+			list_del(&p_buffer->list_entry);
+			list_add_tail(&p_buffer->list_entry,
+				      &p_ooo_info->free_buffers_list);
+		}
+		list_add_tail(&p_isle->list_entry,
+			      &p_ooo_info->free_isles_list);
+	}
+
+	list_add_tail(&p_archipelago->list_entry,
+		      &p_ooo_info->free_archipelagos_list);
+}
+
+void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+			       struct qed_ooo_info *p_ooo_info)
+{
+	struct qed_ooo_archipelago *p_arch;
+	struct qed_ooo_buffer *p_buffer;
+	struct qed_ooo_isle *p_isle;
+
+	while (!list_empty(&p_ooo_info->archipelagos_list)) {
+		p_arch = list_first_entry(&p_ooo_info->archipelagos_list,
+					  struct qed_ooo_archipelago,
+					  list_entry);
+
+		list_del(&p_arch->list_entry);
+
+		while (!list_empty(&p_arch->isles_list)) {
+			p_isle = list_first_entry(&p_arch->isles_list,
+						  struct qed_ooo_isle,
+						  list_entry);
+
+			list_del(&p_isle->list_entry);
+
+			while (!list_empty(&p_isle->buffers_list)) {
+				p_buffer =
+				    list_first_entry(&p_isle->buffers_list,
+						     struct qed_ooo_buffer,
+						     list_entry);
+
+				if (!p_buffer)
+					break;
+
+				list_del(&p_buffer->list_entry);
+				list_add_tail(&p_buffer->list_entry,
+					      &p_ooo_info->free_buffers_list);
+			}
+			list_add_tail(&p_isle->list_entry,
+				      &p_ooo_info->free_isles_list);
+		}
+		list_add_tail(&p_arch->list_entry,
+			      &p_ooo_info->free_archipelagos_list);
+	}
+	if (!list_empty(&p_ooo_info->ready_buffers_list))
+		list_splice_tail_init(&p_ooo_info->ready_buffers_list,
+				      &p_ooo_info->free_buffers_list);
+}
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+{
+	qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
+	memset(p_ooo_info->ooo_history.p_cqes, 0,
+	       p_ooo_info->ooo_history.num_of_cqes *
+	       sizeof(struct ooo_opaque));
+	p_ooo_info->ooo_history.head_idx = 0;
+}
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+{
+	struct qed_ooo_buffer *p_buffer;
+
+	qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
+	while (!list_empty(&p_ooo_info->free_buffers_list)) {
+		p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
+					    struct qed_ooo_buffer, list_entry);
+
+		if (!p_buffer)
+			break;
+
+		list_del(&p_buffer->list_entry);
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  p_buffer->rx_buffer_size,
+				  p_buffer->rx_buffer_virt_addr,
+				  p_buffer->rx_buffer_phys_addr);
+		kfree(p_buffer);
+	}
+
+	kfree(p_ooo_info->p_isles_mem);
+	kfree(p_ooo_info->p_archipelagos_mem);
+	kfree(p_ooo_info->ooo_history.p_cqes);
+	kfree(p_ooo_info);
+}
+
+void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+			     struct qed_ooo_info *p_ooo_info,
+			     struct qed_ooo_buffer *p_buffer)
+{
+	list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list);
+}
+
+struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+					       struct qed_ooo_info *p_ooo_info)
+{
+	struct qed_ooo_buffer *p_buffer = NULL;
+
+	if (!list_empty(&p_ooo_info->free_buffers_list)) {
+		p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
+					    struct qed_ooo_buffer, list_entry);
+
+		list_del(&p_buffer->list_entry);
+	}
+
+	return p_buffer;
+}
+
+void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+			      struct qed_ooo_info *p_ooo_info,
+			      struct qed_ooo_buffer *p_buffer, u8 on_tail)
+{
+	if (on_tail)
+		list_add_tail(&p_buffer->list_entry,
+			      &p_ooo_info->ready_buffers_list);
+	else
+		list_add(&p_buffer->list_entry,
+			 &p_ooo_info->ready_buffers_list);
+}
+
+struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+						struct qed_ooo_info *p_ooo_info)
+{
+	struct qed_ooo_buffer *p_buffer = NULL;
+
+	if (!list_empty(&p_ooo_info->ready_buffers_list)) {
+		p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list,
+					    struct qed_ooo_buffer, list_entry);
+
+		list_del(&p_buffer->list_entry);
+	}
+
+	return p_buffer;
+}
+
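+/*
+ * Drop 'drop_size' isles of the given connection, starting at 'drop_isle':
+ * their buffers go back to the free-buffers list and the isles to the
+ * free-isles list; the archipelago itself is recycled once no isles remain.
+ */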
+void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+			  struct qed_ooo_info *p_ooo_info,
+			  u32 cid, u8 drop_isle, u8 drop_size)
+{
+	struct qed_ooo_archipelago *p_archipelago = NULL;
+	struct qed_ooo_isle *p_isle = NULL;
+	u8 isle_idx;
+
+	p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+	for (isle_idx = 0; isle_idx < drop_size; isle_idx++) {
+		p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle);
+		if (!p_isle) {
+			DP_NOTICE(p_hwfn,
+				  "Isle %d is not found(cid %d)\n",
+				  drop_isle, cid);
+			return;
+		}
+		if (list_empty(&p_isle->buffers_list))
+			DP_NOTICE(p_hwfn,
+				  "Isle %d is empty(cid %d)\n", drop_isle, cid);
+		else
+			list_splice_tail_init(&p_isle->buffers_list,
+					      &p_ooo_info->free_buffers_list);
+
+		list_del(&p_isle->list_entry);
+		p_ooo_info->cur_isles_number--;
+		list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
+	}
+
+	if (list_empty(&p_archipelago->isles_list)) {
+		list_del(&p_archipelago->list_entry);
+		list_add(&p_archipelago->list_entry,
+			 &p_ooo_info->free_archipelagos_list);
+	}
+}
+
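+/*
+ * Open a new isle at position 'ooo_isle' for this connection, taking an isle
+ * from the free pool and seeding it with the given buffer; the archipelago
+ * is created on first use (ooo_isle == 1).
+ */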
+void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+			  struct qed_ooo_info *p_ooo_info,
+			  u32 cid, u8 ooo_isle,
+			  struct qed_ooo_buffer *p_buffer)
+{
+	struct qed_ooo_archipelago *p_archipelago = NULL;
+	struct qed_ooo_isle *p_prev_isle = NULL;
+	struct qed_ooo_isle *p_isle = NULL;
+
+	if (ooo_isle > 1) {
+		p_prev_isle = qed_ooo_seek_isle(p_hwfn,
+						p_ooo_info, cid, ooo_isle - 1);
+		if (!p_prev_isle) {
+			DP_NOTICE(p_hwfn,
+				  "Isle %d is not found(cid %d)\n",
+				  ooo_isle - 1, cid);
+			return;
+		}
+	}
+	p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+	if (!p_archipelago && (ooo_isle != 1)) {
+		DP_NOTICE(p_hwfn,
+			  "Connection %d is not found in OOO list\n", cid);
+		return;
+	}
+
+	if (!list_empty(&p_ooo_info->free_isles_list)) {
+		p_isle = list_first_entry(&p_ooo_info->free_isles_list,
+					  struct qed_ooo_isle, list_entry);
+
+		list_del(&p_isle->list_entry);
+		if (!list_empty(&p_isle->buffers_list)) {
+			DP_NOTICE(p_hwfn, "Free isle is not empty\n");
+			INIT_LIST_HEAD(&p_isle->buffers_list);
+		}
+	} else {
+		DP_NOTICE(p_hwfn, "No more free isles\n");
+		return;
+	}
+
+	if (!p_archipelago &&
+	    !list_empty(&p_ooo_info->free_archipelagos_list)) {
+		p_archipelago =
+		    list_first_entry(&p_ooo_info->free_archipelagos_list,
+				     struct qed_ooo_archipelago, list_entry);
+
+		list_del(&p_archipelago->list_entry);
+		if (!list_empty(&p_archipelago->isles_list)) {
+			DP_NOTICE(p_hwfn,
+				  "Free OOO connection is not empty\n");
+			INIT_LIST_HEAD(&p_archipelago->isles_list);
+		}
+		p_archipelago->cid = cid;
+		list_add(&p_archipelago->list_entry,
+			 &p_ooo_info->archipelagos_list);
+	} else if (!p_archipelago) {
+		DP_NOTICE(p_hwfn, "No more free OOO connections\n");
+		list_add(&p_isle->list_entry,
+			 &p_ooo_info->free_isles_list);
+		list_add(&p_buffer->list_entry,
+			 &p_ooo_info->free_buffers_list);
+		return;
+	}
+
+	list_add(&p_buffer->list_entry, &p_isle->buffers_list);
+	p_ooo_info->cur_isles_number++;
+	p_ooo_info->gen_isles_number++;
+
+	if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number)
+		p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number;
+
+	if (!p_prev_isle)
+		list_add(&p_isle->list_entry, &p_archipelago->isles_list);
+	else
+		list_add(&p_isle->list_entry, &p_prev_isle->list_entry);
+}
+
+void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+			    struct qed_ooo_info *p_ooo_info,
+			    u32 cid,
+			    u8 ooo_isle,
+			    struct qed_ooo_buffer *p_buffer, u8 buffer_side)
+{
+	struct qed_ooo_isle *p_isle = NULL;
+
+	p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
+	if (!p_isle) {
+		DP_NOTICE(p_hwfn,
+			  "Isle %d is not found(cid %d)\n", ooo_isle, cid);
+		return;
+	}
+
+	if (buffer_side == QED_OOO_LEFT_BUF)
+		list_add(&p_buffer->list_entry, &p_isle->buffers_list);
+	else
+		list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list);
+}
+
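+/*
+ * Merge isle 'left_isle + 1' into 'left_isle'. When left_isle is 0 the right
+ * isle's data has become in-order, so its buffers move to the ready list and
+ * an emptied archipelago is recycled.
+ */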
+void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+			struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle)
+{
+	struct qed_ooo_archipelago *p_archipelago = NULL;
+	struct qed_ooo_isle *p_right_isle = NULL;
+	struct qed_ooo_isle *p_left_isle = NULL;
+
+	p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
+					 left_isle + 1);
+	if (!p_right_isle) {
+		DP_NOTICE(p_hwfn,
+			  "Right isle %d is not found(cid %d)\n",
+			  left_isle + 1, cid);
+		return;
+	}
+
+	p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+	list_del(&p_right_isle->list_entry);
+	p_ooo_info->cur_isles_number--;
+	if (left_isle) {
+		p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
+						left_isle);
+		if (!p_left_isle) {
+			DP_NOTICE(p_hwfn,
+				  "Left isle %d is not found(cid %d)\n",
+				  left_isle, cid);
+			return;
+		}
+		list_splice_tail_init(&p_right_isle->buffers_list,
+				      &p_left_isle->buffers_list);
+	} else {
+		list_splice_tail_init(&p_right_isle->buffers_list,
+				      &p_ooo_info->ready_buffers_list);
+		if (list_empty(&p_archipelago->isles_list)) {
+			list_del(&p_archipelago->list_entry);
+			list_add(&p_archipelago->list_entry,
+				 &p_ooo_info->free_archipelagos_list);
+		}
+	}
+	list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
new file mode 100644
index 0000000..75c6e48
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -0,0 +1,116 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_OOO_H
+#define _QED_OOO_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+#define QED_MAX_NUM_ISLES	256
+#define QED_MAX_NUM_OOO_HISTORY_ENTRIES	512
+
+#define QED_OOO_LEFT_BUF	0
+#define QED_OOO_RIGHT_BUF	1
+
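+/*
+ * Terminology: an "isle" is a list of buffers holding received out-of-order
+ * data, and an "archipelago" collects the isles belonging to one connection
+ * (cid). Firmware events create, extend, join or delete isles; buffers whose
+ * data has become in-order move to the ready list and are re-transmitted on
+ * the loopback queue.
+ */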
+struct qed_ooo_buffer {
+	struct list_head list_entry;
+	void *rx_buffer_virt_addr;
+	dma_addr_t rx_buffer_phys_addr;
+	u32 rx_buffer_size;
+	u16 packet_length;
+	u16 parse_flags;
+	u16 vlan;
+	u8 placement_offset;
+};
+
+struct qed_ooo_isle {
+	struct list_head list_entry;
+	struct list_head buffers_list;
+};
+
+struct qed_ooo_archipelago {
+	struct list_head list_entry;
+	struct list_head isles_list;
+	u32 cid;
+};
+
+struct qed_ooo_history {
+	struct ooo_opaque *p_cqes;
+	u32 head_idx;
+	u32 num_of_cqes;
+};
+
+struct qed_ooo_info {
+	struct list_head free_buffers_list;
+	struct list_head ready_buffers_list;
+	struct list_head free_isles_list;
+	struct list_head free_archipelagos_list;
+	struct list_head archipelagos_list;
+	struct qed_ooo_archipelago *p_archipelagos_mem;
+	struct qed_ooo_isle *p_isles_mem;
+	struct qed_ooo_history ooo_history;
+	u32 cur_isles_number;
+	u32 max_isles_number;
+	u32 gen_isles_number;
+};
+
+void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+				struct qed_ooo_info *p_ooo_info,
+				struct ooo_opaque *p_cqe);
+
+struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+				      struct qed_ooo_info *p_ooo_info,
+				      u32 cid);
+
+void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+			       struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+			     struct qed_ooo_info *p_ooo_info,
+			     struct qed_ooo_buffer *p_buffer);
+
+struct qed_ooo_buffer *
+qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+			struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+			      struct qed_ooo_info *p_ooo_info,
+			      struct qed_ooo_buffer *p_buffer, u8 on_tail);
+
+struct qed_ooo_buffer *
+qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+			 struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+			  struct qed_ooo_info *p_ooo_info,
+			  u32 cid, u8 drop_isle, u8 drop_size);
+
+void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+			  struct qed_ooo_info *p_ooo_info,
+			  u32 cid,
+			  u8 ooo_isle, struct qed_ooo_buffer *p_buffer);
+
+void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+			    struct qed_ooo_info *p_ooo_info,
+			    u32 cid,
+			    u8 ooo_isle,
+			    struct qed_ooo_buffer *p_buffer, u8 buffer_side);
+
+void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+			struct qed_ooo_info *p_ooo_info, u32 cid,
+			u8 left_isle);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 2343005..1768cdb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -2866,6 +2866,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
 	/* Tx header */
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
 				       1 + pkt->n_seg, 0, flags, 0,
+				       QED_LL2_TX_DEST_NW,
 				       qed_roce_flavor, pkt->header.baddr,
 				       pkt->header.len, pkt, 1);
 	if (rc) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index d3fa578..b44fd4c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -26,6 +26,7 @@
 #include "qed_int.h"
 #include "qed_iscsi.h"
 #include "qed_mcp.h"
+#include "qed_ooo.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
@@ -253,6 +254,14 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 	case PROTOCOLID_ISCSI:
 		if (!IS_ENABLED(CONFIG_QEDI))
 			return -EINVAL;
+		if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
+			u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
+
+			qed_ooo_release_connection_isles(p_hwfn,
+							 p_hwfn->p_ooo_info,
+							 cid);
+			return 0;
+		}
 
 		if (p_hwfn->p_iscsi_info->event_cb) {
 			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-- 
1.8.3.1
