From: Michal Kalderon <Michal.Kalderon@cavium.com>
To: michal.kalderon@cavium.com, ram.amrani@cavium.com,
	yuval.mintz@cavium.com, ariel.elior@cavium.com,
	davem@davemloft.net, netdev@vger.kernel.org,
	linux-rdma@vger.kernel.org, dledford@redhat.com
Cc: Michal Kalderon <Michal.Kalderon@cavium.com>,
	Yuval Mintz <Yuval.Mintz@cavium.com>,
	Ariel Elior <Ariel.Elior@cavium.com>
Subject: [PATCH net-next 07/12] qed: iWARP CM add passive side connect
Date: Sun, 2 Jul 2017 10:29:27 +0300	[thread overview]
Message-ID: <1498980572-29519-8-git-send-email-Michal.Kalderon@cavium.com> (raw)
In-Reply-To: <1498980572-29519-1-git-send-email-Michal.Kalderon@cavium.com>

This patch implements the passive side connect.
It covers pre-allocating resources, creating a connection element
when a valid SYN packet is received, calling the upper layer, and
implementing the accept/reject calls.

Error handling is not part of this patch.
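
For illustration only (not part of the patch): a minimal sketch of how an
upper-layer driver might answer the QED_IWARP_EVENT_MPA_REQUEST callback
using the iwarp_accept/iwarp_reject ops added below. The my_conn context,
its fields and the want_connection flag are hypothetical placeholders, and
struct qed_iwarp_cm_event_params is assumed to come from the earlier
listen patch in this series.

struct my_conn {				/* hypothetical ULP per-connection context */
	void *rdma_cxt;				/* qed rdma context handle */
	const struct qed_rdma_ops *ops;		/* ops table with iwarp_accept/iwarp_reject */
	struct qed_rdma_qp *qp;			/* QP prepared ahead of the accept */
	bool want_connection;			/* ULP decision for this MPA request */
};

static int my_mpa_request_cb(void *cb_context,
			     struct qed_iwarp_cm_event_params *params)
{
	struct my_conn *conn = cb_context;
	struct qed_iwarp_accept_in accept = {};

	if (params->event != QED_IWARP_EVENT_MPA_REQUEST)
		return 0;

	if (!conn->want_connection) {
		struct qed_iwarp_reject_in reject = {};

		reject.ep_context = params->ep_context;
		reject.cb_context = conn;
		/* no private data returned on reject in this sketch */
		return conn->ops->iwarp_reject(conn->rdma_cxt, &reject);
	}

	accept.ep_context = params->ep_context;
	accept.cb_context = conn;
	accept.qp = conn->qp;
	/* echo the ord/ird values the core derived from the MPA request */
	accept.ord = params->cm_info->ord;
	accept.ird = params->cm_info->ird;

	return conn->ops->iwarp_accept(conn->rdma_cxt, &accept);
}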

Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
---
 drivers/net/ethernet/qlogic/qed/qed.h       |   2 +
 drivers/net/ethernet/qlogic/qed/qed_dev.c   |  11 +
 drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 939 +++++++++++++++++++++++++++-
 drivers/net/ethernet/qlogic/qed/qed_iwarp.h |  62 ++
 drivers/net/ethernet/qlogic/qed/qed_l2.c    |  13 -
 drivers/net/ethernet/qlogic/qed/qed_rdma.h  |   2 +
 drivers/net/ethernet/qlogic/qed/qed_sp.h    |   2 +
 include/linux/qed/common_hsi.h              |   2 +
 include/linux/qed/qed_rdma_if.h             |  26 +-
 9 files changed, 1039 insertions(+), 20 deletions(-)

diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fd8cd5e..91003bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -789,6 +789,8 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 int qed_device_num_engines(struct qed_dev *cdev);
 int qed_device_get_port_id(struct qed_dev *cdev);
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
 
 #define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 6c8505d..4060a6a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -4127,3 +4127,14 @@ int qed_device_get_port_id(struct qed_dev *cdev)
 {
 	return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
 }
+
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
+{
+	((u8 *)fw_msb)[0] = mac[1];
+	((u8 *)fw_msb)[1] = mac[0];
+	((u8 *)fw_mid)[0] = mac[3];
+	((u8 *)fw_mid)[1] = mac[2];
+	((u8 *)fw_lsb)[0] = mac[5];
+	((u8 *)fw_lsb)[1] = mac[4];
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 2bab57c..a6dadae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -44,9 +44,30 @@
 
 #define QED_IWARP_ORD_DEFAULT		32
 #define QED_IWARP_IRD_DEFAULT		32
+#define QED_IWARP_MAX_FW_MSS		4120
+
+#define QED_EP_SIG 0xecabcdef
+
+struct mpa_v2_hdr {
+	__be16 ird;
+	__be16 ord;
+};
+
+#define MPA_V2_PEER2PEER_MODEL  0x8000
+#define MPA_V2_SEND_RTR         0x4000	/* on ird */
+#define MPA_V2_READ_RTR         0x4000	/* on ord */
+#define MPA_V2_WRITE_RTR        0x8000
+#define MPA_V2_IRD_ORD_MASK     0x3FFF
+
+#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
+
+#define QED_IWARP_INVALID_TCP_CID	0xffffffff
 #define QED_IWARP_RCV_WND_SIZE_DEF	(256 * 1024)
 #define QED_IWARP_RCV_WND_SIZE_MIN	(64 * 1024)
+#define TIMESTAMP_HEADER_SIZE		(12)
+
 #define QED_IWARP_TS_EN			BIT(0)
+#define QED_IWARP_DA_EN			BIT(1)
 #define QED_IWARP_PARAM_CRC_NEEDED	(1)
 #define QED_IWARP_PARAM_P2P		(1)
 
@@ -63,7 +84,8 @@ void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
 	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
 	dev->max_qp = min_t(u32,
 			    IWARP_MAX_QPS,
-			    p_hwfn->p_rdma_info->num_qps);
+			    p_hwfn->p_rdma_info->num_qps) -
+		      QED_IWARP_PREALLOC_CNT;
 
 	dev->max_cq = dev->max_qp;
 
@@ -78,12 +100,22 @@ void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	p_hwfn->b_rdma_enabled_in_prs = true;
 }
 
+/* We have two cid maps: one for tcp, which should be used only from
+ * passive syn processing and when replacing a pre-allocated ep in the
+ * list; the second is for active tcp and for QPs.
+ */
 static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
 {
 	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 
 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
-	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+
+	if (cid < QED_IWARP_PREALLOC_CNT)
+		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
+				    cid);
+	else
+		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+
 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
@@ -107,6 +139,45 @@ static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 	return rc;
 }
 
+static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+/* This function allocates a cid for passive tcp (called from syn receive).
+ * It is separate from the regular cid allocation because these cids are
+ * assured to already have ilt allocated; they are preallocated to ensure
+ * that we won't need to allocate memory during syn processing.
+ */
+static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
+{
+	int rc;
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+	rc = qed_rdma_bmap_alloc_id(p_hwfn,
+				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
+
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "can't allocate iwarp tcp cid max-count=%d\n",
+			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
+
+		*cid = QED_IWARP_INVALID_TCP_CID;
+		return rc;
+	}
+
+	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
+					    p_hwfn->p_rdma_info->proto);
+	return 0;
+}
+
 int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
 			struct qed_rdma_qp *qp,
 			struct qed_rdma_create_qp_out_params *out_params)
@@ -403,6 +474,26 @@ int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	return rc;
 }
 
+static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
+				 struct qed_iwarp_ep *ep,
+				 bool remove_from_active_list)
+{
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(*ep->ep_buffer_virt),
+			  ep->ep_buffer_virt, ep->ep_buffer_phys);
+
+	if (remove_from_active_list) {
+		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+		list_del(&ep->list_entry);
+		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+	}
+
+	if (ep->qp)
+		ep->qp->ep = NULL;
+
+	kfree(ep);
+}
+
 int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 {
 	int rc = 0;
@@ -424,6 +515,507 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	return rc;
 }
 
+static int
+qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
+{
+	struct qed_iwarp_ep *ep;
+	int rc;
+
+	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	ep->state = QED_IWARP_EP_INIT;
+
+	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+						sizeof(*ep->ep_buffer_virt),
+						&ep->ep_buffer_phys,
+						GFP_KERNEL);
+	if (!ep->ep_buffer_virt) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	ep->sig = QED_EP_SIG;
+
+	*ep_out = ep;
+
+	return 0;
+
+err:
+	kfree(ep);
+	return rc;
+}
+
+static void
+qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
+			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
+{
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
+		   p_tcp_ramrod->tcp.local_mac_addr_lo,
+		   p_tcp_ramrod->tcp.local_mac_addr_mid,
+		   p_tcp_ramrod->tcp.local_mac_addr_hi,
+		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
+		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
+		   p_tcp_ramrod->tcp.remote_mac_addr_hi);
+
+	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
+			   p_tcp_ramrod->tcp.local_ip,
+			   p_tcp_ramrod->tcp.local_port,
+			   p_tcp_ramrod->tcp.remote_ip,
+			   p_tcp_ramrod->tcp.remote_port,
+			   p_tcp_ramrod->tcp.vlan_id);
+	} else {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "local_ip=%pI6h:%x, remote_ip=%pI6h:%x, vlan=%x\n",
+			   p_tcp_ramrod->tcp.local_ip,
+			   p_tcp_ramrod->tcp.local_port,
+			   p_tcp_ramrod->tcp.remote_ip,
+			   p_tcp_ramrod->tcp.remote_port,
+			   p_tcp_ramrod->tcp.vlan_id);
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
+		   p_tcp_ramrod->tcp.flow_label,
+		   p_tcp_ramrod->tcp.ttl,
+		   p_tcp_ramrod->tcp.tos_or_tc,
+		   p_tcp_ramrod->tcp.mss,
+		   p_tcp_ramrod->tcp.rcv_wnd_scale,
+		   p_tcp_ramrod->tcp.connect_mode,
+		   p_tcp_ramrod->tcp.flags);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
+		   p_tcp_ramrod->tcp.syn_ip_payload_length,
+		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
+		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
+}
+
+static int
+qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
+	struct tcp_offload_params_opt2 *tcp;
+	struct qed_sp_init_data init_data;
+	struct qed_spq_entry *p_ent;
+	dma_addr_t async_output_phys;
+	dma_addr_t in_pdata_phys;
+	u16 physical_q;
+	u8 tcp_flags;
+	int rc;
+	int i;
+
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = ep->tcp_cid;
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = QED_SPQ_MODE_CB;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
+				 PROTOCOLID_IWARP, &init_data);
+	if (rc)
+		return rc;
+
+	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
+
+	in_pdata_phys = ep->ep_buffer_phys +
+			offsetof(struct qed_iwarp_ep_memory, in_pdata);
+	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
+		       in_pdata_phys);
+
+	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
+	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
+
+	async_output_phys = ep->ep_buffer_phys +
+			    offsetof(struct qed_iwarp_ep_memory, async_output);
+	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
+		       async_output_phys);
+
+	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
+	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
+
+	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
+	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
+	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
+
+	tcp = &p_tcp_ramrod->tcp;
+	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
+			    &tcp->remote_mac_addr_mid,
+			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
+	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
+			    &tcp->local_mac_addr_lo, ep->local_mac_addr);
+
+	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
+
+	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
+	tcp->flags = 0;
+	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
+		  !!(tcp_flags & QED_IWARP_TS_EN));
+
+	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
+		  !!(tcp_flags & QED_IWARP_DA_EN));
+
+	tcp->ip_version = ep->cm_info.ip_version;
+
+	for (i = 0; i < 4; i++) {
+		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
+		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
+	}
+
+	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
+	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
+	tcp->mss = cpu_to_le16(ep->mss);
+	tcp->flow_label = 0;
+	tcp->ttl = 0x40;
+	tcp->tos_or_tc = 0;
+
+	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
+	tcp->connect_mode = ep->connect_mode;
+
+	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+		tcp->syn_ip_payload_length =
+			cpu_to_le16(ep->syn_ip_payload_length);
+		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
+		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
+	}
+
+	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
+
+	return rc;
+}
+
+static void
+qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	struct qed_iwarp_cm_event_params params;
+	struct mpa_v2_hdr *mpa_v2;
+	union async_output *async_data;
+	u16 mpa_ord, mpa_ird;
+	u8 mpa_hdr_size = 0;
+	u8 mpa_rev;
+
+	async_data = &ep->ep_buffer_virt->async_output;
+
+	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
+		   async_data->mpa_request.ulp_data_len,
+		   mpa_rev, *((u32 *)((u8 *)ep->ep_buffer_virt->in_pdata)));
+
+	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
+		/* Read ord/ird values from private data buffer */
+		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
+		mpa_hdr_size = sizeof(*mpa_v2);
+
+		mpa_ord = ntohs(mpa_v2->ord);
+		mpa_ird = ntohs(mpa_v2->ird);
+
+		/* Temporarily store the incoming ord/ird requested in cm_info;
+		 * replaced with the negotiated values during accept
+		 */
+		ep->cm_info.ord = (u8)min_t(u16,
+					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
+					    QED_IWARP_ORD_DEFAULT);
+
+		ep->cm_info.ird = (u8)min_t(u16,
+					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
+					    QED_IWARP_IRD_DEFAULT);
+
+		/* Peer2Peer negotiation */
+		ep->rtr_type = MPA_RTR_TYPE_NONE;
+		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
+			if (mpa_ord & MPA_V2_WRITE_RTR)
+				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
+
+			if (mpa_ord & MPA_V2_READ_RTR)
+				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
+
+			if (mpa_ird & MPA_V2_SEND_RTR)
+				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
+
+			ep->rtr_type &= iwarp_info->rtr_type;
+
+			/* if we're left with no match send our capabilities */
+			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
+				ep->rtr_type = iwarp_info->rtr_type;
+		}
+
+		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
+	} else {
+		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
+		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
+		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
+		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
+		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);
+
+	/* Strip mpa v2 hdr from private data before sending to upper layer */
+	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
+
+	ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
+				       mpa_hdr_size;
+
+	params.event = QED_IWARP_EVENT_MPA_REQUEST;
+	params.cm_info = &ep->cm_info;
+	params.ep_context = ep;
+	params.status = 0;
+
+	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
+	ep->event_cb(ep->cb_context, &params);
+}
+
+static int
+qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+	struct qed_sp_init_data init_data;
+	dma_addr_t async_output_phys;
+	struct qed_spq_entry *p_ent;
+	dma_addr_t out_pdata_phys;
+	dma_addr_t in_pdata_phys;
+	struct qed_rdma_qp *qp;
+	bool reject;
+	int rc;
+
+	if (!ep)
+		return -EINVAL;
+
+	qp = ep->qp;
+	reject = !qp;
+
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = reject ? ep->tcp_cid : qp->icid;
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
+				 PROTOCOLID_IWARP, &init_data);
+	if (rc)
+		return rc;
+
+	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
+	out_pdata_phys = ep->ep_buffer_phys +
+			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
+	DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
+		       out_pdata_phys);
+	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
+	    ep->cm_info.private_data_len;
+	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
+
+	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
+	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
+
+	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
+
+	in_pdata_phys = ep->ep_buffer_phys +
+			offsetof(struct qed_iwarp_ep_memory, in_pdata);
+	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
+	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
+		       in_pdata_phys);
+	p_mpa_ramrod->incoming_ulp_buffer.len =
+	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
+	async_output_phys = ep->ep_buffer_phys +
+			    offsetof(struct qed_iwarp_ep_memory, async_output);
+	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
+		       async_output_phys);
+	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
+	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
+
+	if (!reject) {
+		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
+			       qp->shared_queue_phys_addr);
+		p_mpa_ramrod->stats_counter_id =
+		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
+	} else {
+		p_mpa_ramrod->common.reject = 1;
+	}
+
+	p_mpa_ramrod->mode = ep->mpa_rev;
+	SET_FIELD(p_mpa_ramrod->rtr_pref,
+		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
+
+	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+	if (!reject)
+		ep->cid = qp->icid;	/* Now they're migrated. */
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_RDMA,
+		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
+		   reject ? 0xffff : qp->icid,
+		   ep->tcp_cid,
+		   rc,
+		   ep->cm_info.ird,
+		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
+	return rc;
+}
+
+static void
+qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	ep->state = QED_IWARP_EP_INIT;
+	if (ep->qp)
+		ep->qp->ep = NULL;
+	ep->qp = NULL;
+	memset(&ep->cm_info, 0, sizeof(ep->cm_info));
+
+	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
+		/* We don't care about the return code, it's ok if tcp_cid
+		 * remains invalid...in this case we'll defer allocation
+		 */
+		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
+	}
+	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+	list_del(&ep->list_entry);
+	list_add_tail(&ep->list_entry,
+		      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
+
+	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+}
+
+#define QED_IWARP_CONNECT_MODE_STRING(ep) \
+	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
+
+/* Called as a result of the event:
+ * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
+ */
+static void
+qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
+		       struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+	struct qed_iwarp_cm_event_params params;
+
+	params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
+		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
+
+	params.cm_info = &ep->cm_info;
+
+	params.ep_context = ep;
+
+	ep->state = QED_IWARP_EP_CLOSED;
+
+	switch (fw_return_code) {
+	case RDMA_RETURN_OK:
+		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
+		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
+		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
+		ep->state = QED_IWARP_EP_ESTABLISHED;
+		params.status = 0;
+		break;
+	default:
+		params.status = -ECONNRESET;
+		break;
+	}
+
+	ep->event_cb(ep->cb_context, &params);
+}
+
+static void
+qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
+			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
+{
+	struct mpa_v2_hdr *mpa_v2_params;
+	u16 mpa_ird, mpa_ord;
+
+	*mpa_data_size = 0;
+	if (MPA_REV2(ep->mpa_rev)) {
+		mpa_v2_params =
+		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
+		*mpa_data_size = sizeof(*mpa_v2_params);
+
+		mpa_ird = (u16)ep->cm_info.ird;
+		mpa_ord = (u16)ep->cm_info.ord;
+
+		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
+			mpa_ird |= MPA_V2_PEER2PEER_MODEL;
+
+			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
+				mpa_ird |= MPA_V2_SEND_RTR;
+
+			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
+				mpa_ord |= MPA_V2_WRITE_RTR;
+
+			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
+				mpa_ord |= MPA_V2_READ_RTR;
+		}
+
+		mpa_v2_params->ird = htons(mpa_ird);
+		mpa_v2_params->ord = htons(mpa_ord);
+
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_RDMA,
+			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
+			   mpa_v2_params->ird,
+			   mpa_v2_params->ord,
+			   *((u32 *)mpa_v2_params),
+			   mpa_ord & MPA_V2_IRD_ORD_MASK,
+			   mpa_ird & MPA_V2_IRD_ORD_MASK,
+			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
+			   !!(mpa_ird & MPA_V2_SEND_RTR),
+			   !!(mpa_ord & MPA_V2_WRITE_RTR),
+			   !!(mpa_ord & MPA_V2_READ_RTR));
+	}
+}
+
+static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
+{
+	struct qed_iwarp_ep *ep = NULL;
+	int rc;
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
+		DP_ERR(p_hwfn, "Ep list is empty\n");
+		goto out;
+	}
+
+	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
+			      struct qed_iwarp_ep, list_entry);
+
+	/* in some cases we could have failed allocating a tcp cid when added
+	 * from accept / failure... retry now... this is not the common case.
+	 */
+	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
+		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
+
+		/* if we fail we could look for another entry with a valid
+		 * tcp_cid, but since we don't expect to reach this anyway
+		 * it's not worth the handling
+		 */
+		if (rc) {
+			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
+			ep = NULL;
+			goto out;
+		}
+	}
+
+	list_del(&ep->list_entry);
+
+out:
+	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+	return ep;
+}
+
 #define QED_IWARP_MAX_CID_CLEAN_TIME  100
 #define QED_IWARP_MAX_NO_PROGRESS_CNT 5
 
@@ -465,20 +1057,213 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 
 static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
 {
+	int rc;
+	int i;
+
+	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
+					    &p_hwfn->p_rdma_info->tcp_cid_map);
+	if (rc)
+		return rc;
+
+	/* Now free the tcp cids from the main cid map */
+	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
+		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
+
 	/* Now wait for all cids to be completed */
 	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
 					      &p_hwfn->p_rdma_info->cid_map);
 }
 
+static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
+{
+	struct qed_iwarp_ep *ep;
+
+	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
+		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
+				      struct qed_iwarp_ep, list_entry);
+
+		if (!ep) {
+			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+			break;
+		}
+		list_del(&ep->list_entry);
+
+		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
+			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
+
+		qed_iwarp_destroy_ep(p_hwfn, ep, false);
+	}
+}
+
+static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
+{
+	struct qed_iwarp_ep *ep;
+	int rc = 0;
+	int count;
+	u32 cid;
+	int i;
+
+	count = init ? QED_IWARP_PREALLOC_CNT : 1;
+	for (i = 0; i < count; i++) {
+		rc = qed_iwarp_create_ep(p_hwfn, &ep);
+		if (rc)
+			return rc;
+
+		/* During initialization we allocate from the main pool,
+		 * afterwards we allocate only from the tcp_cid.
+		 */
+		if (init) {
+			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
+			if (rc)
+				goto err;
+			qed_iwarp_set_tcp_cid(p_hwfn, cid);
+		} else {
+			/* We don't care about the return code, it's ok if
+			 * tcp_cid remains invalid...in this case we'll
+			 * defer allocation
+			 */
+			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
+		}
+
+		ep->tcp_cid = cid;
+
+		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+		list_add_tail(&ep->list_entry,
+			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
+		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+	}
+
+	return rc;
+
+err:
+	qed_iwarp_destroy_ep(p_hwfn, ep, false);
+
+	return rc;
+}
+
 int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
 {
+	int rc;
+
+	/* Allocate bitmap for tcp cid. These are used by passive side
+	 * to ensure it can allocate a tcp cid during dpc that was
+	 * pre-acquired and doesn't require dynamic allocation of ilt
+	 */
+	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
+				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "Failed to allocate tcp cid, rc = %d\n", rc);
+		return rc;
+	}
+
+	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
 	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 
-	return 0;
+	return qed_iwarp_prealloc_ep(p_hwfn, true);
 }
 
 void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 {
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
+}
+
+int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
+{
+	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+	struct qed_iwarp_ep *ep;
+	u8 mpa_data_size = 0;
+	int rc;
+
+	ep = (struct qed_iwarp_ep *)iparams->ep_context;
+	if (!ep) {
+		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
+		   iparams->qp->icid, ep->tcp_cid);
+
+	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
+	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_RDMA,
+			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
+			   iparams->qp->icid,
+			   ep->tcp_cid, iparams->ord, iparams->ird);
+		return -EINVAL;
+	}
+
+	qed_iwarp_prealloc_ep(p_hwfn, false);
+
+	ep->cb_context = iparams->cb_context;
+	ep->qp = iparams->qp;
+	ep->qp->ep = ep;
+
+	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
+		/* Negotiate ord/ird: if upper layer requested ord larger than
+		 * ird advertised by remote, we need to decrease our ord
+		 */
+		if (iparams->ord > ep->cm_info.ird)
+			iparams->ord = ep->cm_info.ird;
+
+		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
+		    (iparams->ird == 0))
+			iparams->ird = 1;
+	}
+
+	/* Update cm_info ord/ird to be negotiated values */
+	ep->cm_info.ord = iparams->ord;
+	ep->cm_info.ird = iparams->ird;
+
+	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+	ep->cm_info.private_data_len = iparams->private_data_len +
+				       mpa_data_size;
+
+	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+	       iparams->private_data, iparams->private_data_len);
+
+	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
+	if (rc)
+		qed_iwarp_modify_qp(p_hwfn,
+				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
+
+	return rc;
+}
+
+int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
+{
+	struct qed_hwfn *p_hwfn = rdma_cxt;
+	struct qed_iwarp_ep *ep;
+	u8 mpa_data_size = 0;
+
+	ep = iparams->ep_context;
+	if (!ep) {
+		DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n");
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
+
+	ep->cb_context = iparams->cb_context;
+	ep->qp = NULL;
+
+	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+	ep->cm_info.private_data_len = iparams->private_data_len +
+				       mpa_data_size;
+
+	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+	       iparams->private_data, iparams->private_data_len);
+
+	return qed_iwarp_mpa_offload(p_hwfn, ep);
 }
 
 static void
@@ -526,6 +1311,38 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 	return rc;
 }
 
+static bool
+qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
+{
+	struct qed_iwarp_ep *ep = NULL;
+	bool found = false;
+
+	list_for_each_entry(ep,
+			    &p_hwfn->p_rdma_info->iwarp.ep_list,
+			    list_entry) {
+		if ((ep->cm_info.local_port == cm_info->local_port) &&
+		    (ep->cm_info.remote_port == cm_info->remote_port) &&
+		    (ep->cm_info.vlan == cm_info->vlan) &&
+		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
+			    sizeof(cm_info->local_ip)) &&
+		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
+			    sizeof(cm_info->remote_ip))) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found) {
+		DP_NOTICE(p_hwfn,
+			  "SYN received on active connection - dropping\n");
+		qed_iwarp_print_cm_info(p_hwfn, cm_info);
+
+		return true;
+	}
+
+	return false;
+}
+
 static struct qed_iwarp_listener *
 qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
 		       struct qed_iwarp_cm_info *cm_info)
@@ -596,9 +1413,8 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 
 	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
 
-	memcpy(remote_mac_addr, ethh->h_source, ETH_ALEN);
-
-	memcpy(local_mac_addr, ethh->h_dest, ETH_ALEN);
+	ether_addr_copy(remote_mac_addr, ethh->h_source);
+	ether_addr_copy(local_mac_addr, ethh->h_dest);
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
 		   eth_type, ethh->h_source);
@@ -661,9 +1477,12 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 	struct qed_hwfn *p_hwfn = cxt;
 	u8 remote_mac_addr[ETH_ALEN];
 	u8 local_mac_addr[ETH_ALEN];
+	struct qed_iwarp_ep *ep;
 	int tcp_start_offset;
+	u8 ts_hdr_size = 0;
 	u8 ll2_syn_handle;
 	int payload_len;
+	u32 hdr_size;
 	int rc;
 
 	memset(&cm_info, 0, sizeof(cm_info));
@@ -719,6 +1538,49 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 	}
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
+	/* There may be an open ep on this connection if this is a syn
+	 * retransmit... need to make sure there isn't...
+	 */
+	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
+		goto err;
+
+	ep = qed_iwarp_get_free_ep(p_hwfn);
+	if (!ep)
+		goto err;
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
+	ether_addr_copy(ep->local_mac_addr, local_mac_addr);
+
+	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
+
+	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
+		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
+
+	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
+		   ts_hdr_size;
+	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
+	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
+
+	ep->event_cb = listener->event_cb;
+	ep->cb_context = listener->cb_context;
+	ep->connect_mode = TCP_CONNECT_PASSIVE;
+
+	ep->syn = buf;
+	ep->syn_ip_payload_length = (u16)payload_len;
+	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
+			   tcp_start_offset;
+
+	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
+	if (rc) {
+		qed_iwarp_return_ep(p_hwfn, ep);
+		goto err;
+	}
+
+	return;
 err:
 	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
 }
@@ -905,7 +1767,12 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 
 	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
 
+	iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
+				MPA_RTR_TYPE_ZERO_WRITE |
+				MPA_RTR_TYPE_ZERO_READ;
+
 	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
 	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
 
 	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
@@ -918,6 +1785,7 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	int rc;
 
+	qed_iwarp_free_prealloc_ep(p_hwfn);
 	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
 	if (rc)
 		return rc;
@@ -927,11 +1795,70 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
 }
 
+void
+qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
+			   struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+	/* Done with the SYN packet, post back to ll2 rx */
+	qed_iwarp_ll2_post_rx(p_hwfn, ep->syn,
+			      p_hwfn->p_rdma_info->iwarp.ll2_syn_handle);
+	ep->syn = NULL;
+
+	/* If connect failed - upper layer doesn't know about it */
+	qed_iwarp_mpa_received(p_hwfn, ep);
+}
+
+static inline bool
+qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	if (!ep || (ep->sig != QED_EP_SIG)) {
+		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
+		return false;
+	}
+
+	return true;
+}
+
 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
 				 u8 fw_event_code, u16 echo,
 				 union event_ring_data *data,
 				 u8 fw_return_code)
 {
+	struct regpair *fw_handle = &data->rdma_data.async_handle;
+	struct qed_iwarp_ep *ep = NULL;
+	u16 cid;
+
+	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
+						       fw_handle->lo);
+
+	switch (fw_event_code) {
+	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
+		/* Async completion after TCP 3-way handshake */
+		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+			return -EINVAL;
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_RDMA,
+			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
+			   ep->tcp_cid, fw_return_code);
+		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
+		break;
+	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
+		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+			return -EINVAL;
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_RDMA,
+			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
+			   ep->cid, fw_return_code);
+		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
+		break;
+	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
+		cid = (u16)le32_to_cpu(fw_handle->lo);
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
+		qed_iwarp_cid_cleaned(p_hwfn, cid);
+
+		break;
+	}
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index 29005ac..bedac98 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -42,6 +42,8 @@ enum qed_iwarp_qp_state {
 
 enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
 
+#define QED_IWARP_PREALLOC_CNT  (256)
+
 #define QED_IWARP_LL2_SYN_TX_SIZE       (128)
 #define QED_IWARP_LL2_SYN_RX_SIZE       (256)
 #define QED_IWARP_MAX_SYN_PKT_SIZE      (128)
@@ -55,6 +57,8 @@ struct qed_iwarp_ll2_buff {
 
 struct qed_iwarp_info {
 	struct list_head listen_list;	/* qed_iwarp_listener */
+	struct list_head ep_list;	/* qed_iwarp_ep */
+	struct list_head ep_free_list;	/* pre-allocated ep's */
 	spinlock_t iw_lock;	/* for iwarp resources */
 	spinlock_t qp_lock;	/* for teardown races */
 	u32 rcv_wnd_scale;
@@ -68,6 +72,61 @@ struct qed_iwarp_info {
 	enum mpa_rtr_type rtr_type;
 };
 
+enum qed_iwarp_ep_state {
+	QED_IWARP_EP_INIT,
+	QED_IWARP_EP_MPA_REQ_RCVD,
+	QED_IWARP_EP_MPA_OFFLOADED,
+	QED_IWARP_EP_ESTABLISHED,
+	QED_IWARP_EP_CLOSED
+};
+
+union async_output {
+	struct iwarp_eqe_data_mpa_async_completion mpa_response;
+	struct iwarp_eqe_data_tcp_async_completion mpa_request;
+};
+
+#define QED_MAX_PRIV_DATA_LEN (512)
+struct qed_iwarp_ep_memory {
+	u8 in_pdata[QED_MAX_PRIV_DATA_LEN];
+	u8 out_pdata[QED_MAX_PRIV_DATA_LEN];
+	union async_output async_output;
+};
+
+/* Endpoint structure represents a TCP connection. This connection can be
+ * associated with a QP or not (in which case QP==NULL)
+ */
+struct qed_iwarp_ep {
+	struct list_head list_entry;
+	struct qed_rdma_qp *qp;
+	struct qed_iwarp_ep_memory *ep_buffer_virt;
+	dma_addr_t ep_buffer_phys;
+	enum qed_iwarp_ep_state state;
+	int sig;
+	struct qed_iwarp_cm_info cm_info;
+	enum tcp_connect_mode connect_mode;
+	enum mpa_rtr_type rtr_type;
+	enum mpa_negotiation_mode mpa_rev;
+	u32 tcp_cid;
+	u32 cid;
+	u16 mss;
+	u8 remote_mac_addr[6];
+	u8 local_mac_addr[6];
+	bool mpa_reply_processed;
+
+	/* For Passive side - syn packet related data */
+	u16 syn_ip_payload_length;
+	struct qed_iwarp_ll2_buff *syn;
+	dma_addr_t syn_phy_addr;
+
+	/* The event_cb function is called for asynchronous events associated
+	 * with the ep. It is initialized at different entry points depending
+	 * on whether the ep is the tcp connection active side or passive side.
+	 * The cb_context is passed to the event_cb function.
+	 */
+	iwarp_event_handler event_cb;
+	void *cb_context;
+};
+
 struct qed_iwarp_listener {
 	struct list_head list_entry;
 
@@ -115,6 +174,9 @@ void qed_iwarp_query_qp(struct qed_rdma_qp *qp,
 			struct qed_iwarp_listen_in *iparams,
 			struct qed_iwarp_listen_out *oparams);
 
+int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams);
+
+int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams);
 int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle);
 
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 27ea54b..0ba5ec8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1227,19 +1227,6 @@ static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
 	return action;
 }
 
-static void qed_set_fw_mac_addr(__le16 *fw_msb,
-				__le16 *fw_mid,
-				__le16 *fw_lsb,
-				u8 *mac)
-{
-	((u8 *)fw_msb)[0] = mac[1];
-	((u8 *)fw_msb)[1] = mac[0];
-	((u8 *)fw_mid)[0] = mac[3];
-	((u8 *)fw_mid)[1] = mac[2];
-	((u8 *)fw_lsb)[0] = mac[5];
-	((u8 *)fw_lsb)[1] = mac[4];
-}
-
 static int
 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
 			u16 opaque_fid,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 90e4e0f..18ec9cb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -85,6 +85,7 @@ struct qed_rdma_info {
 	struct qed_bmap qp_map;
 	struct qed_bmap srq_map;
 	struct qed_bmap cid_map;
+	struct qed_bmap tcp_cid_map;
 	struct qed_bmap real_cid_map;
 	struct qed_bmap dpi_map;
 	struct qed_bmap toggle_bits;
@@ -167,6 +168,7 @@ struct qed_rdma_qp {
 
 	void *shared_queue;
 	dma_addr_t shared_queue_phys_addr;
+	struct qed_iwarp_ep *ep;
 };
 
 #if IS_ENABLED(CONFIG_QED_RDMA)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index c3752c5..ab4ad8a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -111,6 +111,8 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
 	struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
 	struct rdma_srq_modify_ramrod_data rdma_modify_srq;
 	struct iwarp_create_qp_ramrod_data iwarp_create_qp;
+	struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
+	struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
 	struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
 	struct iwarp_init_func_ramrod_data iwarp_init_func;
 	struct fcoe_init_ramrod_params fcoe_init;
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 885ae13..39e2a2a 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -38,6 +38,8 @@
 #include <linux/slab.h>
 
 /* dma_addr_t manip */
+#define PTR_LO(x)               ((u32)(((uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x)               ((u32)((((uintptr_t)(x)) >> 16) >> 16))
 #define DMA_LO_LE(x)		cpu_to_le32(lower_32_bits(x))
 #define DMA_HI_LE(x)		cpu_to_le32(upper_32_bits(x))
 #define DMA_REGPAIR_LE(x, val)	do { \
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 28df568..c4c241f 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -471,7 +471,8 @@ struct qed_rdma_counters_out_params {
 #define QED_ROCE_TX_FRAG_FAILURE        (2)
 
 enum qed_iwarp_event_type {
-	QED_IWARP_EVENT_MPA_REQUEST,	/* Passive side request received */
+	QED_IWARP_EVENT_MPA_REQUEST,	  /* Passive side request received */
+	QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
 };
 
 enum qed_tcp_ip_version {
@@ -516,6 +517,23 @@ struct qed_iwarp_listen_out {
 	void *handle;
 };
 
+struct qed_iwarp_accept_in {
+	void *ep_context;
+	void *cb_context;
+	struct qed_rdma_qp *qp;
+	const void *private_data;
+	u16 private_data_len;
+	u8 ord;
+	u8 ird;
+};
+
+struct qed_iwarp_reject_in {
+	void *ep_context;
+	void *cb_context;
+	const void *private_data;
+	u16 private_data_len;
+};
+
 struct qed_roce_ll2_header {
 	void *vaddr;
 	dma_addr_t baddr;
@@ -626,6 +644,12 @@ struct qed_rdma_ops {
 				   struct qed_iwarp_listen_in *iparams,
 				   struct qed_iwarp_listen_out *oparams);
 
+	int (*iwarp_accept)(void *rdma_cxt,
+			    struct qed_iwarp_accept_in *iparams);
+
+	int (*iwarp_reject)(void *rdma_cxt,
+			    struct qed_iwarp_reject_in *iparams);
+
 	int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);
 
 };
-- 
1.8.3.1

WARNING: multiple messages have this Message-ID (diff)
From: Michal Kalderon <Michal.Kalderon@cavium.com>
To: <michal.kalderon@cavium.com>, <ram.amrani@cavium.com>,
	<yuval.mintz@cavium.com>, <ariel.elior@cavium.com>,
	<davem@davemloft.net>, <netdev@vger.kernel.org>,
	<linux-rdma@vger.kernel.org>, <dledford@redhat.com>
Cc: Michal Kalderon <Michal.Kalderon@cavium.com>,
	Yuval Mintz <Yuval.Mintz@cavium.com>,
	Ariel Elior <Ariel.Elior@cavium.com>
Subject: [PATCH net-next 07/12] qed: iWARP CM add passive side connect
Date: Sun, 2 Jul 2017 10:29:27 +0300	[thread overview]
Message-ID: <1498980572-29519-8-git-send-email-Michal.Kalderon@cavium.com> (raw)
In-Reply-To: <1498980572-29519-1-git-send-email-Michal.Kalderon@cavium.com>

This patch implements the passive side connect.
It addresses pre-allocating resources, creating a connection
element upon valid SYN packet received. Calling upper layer and
implementation of the accept/reject calls.

Error handling is not part of this patch.

Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
---
 drivers/net/ethernet/qlogic/qed/qed.h       |   2 +
 drivers/net/ethernet/qlogic/qed/qed_dev.c   |  11 +
 drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 939 +++++++++++++++++++++++++++-
 drivers/net/ethernet/qlogic/qed/qed_iwarp.h |  62 ++
 drivers/net/ethernet/qlogic/qed/qed_l2.c    |  13 -
 drivers/net/ethernet/qlogic/qed/qed_rdma.h  |   2 +
 drivers/net/ethernet/qlogic/qed/qed_sp.h    |   2 +
 include/linux/qed/common_hsi.h              |   2 +
 include/linux/qed/qed_rdma_if.h             |  26 +-
 9 files changed, 1039 insertions(+), 20 deletions(-)

diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fd8cd5e..91003bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -789,6 +789,8 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 int qed_device_num_engines(struct qed_dev *cdev);
 int qed_device_get_port_id(struct qed_dev *cdev);
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
 
 #define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 6c8505d..4060a6a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -4127,3 +4127,14 @@ int qed_device_get_port_id(struct qed_dev *cdev)
 {
 	return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
 }
+
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
+{
+	((u8 *)fw_msb)[0] = mac[1];
+	((u8 *)fw_msb)[1] = mac[0];
+	((u8 *)fw_mid)[0] = mac[3];
+	((u8 *)fw_mid)[1] = mac[2];
+	((u8 *)fw_lsb)[0] = mac[5];
+	((u8 *)fw_lsb)[1] = mac[4];
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 2bab57c..a6dadae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -44,9 +44,30 @@
 
 #define QED_IWARP_ORD_DEFAULT		32
 #define QED_IWARP_IRD_DEFAULT		32
+#define QED_IWARP_MAX_FW_MSS		4120
+
+#define QED_EP_SIG 0xecabcdef
+
+struct mpa_v2_hdr {
+	__be16 ird;
+	__be16 ord;
+};
+
+#define MPA_V2_PEER2PEER_MODEL  0x8000
+#define MPA_V2_SEND_RTR         0x4000	/* on ird */
+#define MPA_V2_READ_RTR         0x4000	/* on ord */
+#define MPA_V2_WRITE_RTR        0x8000
+#define MPA_V2_IRD_ORD_MASK     0x3FFF
+
+#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
+
+#define QED_IWARP_INVALID_TCP_CID	0xffffffff
 #define QED_IWARP_RCV_WND_SIZE_DEF	(256 * 1024)
 #define QED_IWARP_RCV_WND_SIZE_MIN	(64 * 1024)
+#define TIMESTAMP_HEADER_SIZE		(12)
+
 #define QED_IWARP_TS_EN			BIT(0)
+#define QED_IWARP_DA_EN			BIT(1)
 #define QED_IWARP_PARAM_CRC_NEEDED	(1)
 #define QED_IWARP_PARAM_P2P		(1)
 
@@ -63,7 +84,8 @@ void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
 	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
 	dev->max_qp = min_t(u32,
 			    IWARP_MAX_QPS,
-			    p_hwfn->p_rdma_info->num_qps);
+			    p_hwfn->p_rdma_info->num_qps) -
+		      QED_IWARP_PREALLOC_CNT;
 
 	dev->max_cq = dev->max_qp;
 
@@ -78,12 +100,22 @@ void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	p_hwfn->b_rdma_enabled_in_prs = true;
 }
 
+/* We have two cid maps, one for tcp which should be used only from passive
+ * syn processing and replacing a pre-allocated ep in the list. The second
+ * for active tcp and for QPs.
+ */
 static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
 {
 	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 
 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
-	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+
+	if (cid < QED_IWARP_PREALLOC_CNT)
+		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
+				    cid);
+	else
+		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+
 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
@@ -107,6 +139,45 @@ static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 	return rc;
 }
 
+static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+/* This function allocates a cid for passive tcp (called from syn receive)
+ * the reason it's separate from the regular cid allocation is because it
+ * is assured that these cids already have ilt allocated. They are preallocated
+ * to ensure that we won't need to allocate memory during syn processing
+ */
+static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
+{
+	int rc;
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+	rc = qed_rdma_bmap_alloc_id(p_hwfn,
+				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
+
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "can't allocate iwarp tcp cid max-count=%d\n",
+			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
+
+		*cid = QED_IWARP_INVALID_TCP_CID;
+		return rc;
+	}
+
+	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
+					    p_hwfn->p_rdma_info->proto);
+	return 0;
+}
+
 int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
 			struct qed_rdma_qp *qp,
 			struct qed_rdma_create_qp_out_params *out_params)
@@ -403,6 +474,26 @@ int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	return rc;
 }
 
+static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
+				 struct qed_iwarp_ep *ep,
+				 bool remove_from_active_list)
+{
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(*ep->ep_buffer_virt),
+			  ep->ep_buffer_virt, ep->ep_buffer_phys);
+
+	if (remove_from_active_list) {
+		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+		list_del(&ep->list_entry);
+		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+	}
+
+	if (ep->qp)
+		ep->qp->ep = NULL;
+
+	kfree(ep);
+}
+
 int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 {
 	int rc = 0;
@@ -424,6 +515,507 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	return rc;
 }
 
+static int
+qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
+{
+	struct qed_iwarp_ep *ep;
+	int rc;
+
+	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	ep->state = QED_IWARP_EP_INIT;
+
+	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+						sizeof(*ep->ep_buffer_virt),
+						&ep->ep_buffer_phys,
+						GFP_KERNEL);
+	if (!ep->ep_buffer_virt) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	ep->sig = QED_EP_SIG;
+
+	*ep_out = ep;
+
+	return 0;
+
+err:
+	kfree(ep);
+	return rc;
+}
+
+static void
+qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
+			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
+{
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
+		   p_tcp_ramrod->tcp.local_mac_addr_lo,
+		   p_tcp_ramrod->tcp.local_mac_addr_mid,
+		   p_tcp_ramrod->tcp.local_mac_addr_hi,
+		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
+		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
+		   p_tcp_ramrod->tcp.remote_mac_addr_hi);
+
+	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "local_ip=%pI4h:%x, remote_ip=%pI4h%x, vlan=%x\n",
+			   p_tcp_ramrod->tcp.local_ip,
+			   p_tcp_ramrod->tcp.local_port,
+			   p_tcp_ramrod->tcp.remote_ip,
+			   p_tcp_ramrod->tcp.remote_port,
+			   p_tcp_ramrod->tcp.vlan_id);
+	} else {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "local_ip=%pI6h:%x, remote_ip=%pI6h:%x, vlan=%x\n",
+			   p_tcp_ramrod->tcp.local_ip,
+			   p_tcp_ramrod->tcp.local_port,
+			   p_tcp_ramrod->tcp.remote_ip,
+			   p_tcp_ramrod->tcp.remote_port,
+			   p_tcp_ramrod->tcp.vlan_id);
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
+		   p_tcp_ramrod->tcp.flow_label,
+		   p_tcp_ramrod->tcp.ttl,
+		   p_tcp_ramrod->tcp.tos_or_tc,
+		   p_tcp_ramrod->tcp.mss,
+		   p_tcp_ramrod->tcp.rcv_wnd_scale,
+		   p_tcp_ramrod->tcp.connect_mode,
+		   p_tcp_ramrod->tcp.flags);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
+		   p_tcp_ramrod->tcp.syn_ip_payload_length,
+		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
+		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
+}
+
+static int
+qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
+	struct tcp_offload_params_opt2 *tcp;
+	struct qed_sp_init_data init_data;
+	struct qed_spq_entry *p_ent;
+	dma_addr_t async_output_phys;
+	dma_addr_t in_pdata_phys;
+	u16 physical_q;
+	u8 tcp_flags;
+	int rc;
+	int i;
+
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = ep->tcp_cid;
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = QED_SPQ_MODE_CB;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
+				 PROTOCOLID_IWARP, &init_data);
+	if (rc)
+		return rc;
+
+	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
+
+	in_pdata_phys = ep->ep_buffer_phys +
+			offsetof(struct qed_iwarp_ep_memory, in_pdata);
+	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
+		       in_pdata_phys);
+
+	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
+	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
+
+	async_output_phys = ep->ep_buffer_phys +
+			    offsetof(struct qed_iwarp_ep_memory, async_output);
+	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
+		       async_output_phys);
+
+	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
+	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
+
+	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
+	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
+	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
+
+	tcp = &p_tcp_ramrod->tcp;
+	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
+			    &tcp->remote_mac_addr_mid,
+			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
+	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
+			    &tcp->local_mac_addr_lo, ep->local_mac_addr);
+
+	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
+
+	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
+	tcp->flags = 0;
+	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
+		  !!(tcp_flags & QED_IWARP_TS_EN));
+
+	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
+		  !!(tcp_flags & QED_IWARP_DA_EN));
+
+	tcp->ip_version = ep->cm_info.ip_version;
+
+	for (i = 0; i < 4; i++) {
+		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
+		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
+	}
+
+	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
+	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
+	tcp->mss = cpu_to_le16(ep->mss);
+	tcp->flow_label = 0;
+	tcp->ttl = 0x40;
+	tcp->tos_or_tc = 0;
+
+	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
+	tcp->connect_mode = ep->connect_mode;
+
+	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+		tcp->syn_ip_payload_length =
+			cpu_to_le16(ep->syn_ip_payload_length);
+		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
+		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
+	}
+
+	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
+
+	return rc;
+}
+
+static void
+qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	struct qed_iwarp_cm_event_params params;
+	struct mpa_v2_hdr *mpa_v2;
+	union async_output *async_data;
+	u16 mpa_ord, mpa_ird;
+	u8 mpa_hdr_size = 0;
+	u8 mpa_rev;
+
+	async_data = &ep->ep_buffer_virt->async_output;
+
+	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
+		   async_data->mpa_request.ulp_data_len,
+		   mpa_rev, *((u32 *)((u8 *)ep->ep_buffer_virt->in_pdata)));
+
+	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
+		/* Read ord/ird values from private data buffer */
+		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
+		mpa_hdr_size = sizeof(*mpa_v2);
+
+		mpa_ord = ntohs(mpa_v2->ord);
+		mpa_ird = ntohs(mpa_v2->ird);
+
+		/* Temprary store in cm_info incoming ord/ird requested, later
+		 * replace with negotiated value during accept
+		 */
+		ep->cm_info.ord = (u8)min_t(u16,
+					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
+					    QED_IWARP_ORD_DEFAULT);
+
+		ep->cm_info.ird = (u8)min_t(u16,
+					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
+					    QED_IWARP_IRD_DEFAULT);
+
+		/* Peer2Peer negotiation */
+		ep->rtr_type = MPA_RTR_TYPE_NONE;
+		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
+			if (mpa_ord & MPA_V2_WRITE_RTR)
+				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
+
+			if (mpa_ord & MPA_V2_READ_RTR)
+				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
+
+			if (mpa_ird & MPA_V2_SEND_RTR)
+				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
+
+			ep->rtr_type &= iwarp_info->rtr_type;
+
+			/* if we're left with no match send our capabilities */
+			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
+				ep->rtr_type = iwarp_info->rtr_type;
+		}
+
+		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
+	} else {
+		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
+		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
+		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
+		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
+		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);
+
+	/* Strip mpa v2 hdr from private data before sending to upper layer */
+	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
+
+	ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
+				       mpa_hdr_size;
+
+	params.event = QED_IWARP_EVENT_MPA_REQUEST;
+	params.cm_info = &ep->cm_info;
+	params.ep_context = ep;
+	params.status = 0;
+
+	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
+	ep->event_cb(ep->cb_context, &params);
+}
+
+static int
+qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+	struct qed_sp_init_data init_data;
+	dma_addr_t async_output_phys;
+	struct qed_spq_entry *p_ent;
+	dma_addr_t out_pdata_phys;
+	dma_addr_t in_pdata_phys;
+	struct qed_rdma_qp *qp;
+	bool reject;
+	int rc;
+
+	if (!ep)
+		return -EINVAL;
+
+	qp = ep->qp;
+	reject = !qp;
+
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = reject ? ep->tcp_cid : qp->icid;
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
+				 PROTOCOLID_IWARP, &init_data);
+	if (rc)
+		return rc;
+
+	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
+	out_pdata_phys = ep->ep_buffer_phys +
+			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
+	DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
+		       out_pdata_phys);
+	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
+	    cpu_to_le16(ep->cm_info.private_data_len);
+	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
+
+	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
+	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
+
+	p_mpa_ramrod->tcp_cid = cpu_to_le32(p_hwfn->hw_info.opaque_fid << 16 |
+					    ep->tcp_cid);
+
+	in_pdata_phys = ep->ep_buffer_phys +
+			offsetof(struct qed_iwarp_ep_memory, in_pdata);
+	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
+	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
+		       in_pdata_phys);
+	p_mpa_ramrod->incoming_ulp_buffer.len =
+	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
+	async_output_phys = ep->ep_buffer_phys +
+			    offsetof(struct qed_iwarp_ep_memory, async_output);
+	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
+		       async_output_phys);
+	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
+	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
+
+	if (!reject) {
+		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
+			       qp->shared_queue_phys_addr);
+		p_mpa_ramrod->stats_counter_id =
+		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
+	} else {
+		p_mpa_ramrod->common.reject = 1;
+	}
+
+	p_mpa_ramrod->mode = ep->mpa_rev;
+	SET_FIELD(p_mpa_ramrod->rtr_pref,
+		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
+
+	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+	if (!reject)
+		ep->cid = qp->icid;	/* Now they're migrated. */
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_RDMA,
+		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
+		   reject ? 0xffff : qp->icid,
+		   ep->tcp_cid,
+		   rc,
+		   ep->cm_info.ird,
+		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
+	return rc;
+}
+
+static void
+qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	ep->state = QED_IWARP_EP_INIT;
+	if (ep->qp)
+		ep->qp->ep = NULL;
+	ep->qp = NULL;
+	memset(&ep->cm_info, 0, sizeof(ep->cm_info));
+
+	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
+		/* We don't care about the return code; it's ok if tcp_cid
+		 * remains invalid, in which case we'll defer the allocation
+		 */
+		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
+	}
+	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+	list_del(&ep->list_entry);
+	list_add_tail(&ep->list_entry,
+		      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
+
+	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+}
+
+#define QED_IWARP_CONNECT_MODE_STRING(ep) \
+	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
+
+/* Called as a result of the event:
+ * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
+ */
+static void
+qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
+		       struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+	struct qed_iwarp_cm_event_params params;
+
+	params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
+		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
+
+	params.cm_info = &ep->cm_info;
+
+	params.ep_context = ep;
+
+	ep->state = QED_IWARP_EP_CLOSED;
+
+	switch (fw_return_code) {
+	case RDMA_RETURN_OK:
+		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
+		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
+		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
+		ep->state = QED_IWARP_EP_ESTABLISHED;
+		params.status = 0;
+		break;
+	default:
+		params.status = -ECONNRESET;
+		break;
+	}
+
+	ep->event_cb(ep->cb_context, &params);
+}
+
+static void
+qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
+			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
+{
+	struct mpa_v2_hdr *mpa_v2_params;
+	u16 mpa_ird, mpa_ord;
+
+	*mpa_data_size = 0;
+	if (MPA_REV2(ep->mpa_rev)) {
+		mpa_v2_params =
+		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
+		*mpa_data_size = sizeof(*mpa_v2_params);
+
+		mpa_ird = (u16)ep->cm_info.ird;
+		mpa_ord = (u16)ep->cm_info.ord;
+
+		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
+			mpa_ird |= MPA_V2_PEER2PEER_MODEL;
+
+			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
+				mpa_ird |= MPA_V2_SEND_RTR;
+
+			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
+				mpa_ord |= MPA_V2_WRITE_RTR;
+
+			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
+				mpa_ord |= MPA_V2_READ_RTR;
+		}
+
+		mpa_v2_params->ird = htons(mpa_ird);
+		mpa_v2_params->ord = htons(mpa_ord);
+
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_RDMA,
+			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
+			   mpa_v2_params->ird,
+			   mpa_v2_params->ord,
+			   *((u32 *)mpa_v2_params),
+			   mpa_ord & MPA_V2_IRD_ORD_MASK,
+			   mpa_ird & MPA_V2_IRD_ORD_MASK,
+			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
+			   !!(mpa_ird & MPA_V2_SEND_RTR),
+			   !!(mpa_ord & MPA_V2_WRITE_RTR),
+			   !!(mpa_ord & MPA_V2_READ_RTR));
+	}
+}
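+
For reference, the enhanced-mode private data carries a small header of two
big-endian 16-bit words: ird/ord in the low bits plus the peer2peer/RTR
capability flags in the high bits, which qed_iwarp_mpa_received() decodes and
qed_iwarp_mpa_v2_set_private() encodes. A minimal userspace sketch of that
encoding follows (not part of the patch; the MPA_V2_IRD_ORD_MASK and
MPA_V2_WRITE_RTR values are assumptions for illustration only):

/* Standalone sketch of the MPA v2 enhanced-mode private-data header. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define MPA_V2_IRD_ORD_MASK     0x3fff	/* assumed */
#define MPA_V2_PEER2PEER_MODEL  0x8000	/* on ird, assumed */
#define MPA_V2_SEND_RTR         0x4000	/* on ird, assumed */
#define MPA_V2_WRITE_RTR        0x8000	/* on ord, assumed */
#define MPA_V2_READ_RTR         0x4000	/* on ord, assumed */

struct mpa_v2_hdr {
	uint16_t ird;
	uint16_t ord;
};

int main(void)
{
	struct mpa_v2_hdr hdr;
	uint16_t ird, ord;

	/* Encode: advertise ird=16, ord=8, peer2peer with a zero-length READ RTR */
	hdr.ird = htons(16 | MPA_V2_PEER2PEER_MODEL);
	hdr.ord = htons(8 | MPA_V2_READ_RTR);

	/* Decode, as the receive path does for an incoming MPA request */
	ird = ntohs(hdr.ird);
	ord = ntohs(hdr.ord);

	printf("ird=%u ord=%u p2p=%d send_rtr=%d write_rtr=%d read_rtr=%d\n",
	       ird & MPA_V2_IRD_ORD_MASK, ord & MPA_V2_IRD_ORD_MASK,
	       !!(ird & MPA_V2_PEER2PEER_MODEL), !!(ird & MPA_V2_SEND_RTR),
	       !!(ord & MPA_V2_WRITE_RTR), !!(ord & MPA_V2_READ_RTR));
	return 0;
}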
+
+static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
+{
+	struct qed_iwarp_ep *ep = NULL;
+	int rc;
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
+		DP_ERR(p_hwfn, "Ep list is empty\n");
+		goto out;
+	}
+
+	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
+			      struct qed_iwarp_ep, list_entry);
+
+	/* In some cases we could have failed to allocate a tcp cid when the
+	 * ep was returned from accept / failure handling; retry now. This is
+	 * not the common case.
+	 */
+	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
+		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
+
+		/* If we fail we could look for another entry with a valid
+		 * tcp_cid, but since we don't expect to reach this anyway
+		 * it's not worth the extra handling
+		 */
+		if (rc) {
+			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
+			ep = NULL;
+			goto out;
+		}
+	}
+
+	list_del(&ep->list_entry);
+
+out:
+	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+	return ep;
+}
+
 #define QED_IWARP_MAX_CID_CLEAN_TIME  100
 #define QED_IWARP_MAX_NO_PROGRESS_CNT 5
 
@@ -465,20 +1057,213 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 
 static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
 {
+	int rc;
+	int i;
+
+	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
+					    &p_hwfn->p_rdma_info->tcp_cid_map);
+	if (rc)
+		return rc;
+
+	/* Now free the tcp cids from the main cid map */
+	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
+		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
+
 	/* Now wait for all cids to be completed */
 	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
 					      &p_hwfn->p_rdma_info->cid_map);
 }
 
+static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
+{
+	struct qed_iwarp_ep *ep;
+
+	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
+		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+		ep = list_first_entry_or_null(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
+					      struct qed_iwarp_ep, list_entry);
+
+		if (!ep) {
+			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+			break;
+		}
+		list_del(&ep->list_entry);
+
+		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
+			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
+
+		qed_iwarp_destroy_ep(p_hwfn, ep, false);
+	}
+}
+
+static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
+{
+	struct qed_iwarp_ep *ep;
+	int rc = 0;
+	int count;
+	u32 cid;
+	int i;
+
+	count = init ? QED_IWARP_PREALLOC_CNT : 1;
+	for (i = 0; i < count; i++) {
+		rc = qed_iwarp_create_ep(p_hwfn, &ep);
+		if (rc)
+			return rc;
+
+		/* During initialization we allocate from the main pool;
+		 * afterwards we allocate only from the tcp_cid bitmap.
+		 */
+		if (init) {
+			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
+			if (rc)
+				goto err;
+			qed_iwarp_set_tcp_cid(p_hwfn, cid);
+		} else {
+			/* We don't care about the return code; it's ok if
+			 * tcp_cid remains invalid, in which case we'll
+			 * defer the allocation
+			 */
+			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
+		}
+
+		ep->tcp_cid = cid;
+
+		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+		list_add_tail(&ep->list_entry,
+			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
+		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+	}
+
+	return rc;
+
+err:
+	qed_iwarp_destroy_ep(p_hwfn, ep, false);
+
+	return rc;
+}
+
 int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
 {
+	int rc;
+
+	/* Allocate a bitmap for tcp cids. These are used by the passive side
+	 * so that it can allocate a pre-acquired tcp cid during the dpc
+	 * without requiring dynamic allocation of ILT entries.
+	 */
+	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
+				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "Failed to allocate tcp cid, rc = %d\n", rc);
+		return rc;
+	}
+
+	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
 	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 
-	return 0;
+	return qed_iwarp_prealloc_ep(p_hwfn, true);
 }
 
 void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 {
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
+}
+
+int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
+{
+	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+	struct qed_iwarp_ep *ep;
+	u8 mpa_data_size = 0;
+	int rc;
+
+	ep = (struct qed_iwarp_ep *)iparams->ep_context;
+	if (!ep) {
+		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
+		   iparams->qp->icid, ep->tcp_cid);
+
+	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
+	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_RDMA,
+			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
+			   iparams->qp->icid,
+			   ep->tcp_cid, iparams->ord, iparams->ird);
+		return -EINVAL;
+	}
+
+	qed_iwarp_prealloc_ep(p_hwfn, false);
+
+	ep->cb_context = iparams->cb_context;
+	ep->qp = iparams->qp;
+	ep->qp->ep = ep;
+
+	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
+		/* Negotiate ord/ird: if the upper layer requested an ord larger
+		 * than the ird advertised by the remote, we must decrease our ord
+		 */
+		if (iparams->ord > ep->cm_info.ird)
+			iparams->ord = ep->cm_info.ird;
+
+		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
+		    (iparams->ird == 0))
+			iparams->ird = 1;
+	}
+
+	/* Update cm_info ord/ird to be negotiated values */
+	ep->cm_info.ord = iparams->ord;
+	ep->cm_info.ird = iparams->ird;
+
+	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+	ep->cm_info.private_data_len = iparams->private_data_len +
+				       mpa_data_size;
+
+	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+	       iparams->private_data, iparams->private_data_len);
+
+	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
+	if (rc)
+		qed_iwarp_modify_qp(p_hwfn,
+				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
+
+	return rc;
+}
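+
The ord/ird negotiation in qed_iwarp_accept() boils down to two rules: clamp
the local ord to the ird the peer advertised, and force ird to at least 1 when
the peer expects a zero-length READ RTR message. A minimal standalone sketch
(not part of the patch; the MPA_RTR_TYPE_ZERO_READ value is an assumption):

#include <stdio.h>
#include <stdint.h>

#define MPA_RTR_TYPE_ZERO_READ  0x2	/* assumed flag value */

static void negotiate_ord_ird(uint8_t *ord, uint8_t *ird,
			      uint8_t remote_ird, unsigned int rtr_type)
{
	/* Never ask for more outstanding reads than the peer can take */
	if (*ord > remote_ird)
		*ord = remote_ird;

	/* A zero-length READ RTR from the peer needs at least one ird credit */
	if ((rtr_type & MPA_RTR_TYPE_ZERO_READ) && *ird == 0)
		*ird = 1;
}

int main(void)
{
	uint8_t ord = 32, ird = 0;

	negotiate_ord_ird(&ord, &ird, 16, MPA_RTR_TYPE_ZERO_READ);
	printf("negotiated ord=%u ird=%u\n", ord, ird);	/* ord=16 ird=1 */
	return 0;
}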
+
+int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
+{
+	struct qed_hwfn *p_hwfn = rdma_cxt;
+	struct qed_iwarp_ep *ep;
+	u8 mpa_data_size = 0;
+
+	ep = iparams->ep_context;
+	if (!ep) {
+		DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n");
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
+
+	ep->cb_context = iparams->cb_context;
+	ep->qp = NULL;
+
+	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+	ep->cm_info.private_data_len = iparams->private_data_len +
+				       mpa_data_size;
+
+	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+	       iparams->private_data, iparams->private_data_len);
+
+	return qed_iwarp_mpa_offload(p_hwfn, ep);
 }
 
 static void
@@ -526,6 +1311,38 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 	return rc;
 }
 
+static bool
+qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
+{
+	struct qed_iwarp_ep *ep = NULL;
+	bool found = false;
+
+	list_for_each_entry(ep,
+			    &p_hwfn->p_rdma_info->iwarp.ep_list,
+			    list_entry) {
+		if ((ep->cm_info.local_port == cm_info->local_port) &&
+		    (ep->cm_info.remote_port == cm_info->remote_port) &&
+		    (ep->cm_info.vlan == cm_info->vlan) &&
+		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
+			    sizeof(cm_info->local_ip)) &&
+		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
+			    sizeof(cm_info->remote_ip))) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found) {
+		DP_NOTICE(p_hwfn,
+			  "SYN received on active connection - dropping\n");
+		qed_iwarp_print_cm_info(p_hwfn, cm_info);
+
+		return true;
+	}
+
+	return false;
+}
+
 static struct qed_iwarp_listener *
 qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
 		       struct qed_iwarp_cm_info *cm_info)
@@ -596,9 +1413,8 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 
 	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
 
-	memcpy(remote_mac_addr, ethh->h_source, ETH_ALEN);
-
-	memcpy(local_mac_addr, ethh->h_dest, ETH_ALEN);
+	ether_addr_copy(remote_mac_addr, ethh->h_source);
+	ether_addr_copy(local_mac_addr, ethh->h_dest);
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
 		   eth_type, ethh->h_source);
@@ -661,9 +1477,12 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 	struct qed_hwfn *p_hwfn = cxt;
 	u8 remote_mac_addr[ETH_ALEN];
 	u8 local_mac_addr[ETH_ALEN];
+	struct qed_iwarp_ep *ep;
 	int tcp_start_offset;
+	u8 ts_hdr_size = 0;
 	u8 ll2_syn_handle;
 	int payload_len;
+	u32 hdr_size;
 	int rc;
 
 	memset(&cm_info, 0, sizeof(cm_info));
@@ -719,6 +1538,49 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 	}
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
+	/* There may already be an open ep for this connection if this is a
+	 * SYN retransmit - make sure there isn't.
+	 */
+	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
+		goto err;
+
+	ep = qed_iwarp_get_free_ep(p_hwfn);
+	if (!ep)
+		goto err;
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
+	ether_addr_copy(ep->local_mac_addr, local_mac_addr);
+
+	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
+
+	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
+		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
+
+	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
+		   ts_hdr_size;
+	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
+	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
+
+	ep->event_cb = listener->event_cb;
+	ep->cb_context = listener->cb_context;
+	ep->connect_mode = TCP_CONNECT_PASSIVE;
+
+	ep->syn = buf;
+	ep->syn_ip_payload_length = (u16)payload_len;
+	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
+			   tcp_start_offset;
+
+	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
+	if (rc) {
+		qed_iwarp_return_ep(p_hwfn, ep);
+		goto err;
+	}
+
+	return;
 err:
 	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
 }
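
The MSS programmed into the SYN offload above is the ll2 max_mtu minus the
fixed IPv4+TCP (40 bytes) or IPv6+TCP (60 bytes) headers, minus the TCP
timestamp option when timestamps are enabled, capped at the firmware limit
QED_IWARP_MAX_FW_MSS. A standalone sketch of that calculation (not part of the
patch; the 12-byte TIMESTAMP_HEADER_SIZE and the MTU values are assumptions):

#include <stdio.h>
#include <stdint.h>

#define QED_IWARP_MAX_FW_MSS   4120
#define TIMESTAMP_HEADER_SIZE  12	/* assumed value */

static uint16_t iwarp_syn_mss(uint32_t max_mtu, int ipv4, int ts_en)
{
	/* 40 = IPv4 + TCP headers, 60 = IPv6 + TCP headers */
	uint32_t hdr_size = (ipv4 ? 40 : 60) +
			    (ts_en ? TIMESTAMP_HEADER_SIZE : 0);
	uint32_t mss = max_mtu - hdr_size;

	return mss < QED_IWARP_MAX_FW_MSS ? mss : QED_IWARP_MAX_FW_MSS;
}

int main(void)
{
	printf("mss(1500, v4, ts)    = %u\n", iwarp_syn_mss(1500, 1, 1)); /* 1448 */
	printf("mss(9000, v6, no ts) = %u\n", iwarp_syn_mss(9000, 0, 0)); /* 4120 */
	return 0;
}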
@@ -905,7 +1767,12 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 
 	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
 
+	iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
+				MPA_RTR_TYPE_ZERO_WRITE |
+				MPA_RTR_TYPE_ZERO_READ;
+
 	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
 	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
 
 	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
@@ -918,6 +1785,7 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	int rc;
 
+	qed_iwarp_free_prealloc_ep(p_hwfn);
 	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
 	if (rc)
 		return rc;
@@ -927,11 +1795,70 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
 }
 
+void
+qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
+			   struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+	/* Done with the SYN packet, post back to ll2 rx */
+	qed_iwarp_ll2_post_rx(p_hwfn, ep->syn,
+			      p_hwfn->p_rdma_info->iwarp.ll2_syn_handle);
+	ep->syn = NULL;
+
+	/* If connect failed - upper layer doesn't know about it */
+	qed_iwarp_mpa_received(p_hwfn, ep);
+}
+
+static inline bool
+qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	if (!ep || (ep->sig != QED_EP_SIG)) {
+		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
+		return false;
+	}
+
+	return true;
+}
+
 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
 				 u8 fw_event_code, u16 echo,
 				 union event_ring_data *data,
 				 u8 fw_return_code)
 {
+	struct regpair *fw_handle = &data->rdma_data.async_handle;
+	struct qed_iwarp_ep *ep = NULL;
+	u16 cid;
+
+	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
+						       fw_handle->lo);
+
+	switch (fw_event_code) {
+	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
+		/* Async completion after TCP 3-way handshake */
+		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+			return -EINVAL;
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_RDMA,
+			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
+			   ep->tcp_cid, fw_return_code);
+		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
+		break;
+	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
+		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+			return -EINVAL;
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_RDMA,
+			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
+			   ep->cid, fw_return_code);
+		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
+		break;
+	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
+		cid = (u16)le32_to_cpu(fw_handle->lo);
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
+		qed_iwarp_cid_cleaned(p_hwfn, cid);
+
+		break;
+	}
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index 29005ac..bedac98 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -42,6 +42,8 @@ enum qed_iwarp_qp_state {
 
 enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
 
+#define QED_IWARP_PREALLOC_CNT  (256)
+
 #define QED_IWARP_LL2_SYN_TX_SIZE       (128)
 #define QED_IWARP_LL2_SYN_RX_SIZE       (256)
 #define QED_IWARP_MAX_SYN_PKT_SIZE      (128)
@@ -55,6 +57,8 @@ struct qed_iwarp_ll2_buff {
 
 struct qed_iwarp_info {
 	struct list_head listen_list;	/* qed_iwarp_listener */
+	struct list_head ep_list;	/* qed_iwarp_ep */
+	struct list_head ep_free_list;	/* pre-allocated ep's */
 	spinlock_t iw_lock;	/* for iwarp resources */
 	spinlock_t qp_lock;	/* for teardown races */
 	u32 rcv_wnd_scale;
@@ -68,6 +72,61 @@ struct qed_iwarp_info {
 	enum mpa_rtr_type rtr_type;
 };
 
+enum qed_iwarp_ep_state {
+	QED_IWARP_EP_INIT,
+	QED_IWARP_EP_MPA_REQ_RCVD,
+	QED_IWARP_EP_MPA_OFFLOADED,
+	QED_IWARP_EP_ESTABLISHED,
+	QED_IWARP_EP_CLOSED
+};
+
+union async_output {
+	struct iwarp_eqe_data_mpa_async_completion mpa_response;
+	struct iwarp_eqe_data_tcp_async_completion mpa_request;
+};
+
+#define QED_MAX_PRIV_DATA_LEN (512)
+struct qed_iwarp_ep_memory {
+	u8 in_pdata[QED_MAX_PRIV_DATA_LEN];
+	u8 out_pdata[QED_MAX_PRIV_DATA_LEN];
+	union async_output async_output;
+};
+
+/* Endpoint structure represents a TCP connection. This connection can be
+ * associated with a QP or not (in which case QP==NULL)
+ */
+struct qed_iwarp_ep {
+	struct list_head list_entry;
+	struct qed_rdma_qp *qp;
+	struct qed_iwarp_ep_memory *ep_buffer_virt;
+	dma_addr_t ep_buffer_phys;
+	enum qed_iwarp_ep_state state;
+	int sig;
+	struct qed_iwarp_cm_info cm_info;
+	enum tcp_connect_mode connect_mode;
+	enum mpa_rtr_type rtr_type;
+	enum mpa_negotiation_mode mpa_rev;
+	u32 tcp_cid;
+	u32 cid;
+	u16 mss;
+	u8 remote_mac_addr[6];
+	u8 local_mac_addr[6];
+	bool mpa_reply_processed;
+
+	/* For Passive side - syn packet related data */
+	u16 syn_ip_payload_length;
+	struct qed_iwarp_ll2_buff *syn;
+	dma_addr_t syn_phy_addr;
+
+	/* The event_cb function is called for asynchronous events associated
+	 * with the ep. It is initialized at different entry points depending
+	 * on whether the ep is on the tcp connection's active or passive side.
+	 * The cb_context is passed to the event_cb function.
+	 */
+	iwarp_event_handler event_cb;
+	void *cb_context;
+};
+
 struct qed_iwarp_listener {
 	struct list_head list_entry;
 
@@ -115,6 +174,9 @@ void qed_iwarp_query_qp(struct qed_rdma_qp *qp,
 			struct qed_iwarp_listen_in *iparams,
 			struct qed_iwarp_listen_out *oparams);
 
+int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams);
+
+int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams);
 int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle);
 
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 27ea54b..0ba5ec8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1227,19 +1227,6 @@ static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
 	return action;
 }
 
-static void qed_set_fw_mac_addr(__le16 *fw_msb,
-				__le16 *fw_mid,
-				__le16 *fw_lsb,
-				u8 *mac)
-{
-	((u8 *)fw_msb)[0] = mac[1];
-	((u8 *)fw_msb)[1] = mac[0];
-	((u8 *)fw_mid)[0] = mac[3];
-	((u8 *)fw_mid)[1] = mac[2];
-	((u8 *)fw_lsb)[0] = mac[5];
-	((u8 *)fw_lsb)[1] = mac[4];
-}
-
 static int
 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
 			u16 opaque_fid,
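
The static qed_set_fw_mac_addr() helper is dropped from qed_l2.c because the
tcp offload ramrod earlier in this patch needs the same MAC swizzling; the
shared helper packs the address into three 16-bit little-endian words with the
two bytes of each word swapped. A standalone sketch of the swizzle (not part
of the patch; uint16_t stands in for __le16 and the MAC value is an example):

#include <stdio.h>
#include <stdint.h>

static void set_fw_mac_addr(uint16_t *fw_msb, uint16_t *fw_mid,
			    uint16_t *fw_lsb, const uint8_t *mac)
{
	/* Byte-swap each pair so the fw sees the MAC as three LE16 words */
	((uint8_t *)fw_msb)[0] = mac[1];
	((uint8_t *)fw_msb)[1] = mac[0];
	((uint8_t *)fw_mid)[0] = mac[3];
	((uint8_t *)fw_mid)[1] = mac[2];
	((uint8_t *)fw_lsb)[0] = mac[5];
	((uint8_t *)fw_lsb)[1] = mac[4];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };
	uint16_t msb, mid, lsb;

	set_fw_mac_addr(&msb, &mid, &lsb, mac);
	/* On a little-endian host this prints the LE16 values the fw expects */
	printf("msb=%04x mid=%04x lsb=%04x\n", msb, mid, lsb);
	return 0;
}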
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 90e4e0f..18ec9cb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -85,6 +85,7 @@ struct qed_rdma_info {
 	struct qed_bmap qp_map;
 	struct qed_bmap srq_map;
 	struct qed_bmap cid_map;
+	struct qed_bmap tcp_cid_map;
 	struct qed_bmap real_cid_map;
 	struct qed_bmap dpi_map;
 	struct qed_bmap toggle_bits;
@@ -167,6 +168,7 @@ struct qed_rdma_qp {
 
 	void *shared_queue;
 	dma_addr_t shared_queue_phys_addr;
+	struct qed_iwarp_ep *ep;
 };
 
 #if IS_ENABLED(CONFIG_QED_RDMA)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index c3752c5..ab4ad8a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -111,6 +111,8 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
 	struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
 	struct rdma_srq_modify_ramrod_data rdma_modify_srq;
 	struct iwarp_create_qp_ramrod_data iwarp_create_qp;
+	struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
+	struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
 	struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
 	struct iwarp_init_func_ramrod_data iwarp_init_func;
 	struct fcoe_init_ramrod_params fcoe_init;
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 885ae13..39e2a2a 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -38,6 +38,8 @@
 #include <linux/slab.h>
 
 /* dma_addr_t manip */
+#define PTR_LO(x)               ((u32)(((uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x)               ((u32)((((uintptr_t)(x)) >> 16) >> 16))
 #define DMA_LO_LE(x)		cpu_to_le32(lower_32_bits(x))
 #define DMA_HI_LE(x)		cpu_to_le32(upper_32_bits(x))
 #define DMA_REGPAIR_LE(x, val)	do { \
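
PTR_LO()/PTR_HI() split the ep pointer into two 32-bit halves so it can be
stored in handle_for_async and recovered from the async event; the double
16-bit shift in PTR_HI avoids an undefined 32-bit shift when uintptr_t is only
32 bits wide. A standalone round-trip sketch (not part of the patch; the
HILO_64() reassembly shown is an assumption about that helper):

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

#define PTR_LO(x)	((uint32_t)(((uintptr_t)(x)) & 0xffffffff))
#define PTR_HI(x)	((uint32_t)((((uintptr_t)(x)) >> 16) >> 16))
#define HILO_64(hi, lo)	((((uint64_t)(hi)) << 32) | (uint32_t)(lo))	/* assumed */

int main(void)
{
	int obj;
	void *p = &obj;
	uint32_t lo = PTR_LO(p), hi = PTR_HI(p);
	void *back = (void *)(uintptr_t)HILO_64(hi, lo);

	/* The pointer survives the split into a hi/lo regpair and back */
	assert(back == p);
	printf("hi=%08x lo=%08x round-trip ok\n", (unsigned)hi, (unsigned)lo);
	return 0;
}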
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 28df568..c4c241f 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -471,7 +471,8 @@ struct qed_rdma_counters_out_params {
 #define QED_ROCE_TX_FRAG_FAILURE        (2)
 
 enum qed_iwarp_event_type {
-	QED_IWARP_EVENT_MPA_REQUEST,	/* Passive side request received */
+	QED_IWARP_EVENT_MPA_REQUEST,	  /* Passive side request received */
+	QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
 };
 
 enum qed_tcp_ip_version {
@@ -516,6 +517,23 @@ struct qed_iwarp_listen_out {
 	void *handle;
 };
 
+struct qed_iwarp_accept_in {
+	void *ep_context;
+	void *cb_context;
+	struct qed_rdma_qp *qp;
+	const void *private_data;
+	u16 private_data_len;
+	u8 ord;
+	u8 ird;
+};
+
+struct qed_iwarp_reject_in {
+	void *ep_context;
+	void *cb_context;
+	const void *private_data;
+	u16 private_data_len;
+};
+
 struct qed_roce_ll2_header {
 	void *vaddr;
 	dma_addr_t baddr;
@@ -626,6 +644,12 @@ struct qed_rdma_ops {
 				   struct qed_iwarp_listen_in *iparams,
 				   struct qed_iwarp_listen_out *oparams);
 
+	int (*iwarp_accept)(void *rdma_cxt,
+			    struct qed_iwarp_accept_in *iparams);
+
+	int (*iwarp_reject)(void *rdma_cxt,
+			    struct qed_iwarp_reject_in *iparams);
+
 	int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);
 
 };
-- 
1.8.3.1

