* [PATCH 0/4] Add bnx2i driver.
@ 2009-05-23 21:11 Michael Chan
  2009-05-23 21:11 ` [PATCH 1/4] iscsi class: Add new NETLINK_ISCSI messages for cnic/bnx2i driver Michael Chan
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Michael Chan @ 2009-05-23 21:11 UTC (permalink / raw)
  To: James.Bottomley, michaelc; +Cc: davem, linux-scsi, open-iscsi, anilgv, benli


James, the next 4 patches are our latest to add the bnx2i
driver.  We've fixed up all the issues that Mike brought up.
Please consider applying them to scsi-misc-2.6.

Thanks.



* [PATCH 1/4] iscsi class: Add new NETLINK_ISCSI messages for cnic/bnx2i driver.
  2009-05-23 21:11 [PATCH 0/4] Add bnx2i driver Michael Chan
@ 2009-05-23 21:11 ` Michael Chan
  2009-05-23 21:11 ` [PATCH 2/4] bnx2: Add support for CNIC driver Michael Chan
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 11+ messages in thread
From: Michael Chan @ 2009-05-23 21:11 UTC (permalink / raw)
  To: James.Bottomley, michaelc; +Cc: davem, linux-scsi, open-iscsi, anilgv, benli

Add NETLINK_ISCSI messages for iSCSI NICs to get information such as
path from userspace.  Original iscsid messages are now always sent as
multicast to group 1.  The new messages are sent to group 2.

The multicast changes were made by Mike Christie.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Benjamin Li <benli@broadcom.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
---
 drivers/scsi/scsi_transport_iscsi.c |  122 ++++++++++++++++++++++++----------
 include/scsi/iscsi_if.h             |   42 ++++++++++++
 include/scsi/scsi_transport_iscsi.h |    5 ++
 3 files changed, 133 insertions(+), 36 deletions(-)
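
For reference, the new messages are consumed from userspace by joining
netlink multicast group 2 (ISCSI_NL_GRP_UIP) on a NETLINK_ISCSI socket;
the kernel side pushes ISCSI_KEVENT_PATH_REQ / ISCSI_KEVENT_IF_DOWN to
that group through the iscsi_offload_mesg() helper added below.  A
minimal userspace sketch, not part of this patch, with error handling
and the ISCSI_UEVENT_PATH_UPDATE reply omitted:

  #include <unistd.h>
  #include <sys/socket.h>
  #include <linux/netlink.h>

  static int open_uip_netlink(void)
  {
  	struct sockaddr_nl addr = {
  		.nl_family = AF_NETLINK,
  		.nl_groups = 1 << (2 - 1),	/* ISCSI_NL_GRP_UIP = 2 */
  	};
  	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ISCSI);

  	if (fd < 0)
  		return -1;
  	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
  		close(fd);
  		return -1;
  	}
  	/* recvmsg() on fd now yields ISCSI_KEVENT_PATH_REQ events. */
  	return fd;
  }

On the kernel side, an offload transport sends the request with
iscsi_offload_mesg(shost, transport, ISCSI_KEVENT_PATH_REQ, data, len),
and the daemon's ISCSI_UEVENT_PATH_UPDATE answer is routed to the new
set_path() callback in struct iscsi_transport.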

diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index d69a53a..e4215fe 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -37,7 +37,6 @@
 #define ISCSI_TRANSPORT_VERSION "2.0-870"
 
 struct iscsi_internal {
-	int daemon_pid;
 	struct scsi_transport_template t;
 	struct iscsi_transport *iscsi_transport;
 	struct list_head list;
@@ -938,23 +937,9 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt)
 }
 
 static int
-iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp)
+iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
 {
-	return netlink_broadcast(nls, skb, 0, 1, gfp);
-}
-
-static int
-iscsi_unicast_skb(struct sk_buff *skb, int pid)
-{
-	int rc;
-
-	rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
-	if (rc < 0) {
-		printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc);
-		return rc;
-	}
-
-	return 0;
+	return nlmsg_multicast(nls, skb, 0, group, gfp);
 }
 
 int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
@@ -980,7 +965,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
 		return -ENOMEM;
 	}
 
-	nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
 	ev = NLMSG_DATA(nlh);
 	memset(ev, 0, sizeof(*ev));
 	ev->transport_handle = iscsi_handle(conn->transport);
@@ -991,10 +976,45 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
 	memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
 	memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
 
-	return iscsi_unicast_skb(skb, priv->daemon_pid);
+	return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
 }
 EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
 
+int iscsi_offload_mesg(struct Scsi_Host *shost,
+		       struct iscsi_transport *transport, uint32_t type,
+		       char *data, uint16_t data_size)
+{
+	struct nlmsghdr	*nlh;
+	struct sk_buff *skb;
+	struct iscsi_uevent *ev;
+	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+
+	skb = alloc_skb(len, GFP_NOIO);
+	if (!skb) {
+		printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
+		return -ENOMEM;
+	}
+
+	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+	ev = NLMSG_DATA(nlh);
+	memset(ev, 0, sizeof(*ev));
+	ev->type = type;
+	ev->transport_handle = iscsi_handle(transport);
+	switch (type) {
+	case ISCSI_KEVENT_PATH_REQ:
+		ev->r.req_path.host_no = shost->host_no;
+		break;
+	case ISCSI_KEVENT_IF_DOWN:
+		ev->r.notify_if_down.host_no = shost->host_no;
+		break;
+	}
+
+	memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+	return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO);
+}
+EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
+
 void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 {
 	struct nlmsghdr	*nlh;
@@ -1014,7 +1034,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 		return;
 	}
 
-	nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
 	ev = NLMSG_DATA(nlh);
 	ev->transport_handle = iscsi_handle(conn->transport);
 	ev->type = ISCSI_KEVENT_CONN_ERROR;
@@ -1022,7 +1042,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 	ev->r.connerror.cid = conn->cid;
 	ev->r.connerror.sid = iscsi_conn_get_sid(conn);
 
-	iscsi_broadcast_skb(skb, GFP_ATOMIC);
+	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
 
 	iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
 			      error);
@@ -1030,8 +1050,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
 
 static int
-iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
-		      void *payload, int size)
+iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
+		    void *payload, int size)
 {
 	struct sk_buff	*skb;
 	struct nlmsghdr	*nlh;
@@ -1045,10 +1065,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
 		return -ENOMEM;
 	}
 
-	nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
+	nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
 	nlh->nlmsg_flags = flags;
 	memcpy(NLMSG_DATA(nlh), payload, size);
-	return iscsi_unicast_skb(skb, pid);
+	return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
 }
 
 static int
@@ -1085,7 +1105,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
 			return -ENOMEM;
 		}
 
-		nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0,
+		nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
 				      (len - sizeof(*nlhstat)), 0);
 		evstat = NLMSG_DATA(nlhstat);
 		memset(evstat, 0, sizeof(*evstat));
@@ -1109,7 +1129,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
 		skb_trim(skbstat, NLMSG_ALIGN(actual_size));
 		nlhstat->nlmsg_len = actual_size;
 
-		err = iscsi_unicast_skb(skbstat, priv->daemon_pid);
+		err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
+					  GFP_ATOMIC);
 	} while (err < 0 && err != -ECONNREFUSED);
 
 	return err;
@@ -1143,7 +1164,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
 		return -ENOMEM;
 	}
 
-	nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
 	ev = NLMSG_DATA(nlh);
 	ev->transport_handle = iscsi_handle(session->transport);
 
@@ -1172,7 +1193,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
 	 * this will occur if the daemon is not up, so we just warn
 	 * the user and when the daemon is restarted it will handle it
 	 */
-	rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
+	rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
 	if (rc == -ESRCH)
 		iscsi_cls_session_printk(KERN_ERR, session,
 					 "Cannot notify userspace of session "
@@ -1393,7 +1414,31 @@ iscsi_set_host_param(struct iscsi_transport *transport,
 }
 
 static int
-iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+	struct Scsi_Host *shost;
+	struct iscsi_path *params;
+	int err;
+
+	if (!transport->set_path)
+		return -ENOSYS;
+
+	shost = scsi_host_lookup(ev->u.set_path.host_no);
+	if (!shost) {
+		printk(KERN_ERR "set path could not find host no %u\n",
+		       ev->u.set_path.host_no);
+		return -ENODEV;
+	}
+
+	params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
+	err = transport->set_path(shost, params);
+
+	scsi_host_put(shost);
+	return err;
+}
+
+static int
+iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 {
 	int err = 0;
 	struct iscsi_uevent *ev = NLMSG_DATA(nlh);
@@ -1403,6 +1448,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	struct iscsi_cls_conn *conn;
 	struct iscsi_endpoint *ep = NULL;
 
+	if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
+		*group = ISCSI_NL_GRP_UIP;
+	else
+		*group = ISCSI_NL_GRP_ISCSID;
+
 	priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
 	if (!priv)
 		return -EINVAL;
@@ -1411,8 +1461,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (!try_module_get(transport->owner))
 		return -EINVAL;
 
-	priv->daemon_pid = NETLINK_CREDS(skb)->pid;
-
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
 		err = iscsi_if_create_session(priv, ep, ev,
@@ -1506,6 +1554,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	case ISCSI_UEVENT_SET_HOST_PARAM:
 		err = iscsi_set_host_param(transport, ev);
 		break;
+	case ISCSI_UEVENT_PATH_UPDATE:
+		err = iscsi_set_path(transport, ev);
+		break;
 	default:
 		err = -ENOSYS;
 		break;
@@ -1528,6 +1579,7 @@ iscsi_if_rx(struct sk_buff *skb)
 		uint32_t rlen;
 		struct nlmsghdr	*nlh;
 		struct iscsi_uevent *ev;
+		uint32_t group;
 
 		nlh = nlmsg_hdr(skb);
 		if (nlh->nlmsg_len < sizeof(*nlh) ||
@@ -1540,7 +1592,7 @@ iscsi_if_rx(struct sk_buff *skb)
 		if (rlen > skb->len)
 			rlen = skb->len;
 
-		err = iscsi_if_recv_msg(skb, nlh);
+		err = iscsi_if_recv_msg(skb, nlh, &group);
 		if (err) {
 			ev->type = ISCSI_KEVENT_IF_ERROR;
 			ev->iferror = err;
@@ -1554,8 +1606,7 @@ iscsi_if_rx(struct sk_buff *skb)
 			 */
 			if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
 				break;
-			err = iscsi_if_send_reply(
-				NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+			err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
 				nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
 		} while (err < 0 && err != -ECONNREFUSED);
 		skb_pull(skb, rlen);
@@ -1803,7 +1854,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	if (!priv)
 		return NULL;
 	INIT_LIST_HEAD(&priv->list);
-	priv->daemon_pid = -1;
 	priv->iscsi_transport = tt;
 	priv->t.user_scan = iscsi_user_scan;
 	priv->t.create_work_queue = 1;
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 2c1a4af..4426f00 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -22,6 +22,11 @@
 #define ISCSI_IF_H
 
 #include <scsi/iscsi_proto.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+
+#define ISCSI_NL_GRP_ISCSID	1
+#define ISCSI_NL_GRP_UIP	2
 
 #define UEVENT_BASE			10
 #define KEVENT_BASE			100
@@ -53,6 +58,8 @@ enum iscsi_uevent_e {
 	ISCSI_UEVENT_CREATE_BOUND_SESSION		= UEVENT_BASE + 18,
 	ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST	= UEVENT_BASE + 19,
 
+	ISCSI_UEVENT_PATH_UPDATE	= UEVENT_BASE + 20,
+
 	/* up events */
 	ISCSI_KEVENT_RECV_PDU		= KEVENT_BASE + 1,
 	ISCSI_KEVENT_CONN_ERROR		= KEVENT_BASE + 2,
@@ -60,6 +67,9 @@ enum iscsi_uevent_e {
 	ISCSI_KEVENT_DESTROY_SESSION	= KEVENT_BASE + 4,
 	ISCSI_KEVENT_UNBIND_SESSION	= KEVENT_BASE + 5,
 	ISCSI_KEVENT_CREATE_SESSION	= KEVENT_BASE + 6,
+
+	ISCSI_KEVENT_PATH_REQ		= KEVENT_BASE + 7,
+	ISCSI_KEVENT_IF_DOWN		= KEVENT_BASE + 8,
 };
 
 enum iscsi_tgt_dscvr {
@@ -159,6 +169,9 @@ struct iscsi_uevent {
 			uint32_t	param; /* enum iscsi_host_param */
 			uint32_t	len;
 		} set_host_param;
+		struct msg_set_path {
+			uint32_t	host_no;
+		} set_path;
 	} u;
 	union {
 		/* messages k -> u */
@@ -192,10 +205,39 @@ struct iscsi_uevent {
 		struct msg_transport_connect_ret {
 			uint64_t	handle;
 		} ep_connect_ret;
+		struct msg_req_path {
+			uint32_t	host_no;
+		} req_path;
+		struct msg_notify_if_down {
+			uint32_t	host_no;
+		} notify_if_down;
 	} r;
 } __attribute__ ((aligned (sizeof(uint64_t))));
 
 /*
+ * To keep the struct iscsi_uevent size the same for userspace code
+ * compatibility, the main structure for ISCSI_UEVENT_PATH_UPDATE and
+ * ISCSI_KEVENT_PATH_REQ is defined separately and comes after the
+ * struct iscsi_uevent in the NETLINK_ISCSI message.
+ */
+struct iscsi_path {
+	uint64_t	handle;
+	uint8_t		mac_addr[6];
+	uint8_t		mac_addr_old[6];
+	uint32_t	ip_addr_len;	/* 4 or 16 */
+	union {
+		struct in_addr	v4_addr;
+		struct in6_addr	v6_addr;
+	} src;
+	union {
+		struct in_addr	v4_addr;
+		struct in6_addr	v6_addr;
+	} dst;
+	uint16_t	vlan_id;
+	uint16_t	pmtu;
+} __attribute__ ((aligned (sizeof(uint64_t))));
+
+/*
  * Common error codes
  */
 enum iscsi_err {
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 8cb7a31..349c7f3 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -133,6 +133,7 @@ struct iscsi_transport {
 	void (*ep_disconnect) (struct iscsi_endpoint *ep);
 	int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
 			  uint32_t enable, struct sockaddr *dst_addr);
+	int (*set_path) (struct Scsi_Host *shost, struct iscsi_path *params);
 };
 
 /*
@@ -149,6 +150,10 @@ extern void iscsi_conn_error_event(struct iscsi_cls_conn *conn,
 extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
 			  char *data, uint32_t data_size);
 
+extern int iscsi_offload_mesg(struct Scsi_Host *shost,
+			      struct iscsi_transport *transport, uint32_t type,
+			      char *data, uint16_t data_size);
+
 struct iscsi_cls_conn {
 	struct list_head conn_list;	/* item in connlist */
 	void *dd_data;			/* LLD private data */
-- 
1.5.6.GIT




* [PATCH 2/4] bnx2: Add support for CNIC driver.
  2009-05-23 21:11 [PATCH 0/4] Add bnx2i driver Michael Chan
  2009-05-23 21:11 ` [PATCH 1/4] iscsi class: Add new NETLINK_ISCSI messages for cnic/bnx2i driver Michael Chan
@ 2009-05-23 21:11 ` Michael Chan
       [not found] ` <1243113110-29635-1-git-send-email-mchan-dY08KVG/lbpWk0Htik3J/w@public.gmane.org>
  2009-05-23 21:11 ` [PATCH 4/4] bnx2i: Add bnx2i iSCSI driver Michael Chan
  3 siblings, 0 replies; 11+ messages in thread
From: Michael Chan @ 2009-05-23 21:11 UTC (permalink / raw)
  To: James.Bottomley, michaelc; +Cc: davem, linux-scsi, open-iscsi, anilgv, benli

Add interface and functions to support a new CNIC driver to drive
the Broadcom bnx2 hardware for iSCSI offload.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Acked-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/bnx2.c |  193 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 drivers/net/bnx2.h |   18 +++++
 2 files changed, 208 insertions(+), 3 deletions(-)
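
The hooks added here are meant for the cnic driver in the following
patch.  A rough sketch of how a consumer might attach, assuming the
struct cnic_ops / cnic_eth_dev definitions from cnic_if.h; names
prefixed with my_ are illustrative placeholders, not part of this
series:

  #include "cnic_if.h"	/* added by the next patch */

  static struct cnic_ops my_ops = {
  	.cnic_owner	= THIS_MODULE,
  	.cnic_handler	= my_cnic_handler,	/* run from bnx2_poll_cnic() */
  	.cnic_ctl	= my_cnic_ctl,		/* CNIC_CTL_START/STOP_CMD */
  };

  static int my_attach(struct net_device *netdev, void *my_priv)
  {
  	struct cnic_eth_dev *ethdev = bnx2_cnic_probe(netdev);

  	if (!ethdev)
  		return -ENODEV;

  	/* bnx2_setup_cnic_irq_info() fills ethdev->irq_arr[0] with the
  	 * shared status block and vector during registration. */
  	return ethdev->drv_register_cnic(netdev, &my_ops, my_priv);
  }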

diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b0cb29d..3f5fcb0 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -49,6 +49,10 @@
 #include <linux/firmware.h>
 #include <linux/log2.h>
 
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
 #include "bnx2.h"
 #include "bnx2_fw.h"
 
@@ -315,6 +319,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 	spin_unlock_bh(&bp->indirect_lock);
 }
 
+#ifdef BCM_CNIC
+static int
+bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct drv_ctl_io *io = &info->data.io;
+
+	switch (info->cmd) {
+	case DRV_CTL_IO_WR_CMD:
+		bnx2_reg_wr_ind(bp, io->offset, io->data);
+		break;
+	case DRV_CTL_IO_RD_CMD:
+		io->data = bnx2_reg_rd_ind(bp, io->offset);
+		break;
+	case DRV_CTL_CTX_WR_CMD:
+		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	int sb_id;
+
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
+		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+		bnapi->cnic_present = 0;
+		sb_id = bp->irq_nvecs;
+		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+	} else {
+		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+		bnapi->cnic_tag = bnapi->last_status_idx;
+		bnapi->cnic_present = 1;
+		sb_id = 0;
+		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+	}
+
+	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
+	cp->irq_arr[0].status_blk = (void *)
+		((unsigned long) bnapi->status_blk.msi +
+		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+	cp->irq_arr[0].status_blk_num = sb_id;
+	cp->num_irq = 1;
+}
+
+static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+			      void *data)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (ops == NULL)
+		return -EINVAL;
+
+	if (cp->drv_state & CNIC_DRV_STATE_REGD)
+		return -EBUSY;
+
+	bp->cnic_data = data;
+	rcu_assign_pointer(bp->cnic_ops, ops);
+
+	cp->num_irq = 0;
+	cp->drv_state = CNIC_DRV_STATE_REGD;
+
+	bnx2_setup_cnic_irq_info(bp);
+
+	return 0;
+}
+
+static int bnx2_unregister_cnic(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	cp->drv_state = 0;
+	bnapi->cnic_present = 0;
+	rcu_assign_pointer(bp->cnic_ops, NULL);
+	synchronize_rcu();
+	return 0;
+}
+
+struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	cp->drv_owner = THIS_MODULE;
+	cp->chip_id = bp->chip_id;
+	cp->pdev = bp->pdev;
+	cp->io_base = bp->regview;
+	cp->drv_ctl = bnx2_drv_ctl;
+	cp->drv_register_cnic = bnx2_register_cnic;
+	cp->drv_unregister_cnic = bnx2_unregister_cnic;
+
+	return cp;
+}
+EXPORT_SYMBOL(bnx2_cnic_probe);
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+	struct cnic_ops *c_ops;
+	struct cnic_ctl_info info;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops) {
+		info.cmd = CNIC_CTL_STOP_CMD;
+		c_ops->cnic_ctl(bp->cnic_data, &info);
+	}
+	rcu_read_unlock();
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+	struct cnic_ops *c_ops;
+	struct cnic_ctl_info info;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops) {
+		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
+			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+			bnapi->cnic_tag = bnapi->last_status_idx;
+		}
+		info.cmd = CNIC_CTL_START_CMD;
+		c_ops->cnic_ctl(bp->cnic_data, &info);
+	}
+	rcu_read_unlock();
+}
+
+#else
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+}
+
+#endif
+
 static int
 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 {
@@ -488,6 +644,7 @@ bnx2_napi_enable(struct bnx2 *bp)
 static void
 bnx2_netif_stop(struct bnx2 *bp)
 {
+	bnx2_cnic_stop(bp);
 	bnx2_disable_int_sync(bp);
 	if (netif_running(bp->dev)) {
 		bnx2_napi_disable(bp);
@@ -504,6 +661,7 @@ bnx2_netif_start(struct bnx2 *bp)
 			netif_tx_wake_all_queues(bp->dev);
 			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
+			bnx2_cnic_start(bp);
 		}
 	}
 }
@@ -3164,6 +3322,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
 	if (bnx2_has_fast_work(bnapi))
 		return 1;
 
+#ifdef BCM_CNIC
+	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
+		return 1;
+#endif
+
 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
 		return 1;
@@ -3193,6 +3356,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
 	bp->idle_chk_status_idx = bnapi->last_status_idx;
 }
 
+#ifdef BCM_CNIC
+static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+	struct cnic_ops *c_ops;
+
+	if (!bnapi->cnic_present)
+		return;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops)
+		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
+						      bnapi->status_blk.msi);
+	rcu_read_unlock();
+}
+#endif
+
 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
 	struct status_block *sblk = bnapi->status_blk.msi;
@@ -3267,6 +3447,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
+#ifdef BCM_CNIC
+		bnx2_poll_cnic(bp, bnapi);
+#endif
+
 		/* bnapi->last_status_idx is used below to tell the hw how
 		 * much work has been processed, so we must read it before
 		 * checking for more work.
@@ -4632,8 +4816,11 @@ bnx2_init_chip(struct bnx2 *bp)
 	val = REG_RD(bp, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
-	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
-		val |= BNX2_MQ_CONFIG_HALT_DIS;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			val |= BNX2_MQ_CONFIG_HALT_DIS;
+	}
 
 	REG_WR(bp, BNX2_MQ_CONFIG, val);
 
@@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
+	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
 	dev->mem_end = dev->mem_start + mem_len;
 	dev->irq = pdev->irq;
 
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5b570e1..a1ff739 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -361,6 +361,9 @@ struct l2_fhdr {
 #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE	 (1<<28)
 
 #define BNX2_L2CTX_HOST_BDIDX				0x00000004
+#define BNX2_L2CTX_STATUSB_NUM_SHIFT			 16
+#define BNX2_L2CTX_STATUSB_NUM(sb_id)			 \
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
 #define BNX2_L2CTX_HOST_BSEQ				0x00000008
 #define BNX2_L2CTX_NX_BSEQ				0x0000000c
 #define BNX2_L2CTX_NX_BDHADDR_HI			0x00000010
@@ -5900,6 +5903,7 @@ struct l2_fhdr {
 #define BNX2_RXP_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
 
 #define BNX2_RXP_SCRATCH				0x000e0000
+#define BNX2_RXP_SCRATCH_RXP_FLOOD			 0x000e0024
 #define BNX2_RXP_SCRATCH_RSS_TBL_SZ			 0x000e0038
 #define BNX2_RXP_SCRATCH_RSS_TBL			 0x000e003c
 #define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES		 128
@@ -6678,6 +6682,11 @@ struct bnx2_napi {
 	u32 			last_status_idx;
 	u32			int_num;
 
+#ifdef BCM_CNIC
+	u32			cnic_tag;
+	int			cnic_present;
+#endif
+
 	struct bnx2_rx_ring_info	rx_ring;
 	struct bnx2_tx_ring_info	tx_ring;
 };
@@ -6727,6 +6736,11 @@ struct bnx2 {
 	int		tx_ring_size;
 	u32		tx_wake_thresh;
 
+#ifdef BCM_CNIC
+	struct cnic_ops		*cnic_ops;
+	void			*cnic_data;
+#endif
+
 	/* End of fields used in the performance code paths. */
 
 	unsigned int		current_interval;
@@ -6885,6 +6899,10 @@ struct bnx2 {
 
 	u32			idle_chk_status_idx;
 
+#ifdef BCM_CNIC
+	struct cnic_eth_dev	cnic_eth_dev;
+#endif
+
 	const struct firmware	*mips_firmware;
 	const struct firmware	*rv2p_firmware;
 };
-- 
1.5.6.GIT




* [PATCH 3/4] cnic: Add new Broadcom CNIC driver.
       [not found] ` <1243113110-29635-1-git-send-email-mchan-dY08KVG/lbpWk0Htik3J/w@public.gmane.org>
@ 2009-05-23 21:11   ` Michael Chan
  2009-05-25 15:19     ` Rolf Eike Beer
  2009-05-27  2:35   ` [PATCH 0/4] Add bnx2i driver Mike Christie
  1 sibling, 1 reply; 11+ messages in thread
From: Michael Chan @ 2009-05-23 21:11 UTC (permalink / raw)
  To: James.Bottomley, michaelc; +Cc: davem, linux-scsi, open-iscsi, anilgv, benli


The CNIC driver controls BNX2 hardware rings and resources used by
iSCSI.  Most hardware resources for iSCSI are separate from those
used for ethernet networking.

iSCSI uses a separate MAC address and IP address.  The CNIC driver
creates a UIO interface to handle the non-offloaded packets such as
ARP in userspace.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Acked-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/Kconfig     |   11 +
 drivers/net/Makefile    |    1 +
 drivers/net/cnic.c      | 2715 +++++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/cnic.h      |  299 ++++++
 drivers/net/cnic_defs.h |  580 ++++++++++
 drivers/net/cnic_if.h   |  299 ++++++
 6 files changed, 3905 insertions(+), 0 deletions(-)
 create mode 100644 drivers/net/cnic.c
 create mode 100644 drivers/net/cnic.h
 create mode 100644 drivers/net/cnic_defs.h
 create mode 100644 drivers/net/cnic_if.h
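
The UIO device registered by cnic ("bnx2_cnic") exposes the device BAR,
the status block, the L2 ring and the L2 buffers as regions 0-3, and
signals rx/tx work through uio_event_notify().  A rough sketch of the
userspace side under the standard UIO conventions (region i is mapped
at offset i * page size; 4096 below stands in for the real sizes, which
would be read from /sys/class/uio/uioN/maps/ in real code):

  #include <fcntl.h>
  #include <stdint.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
  	long pg = sysconf(_SC_PAGESIZE);
  	int fd = open("/dev/uio0", O_RDWR);	/* the "bnx2_cnic" device */
  	uint32_t events;

  	if (fd < 0)
  		return 1;

  	/* Region 2 is the L2 ring in this patch; a one-page map is
  	 * enough for illustration. */
  	void *l2_ring = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
  			     MAP_SHARED, fd, 2 * pg);
  	if (l2_ring == MAP_FAILED)
  		return 1;

  	/* Blocks until cnic calls uio_event_notify() on new work. */
  	read(fd, &events, sizeof(events));
  	return 0;
  }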

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 214a92d..f3c4a3b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2264,6 +2264,17 @@ config BNX2
 	  To compile this driver as a module, choose M here: the module
 	  will be called bnx2.  This is recommended.
 
+config CNIC
+	tristate "Broadcom CNIC support"
+	depends on BNX2
+	depends on UIO
+	help
+	  This driver supports offload features of Broadcom NetXtremeII
+	  gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cnic.  This is recommended.
+
 config SPIDER_NET
 	tristate "Spider Gigabit Ethernet driver"
 	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1fc4602..e6f1f8c 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
 obj-$(CONFIG_BNX2X) += bnx2x.o
 bnx2x-objs := bnx2x_main.o bnx2x_link.o
 spidernet-y += spider_net.o spider_net_ethtool.o
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
new file mode 100644
index 0000000..b900414
--- /dev/null
+++ b/drivers/net/cnic.c
@@ -0,0 +1,2715 @@
+/* cnic.c: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Original skeleton written by: John(Zongxi) Chen (zongxi-dY08KVG/lbpWk0Htik3J/w@public.gmane.org)
+ * Modified and maintained by: Michael Chan <mchan-dY08KVG/lbpWk0Htik3J/w@public.gmane.org>
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/uio_driver.h>
+#include <linux/in.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define BCM_VLAN 1
+#endif
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <scsi/iscsi_if.h>
+
+#include "cnic_if.h"
+#include "bnx2.h"
+#include "cnic.h"
+#include "cnic_defs.h"
+
+#define DRV_MODULE_NAME		"cnic"
+#define PFX DRV_MODULE_NAME	": "
+
+static char version[] __devinitdata =
+	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan-dY08KVG/lbpWk0Htik3J/w@public.gmane.org> and John(Zongxi) "
+	      "Chen (zongxi-dY08KVG/lbpWk0Htik3J/w@public.gmane.org");
+MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(CNIC_MODULE_VERSION);
+
+static LIST_HEAD(cnic_dev_list);
+static DEFINE_RWLOCK(cnic_dev_lock);
+static DEFINE_MUTEX(cnic_lock);
+
+static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+static int cnic_service_bnx2(void *, void *);
+static int cnic_ctl(void *, struct cnic_ctl_info *);
+
+static struct cnic_ops cnic_bnx2_ops = {
+	.cnic_owner	= THIS_MODULE,
+	.cnic_handler	= cnic_service_bnx2,
+	.cnic_ctl	= cnic_ctl,
+};
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
+static int cnic_cm_set_pg(struct cnic_sock *);
+
+static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+	struct cnic_dev *dev = uinfo->priv;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (cp->uio_dev != -1)
+		return -EBUSY;
+
+	cp->uio_dev = iminor(inode);
+
+	cnic_shutdown_bnx2_rx_ring(dev);
+
+	cnic_init_bnx2_tx_ring(dev);
+	cnic_init_bnx2_rx_ring(dev);
+
+	return 0;
+}
+
+static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+	struct cnic_dev *dev = uinfo->priv;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cp->uio_dev = -1;
+	return 0;
+}
+
+static inline void cnic_hold(struct cnic_dev *dev)
+{
+	atomic_inc(&dev->ref_count);
+}
+
+static inline void cnic_put(struct cnic_dev *dev)
+{
+	atomic_dec(&dev->ref_count);
+}
+
+static inline void csk_hold(struct cnic_sock *csk)
+{
+	atomic_inc(&csk->ref_count);
+}
+
+static inline void csk_put(struct cnic_sock *csk)
+{
+	atomic_dec(&csk->ref_count);
+}
+
+static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
+{
+	struct cnic_dev *cdev;
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(cdev, &cnic_dev_list, list) {
+		if (netdev == cdev->netdev) {
+			cnic_hold(cdev);
+			read_unlock(&cnic_dev_lock);
+			return cdev;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+	return NULL;
+}
+
+static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_CTX_WR_CMD;
+	io->cid_addr = cid_addr;
+	io->offset = off;
+	io->data = val;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_IO_WR_CMD;
+	io->offset = off;
+	io->data = val;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_IO_RD_CMD;
+	io->offset = off;
+	ethdev->drv_ctl(dev->netdev, &info);
+	return io->data;
+}
+
+static int cnic_in_use(struct cnic_sock *csk)
+{
+	return test_bit(SK_F_INUSE, &csk->flags);
+}
+
+static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+
+	info.cmd = DRV_CTL_COMPLETION_CMD;
+	info.data.comp.comp_count = count;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
+			   struct cnic_sock *csk)
+{
+	struct iscsi_path path_req;
+	char *buf = NULL;
+	u16 len = 0;
+	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+	struct cnic_ulp_ops *ulp_ops;
+
+	if (cp->uio_dev == -1)
+		return -ENODEV;
+
+	if (csk) {
+		len = sizeof(path_req);
+		buf = (char *) &path_req;
+		memset(&path_req, 0, len);
+
+		msg_type = ISCSI_KEVENT_PATH_REQ;
+		path_req.handle = (u64) csk->l5_cid;
+		if (test_bit(SK_F_IPV6, &csk->flags)) {
+			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
+			       sizeof(struct in6_addr));
+			path_req.ip_addr_len = 16;
+		} else {
+			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
+			       sizeof(struct in_addr));
+			path_req.ip_addr_len = 4;
+		}
+		path_req.vlan_id = csk->vlan_id;
+		path_req.pmtu = csk->mtu;
+	}
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
+	if (ulp_ops)
+		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
+	rcu_read_unlock();
+	return 0;
+}
+
+static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
+				  char *buf, u16 len)
+{
+	int rc = -EINVAL;
+
+	switch (msg_type) {
+	case ISCSI_UEVENT_PATH_UPDATE: {
+		struct cnic_local *cp;
+		u32 l5_cid;
+		struct cnic_sock *csk;
+		struct iscsi_path *path_resp;
+
+		if (len < sizeof(*path_resp))
+			break;
+
+		path_resp = (struct iscsi_path *) buf;
+		cp = dev->cnic_priv;
+		l5_cid = (u32) path_resp->handle;
+		if (l5_cid >= MAX_CM_SK_TBL_SZ)
+			break;
+
+		csk = &cp->csk_tbl[l5_cid];
+		csk_hold(csk);
+		if (cnic_in_use(csk)) {
+			memcpy(csk->ha, path_resp->mac_addr, 6);
+			if (test_bit(SK_F_IPV6, &csk->flags))
+				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
+				       sizeof(struct in6_addr));
+			else
+				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
+				       sizeof(struct in_addr));
+			if (is_valid_ether_addr(csk->ha))
+				cnic_cm_set_pg(csk);
+		}
+		csk_put(csk);
+		rc = 0;
+	}
+	}
+
+	return rc;
+}
+
+static int cnic_offld_prep(struct cnic_sock *csk)
+{
+	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+		return 0;
+
+	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		return 0;
+	}
+
+	return 1;
+}
+
+static int cnic_close_prep(struct cnic_sock *csk)
+{
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	smp_mb__after_clear_bit();
+
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+			msleep(1);
+
+		return 1;
+	}
+	return 0;
+}
+
+static int cnic_abort_prep(struct cnic_sock *csk)
+{
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	smp_mb__after_clear_bit();
+
+	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+		msleep(1);
+
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+		return 1;
+	}
+
+	return 0;
+}
+
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
+{
+	struct cnic_dev *dev;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (cnic_ulp_tbl[ulp_type]) {
+		printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
+				    "been registered\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EBUSY;
+	}
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
+	}
+	read_unlock(&cnic_dev_lock);
+
+	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
+	mutex_unlock(&cnic_lock);
+
+	/* Prevent race conditions with netdev_event */
+	rtnl_lock();
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+			ulp_ops->cnic_init(dev);
+	}
+	read_unlock(&cnic_dev_lock);
+	rtnl_unlock();
+
+	return 0;
+}
+
+int cnic_unregister_driver(int ulp_type)
+{
+	struct cnic_dev *dev;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (!cnic_ulp_tbl[ulp_type]) {
+		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
+				    "been registered\n", ulp_type);
+		goto out_unlock;
+	}
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+			printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
+			       "still has devices registered\n", ulp_type);
+			read_unlock(&cnic_dev_lock);
+			goto out_unlock;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+
+	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+
+	mutex_unlock(&cnic_lock);
+	synchronize_rcu();
+	return 0;
+
+out_unlock:
+	mutex_unlock(&cnic_lock);
+	return -EINVAL;
+}
+
+static int cnic_start_hw(struct cnic_dev *);
+static void cnic_stop_hw(struct cnic_dev *);
+
+static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
+				void *ulp_ctx)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_ulp_ops *ulp_ops;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (cnic_ulp_tbl[ulp_type] == NULL) {
+		printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
+				    "has not been registered\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EAGAIN;
+	}
+	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+		printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
+		       "been registered to this device\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EBUSY;
+	}
+
+	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
+	cp->ulp_handle[ulp_type] = ulp_ctx;
+	ulp_ops = cnic_ulp_tbl[ulp_type];
+	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
+	cnic_hold(dev);
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
+			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
+
+	mutex_unlock(&cnic_lock);
+
+	return 0;
+
+}
+EXPORT_SYMBOL(cnic_register_driver);
+
+static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+		cnic_put(dev);
+	} else {
+		printk(KERN_ERR PFX "cnic_unregister_device: device not "
+		       "registered to this ulp type %d\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&cnic_lock);
+
+	synchronize_rcu();
+
+	return 0;
+}
+EXPORT_SYMBOL(cnic_unregister_driver);
+
+static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
+{
+	id_tbl->start = start_id;
+	id_tbl->max = size;
+	id_tbl->next = 0;
+	spin_lock_init(&id_tbl->lock);
+	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+	if (!id_tbl->table)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
+{
+	kfree(id_tbl->table);
+	id_tbl->table = NULL;
+}
+
+static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+	int ret = -1;
+
+	id -= id_tbl->start;
+	if (id >= id_tbl->max)
+		return ret;
+
+	spin_lock(&id_tbl->lock);
+	if (!test_bit(id, id_tbl->table)) {
+		set_bit(id, id_tbl->table);
+		ret = 0;
+	}
+	spin_unlock(&id_tbl->lock);
+	return ret;
+}
+
+/* Returns -1 if not successful */
+static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
+{
+	u32 id;
+
+	spin_lock(&id_tbl->lock);
+	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+	if (id >= id_tbl->max) {
+		id = -1;
+		if (id_tbl->next != 0) {
+			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+			if (id >= id_tbl->next)
+				id = -1;
+		}
+	}
+
+	if (id < id_tbl->max) {
+		set_bit(id, id_tbl->table);
+		id_tbl->next = (id + 1) & (id_tbl->max - 1);
+		id += id_tbl->start;
+	}
+
+	spin_unlock(&id_tbl->lock);
+
+	return id;
+}
+
+static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+	if (id == -1)
+		return;
+
+	id -= id_tbl->start;
+	if (id >= id_tbl->max)
+		return;
+
+	clear_bit(id, id_tbl->table);
+}
+
+static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+
+	if (!dma->pg_arr)
+		return;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		if (dma->pg_arr[i]) {
+			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
+					    dma->pg_arr[i], dma->pg_map_arr[i]);
+			dma->pg_arr[i] = NULL;
+		}
+	}
+	if (dma->pgtbl) {
+		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
+				    dma->pgtbl, dma->pgtbl_map);
+		dma->pgtbl = NULL;
+	}
+	kfree(dma->pg_arr);
+	dma->pg_arr = NULL;
+	dma->num_pages = 0;
+}
+
+static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+	u32 *page_table = dma->pgtbl;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		/* Each entry needs to be in big endian format. */
+		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		page_table++;
+		*page_table = (u32) dma->pg_map_arr[i];
+		page_table++;
+	}
+}
+
+static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
+			  int pages, int use_pg_tbl)
+{
+	int i, size;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
+	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
+	if (dma->pg_arr == NULL)
+		return -ENOMEM;
+
+	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
+	dma->num_pages = pages;
+
+	for (i = 0; i < pages; i++) {
+		dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
+						      BCM_PAGE_SIZE,
+						      &dma->pg_map_arr[i]);
+		if (dma->pg_arr[i] == NULL)
+			goto error;
+	}
+	if (!use_pg_tbl)
+		return 0;
+
+	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
+			  ~(BCM_PAGE_SIZE - 1);
+	dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
+					  &dma->pgtbl_map);
+	if (dma->pgtbl == NULL)
+		goto error;
+
+	cp->setup_pgtbl(dev, dma);
+
+	return 0;
+
+error:
+	cnic_free_dma(dev, dma);
+	return -ENOMEM;
+}
+
+static void cnic_free_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i = 0;
+
+	if (cp->cnic_uinfo) {
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+		while (cp->uio_dev != -1 && i < 15) {
+			msleep(100);
+			i++;
+		}
+		uio_unregister_device(cp->cnic_uinfo);
+		kfree(cp->cnic_uinfo);
+		cp->cnic_uinfo = NULL;
+	}
+
+	if (cp->l2_buf) {
+		pci_free_consistent(dev->pcidev, cp->l2_buf_size,
+				    cp->l2_buf, cp->l2_buf_map);
+		cp->l2_buf = NULL;
+	}
+
+	if (cp->l2_ring) {
+		pci_free_consistent(dev->pcidev, cp->l2_ring_size,
+				    cp->l2_ring, cp->l2_ring_map);
+		cp->l2_ring = NULL;
+	}
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		if (cp->ctx_arr[i].ctx) {
+			pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
+					    cp->ctx_arr[i].ctx,
+					    cp->ctx_arr[i].mapping);
+			cp->ctx_arr[i].ctx = NULL;
+		}
+	}
+	kfree(cp->ctx_arr);
+	cp->ctx_arr = NULL;
+	cp->ctx_blks = 0;
+
+	cnic_free_dma(dev, &cp->gbl_buf_info);
+	cnic_free_dma(dev, &cp->conn_buf_info);
+	cnic_free_dma(dev, &cp->kwq_info);
+	cnic_free_dma(dev, &cp->kcq_info);
+	kfree(cp->iscsi_tbl);
+	cp->iscsi_tbl = NULL;
+	kfree(cp->ctx_tbl);
+	cp->ctx_tbl = NULL;
+
+	cnic_free_id_tbl(&cp->cid_tbl);
+}
+
+static int cnic_alloc_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+		int i, k, arr_size;
+
+		cp->ctx_blk_size = BCM_PAGE_SIZE;
+		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
+		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
+			   sizeof(struct cnic_ctx);
+		cp->ctx_arr = kmalloc(arr_size, GFP_KERNEL);
+		if (cp->ctx_arr == NULL)
+			return -ENOMEM;
+
+		memset(cp->ctx_arr, 0, arr_size);
+
+		k = 0;
+		for (i = 0; i < 2; i++) {
+			u32 j, reg, off, lo, hi;
+
+			if (i == 0)
+				off = BNX2_PG_CTX_MAP;
+			else
+				off = BNX2_ISCSI_CTX_MAP;
+
+			reg = cnic_reg_rd_ind(dev, off);
+			lo = reg >> 16;
+			hi = reg & 0xffff;
+			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
+				cp->ctx_arr[k].cid = j;
+		}
+
+		cp->ctx_blks = k;
+		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
+			cp->ctx_blks = 0;
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < cp->ctx_blks; i++) {
+			cp->ctx_arr[i].ctx =
+				pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
+						     &cp->ctx_arr[i].mapping);
+			if (cp->ctx_arr[i].ctx == NULL)
+				return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct uio_info *uinfo;
+	int ret;
+
+	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
+	if (ret)
+		goto error;
+	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
+
+	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
+	if (ret)
+		goto error;
+	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
+
+	ret = cnic_alloc_context(dev);
+	if (ret)
+		goto error;
+
+	cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
+	cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
+					   &cp->l2_ring_map);
+	if (!cp->l2_ring)
+		goto error;
+
+	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
+	cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
+					   &cp->l2_buf_map);
+	if (!cp->l2_buf)
+		goto error;
+
+	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
+	if (!uinfo)
+		goto error;
+
+	uinfo->mem[0].addr = dev->netdev->base_addr;
+	uinfo->mem[0].internal_addr = dev->regview;
+	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
+	uinfo->mem[0].memtype = UIO_MEM_PHYS;
+
+	uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
+	if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+	else
+		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
+	uinfo->mem[2].size = cp->l2_ring_size;
+	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
+	uinfo->mem[3].size = cp->l2_buf_size;
+	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->name = "bnx2_cnic";
+	uinfo->version = CNIC_MODULE_VERSION;
+	uinfo->irq = UIO_IRQ_CUSTOM;
+
+	uinfo->open = cnic_uio_open;
+	uinfo->release = cnic_uio_close;
+
+	uinfo->priv = dev;
+
+	ret = uio_register_device(&dev->pcidev->dev, uinfo);
+	if (ret) {
+		kfree(uinfo);
+		goto error;
+	}
+
+	cp->cnic_uinfo = uinfo;
+
+	return 0;
+
+error:
+	cnic_free_resc(dev);
+	return ret;
+}
+
+static inline u32 cnic_kwq_avail(struct cnic_local *cp)
+{
+	return cp->max_kwq_idx -
+		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
+}
+
+static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				  u32 num_wqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct kwqe *prod_qe;
+	u16 prod, sw_prod, i;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2 is down */
+
+	spin_lock_bh(&cp->cnic_ulp_lock);
+	if (num_wqes > cnic_kwq_avail(cp) &&
+	    !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
+		spin_unlock_bh(&cp->cnic_ulp_lock);
+		return -EAGAIN;
+	}
+
+	cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
+
+	prod = cp->kwq_prod_idx;
+	sw_prod = prod & MAX_KWQ_IDX;
+	for (i = 0; i < num_wqes; i++) {
+		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
+		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
+		prod++;
+		sw_prod = prod & MAX_KWQ_IDX;
+	}
+	cp->kwq_prod_idx = prod;
+
+	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
+
+	spin_unlock_bh(&cp->cnic_ulp_lock);
+	return 0;
+}
+
+static void service_kcqes(struct cnic_dev *dev, int num_cqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i, j;
+
+	i = 0;
+	j = 1;
+	while (num_cqes) {
+		struct cnic_ulp_ops *ulp_ops;
+		int ulp_type;
+		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
+		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
+
+		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
+			cnic_kwq_completion(dev, 1);
+
+		while (j < num_cqes) {
+			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
+
+			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
+				break;
+
+			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
+				cnic_kwq_completion(dev, 1);
+			j++;
+		}
+
+		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
+			ulp_type = CNIC_ULP_RDMA;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
+			ulp_type = CNIC_ULP_ISCSI;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
+			ulp_type = CNIC_ULP_L4;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
+			goto end;
+		else {
+			printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
+			       dev->netdev->name, kcqe_op_flag);
+			goto end;
+		}
+
+		rcu_read_lock();
+		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+		if (likely(ulp_ops)) {
+			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+						  cp->completed_kcq + i, j);
+		}
+		rcu_read_unlock();
+end:
+		num_cqes -= j;
+		i += j;
+		j = 1;
+	}
+	return;
+}
+
+static u16 cnic_bnx2_next_idx(u16 idx)
+{
+	return idx + 1;
+}
+
+static u16 cnic_bnx2_hw_idx(u16 idx)
+{
+	return idx;
+}
+
+static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 i, ri, last;
+	struct kcqe *kcqe;
+	int kcqe_cnt = 0, last_cnt = 0;
+
+	i = ri = last = *sw_prod;
+	ri &= MAX_KCQ_IDX;
+
+	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
+		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
+		cp->completed_kcq[kcqe_cnt++] = kcqe;
+		i = cp->next_idx(i);
+		ri = i & MAX_KCQ_IDX;
+		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
+			last_cnt = kcqe_cnt;
+			last = i;
+		}
+	}
+
+	*sw_prod = last;
+	return last_cnt;
+}
+
+static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
+{
+	u16 rx_cons = *cp->rx_cons_ptr;
+	u16 tx_cons = *cp->tx_cons_ptr;
+
+	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+		cp->tx_cons = tx_cons;
+		cp->rx_cons = rx_cons;
+		uio_event_notify(cp->cnic_uinfo);
+	}
+}
+
+static int cnic_service_bnx2(void *data, void *status_blk)
+{
+	struct cnic_dev *dev = data;
+	struct status_block *sblk = status_blk;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 status_idx = sblk->status_idx;
+	u16 hw_prod, sw_prod;
+	int kcqe_cnt;
+
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		return status_idx;
+
+	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+
+	hw_prod = sblk->status_completion_producer_index;
+	sw_prod = cp->kcq_prod_idx;
+	while (sw_prod != hw_prod) {
+		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+		if (kcqe_cnt == 0)
+			goto done;
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that status_blk fields can change. */
+		barrier();
+		if (status_idx != sblk->status_idx) {
+			status_idx = sblk->status_idx;
+			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+			hw_prod = sblk->status_completion_producer_index;
+		} else
+			break;
+	}
+
+done:
+	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
+
+	cp->kcq_prod_idx = sw_prod;
+
+	cnic_chk_bnx2_pkt_rings(cp);
+	return status_idx;
+}
+
+static void cnic_service_bnx2_msix(unsigned long data)
+{
+	struct cnic_dev *dev = (struct cnic_dev *) data;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct status_block_msix *status_blk = cp->bnx2_status_blk;
+	u32 status_idx = status_blk->status_idx;
+	u16 hw_prod, sw_prod;
+	int kcqe_cnt;
+
+	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+
+	hw_prod = status_blk->status_completion_producer_index;
+	sw_prod = cp->kcq_prod_idx;
+	while (sw_prod != hw_prod) {
+		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+		if (kcqe_cnt == 0)
+			goto done;
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that status_blk fields can change. */
+		barrier();
+		if (status_idx != status_blk->status_idx) {
+			status_idx = status_blk->status_idx;
+			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+			hw_prod = status_blk->status_completion_producer_index;
+		} else
+			break;
+	}
+
+done:
+	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
+	cp->kcq_prod_idx = sw_prod;
+
+	cnic_chk_bnx2_pkt_rings(cp);
+
+	cp->last_status_idx = status_idx;
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static irqreturn_t cnic_irq(int irq, void *dev_instance)
+{
+	struct cnic_dev *dev = dev_instance;
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
+
+	if (cp->ack_int)
+		cp->ack_int(dev);
+
+	prefetch(cp->status_blk);
+	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		tasklet_schedule(&cp->cnic_irq_task);
+
+	return IRQ_HANDLED;
+}
+
+static void cnic_ulp_stop(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int if_type;
+
+	rcu_read_lock();
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+		if (!ulp_ops)
+			continue;
+
+		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+	}
+	rcu_read_unlock();
+}
+
+static void cnic_ulp_start(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int if_type;
+
+	rcu_read_lock();
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+		if (!ulp_ops || !ulp_ops->cnic_start)
+			continue;
+
+		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_ctl(void *data, struct cnic_ctl_info *info)
+{
+	struct cnic_dev *dev = data;
+
+	switch (info->cmd) {
+	case CNIC_CTL_STOP_CMD:
+		cnic_hold(dev);
+		mutex_lock(&cnic_lock);
+
+		cnic_ulp_stop(dev);
+		cnic_stop_hw(dev);
+
+		mutex_unlock(&cnic_lock);
+		cnic_put(dev);
+		break;
+	case CNIC_CTL_START_CMD:
+		cnic_hold(dev);
+		mutex_lock(&cnic_lock);
+
+		if (!cnic_start_hw(dev))
+			cnic_ulp_start(dev);
+
+		mutex_unlock(&cnic_lock);
+		cnic_put(dev);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void cnic_ulp_init(struct cnic_dev *dev)
+{
+	int i;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	rcu_read_lock();
+	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
+		if (!ulp_ops || !ulp_ops->cnic_init)
+			continue;
+
+		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+			ulp_ops->cnic_init(dev);
+
+	}
+	rcu_read_unlock();
+}
+
+static void cnic_ulp_exit(struct cnic_dev *dev)
+{
+	int i;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	rcu_read_lock();
+	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
+		if (!ulp_ops || !ulp_ops->cnic_exit)
+			continue;
+
+		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+			ulp_ops->cnic_exit(dev);
+
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_cm_offload_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_offload_pg *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
+	l4kwqe->l2hdr_nbytes = ETH_HLEN;
+
+	l4kwqe->da0 = csk->ha[0];
+	l4kwqe->da1 = csk->ha[1];
+	l4kwqe->da2 = csk->ha[2];
+	l4kwqe->da3 = csk->ha[3];
+	l4kwqe->da4 = csk->ha[4];
+	l4kwqe->da5 = csk->ha[5];
+
+	l4kwqe->sa0 = dev->mac_addr[0];
+	l4kwqe->sa1 = dev->mac_addr[1];
+	l4kwqe->sa2 = dev->mac_addr[2];
+	l4kwqe->sa3 = dev->mac_addr[3];
+	l4kwqe->sa4 = dev->mac_addr[4];
+	l4kwqe->sa5 = dev->mac_addr[5];
+
+	l4kwqe->etype = ETH_P_IP;
+	l4kwqe->ipid_count = DEF_IPID_COUNT;
+	l4kwqe->host_opaque = csk->l5_cid;
+
+	if (csk->vlan_id) {
+		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
+		l4kwqe->vlan_tag = csk->vlan_id;
+		l4kwqe->l2hdr_nbytes += 4;
+	}
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_update_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_update_pg *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
+	l4kwqe->pg_cid = csk->pg_cid;
+
+	l4kwqe->da0 = csk->ha[0];
+	l4kwqe->da1 = csk->ha[1];
+	l4kwqe->da2 = csk->ha[2];
+	l4kwqe->da3 = csk->ha[3];
+	l4kwqe->da4 = csk->ha[4];
+	l4kwqe->da5 = csk->ha[5];
+
+	l4kwqe->pg_host_opaque = csk->l5_cid;
+	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_upload_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_upload *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->pg_cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_conn_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_connect_req1 *l4kwqe1;
+	struct l4_kwq_connect_req2 *l4kwqe2;
+	struct l4_kwq_connect_req3 *l4kwqe3;
+	struct kwqe *wqes[3];
+	u8 tcp_flags = 0;
+	int num_wqes = 2;
+
+	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
+	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
+	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
+	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
+	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
+	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
+
+	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
+	l4kwqe3->flags =
+		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
+	l4kwqe3->ka_timeout = csk->ka_timeout;
+	l4kwqe3->ka_interval = csk->ka_interval;
+	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
+	l4kwqe3->tos = csk->tos;
+	l4kwqe3->ttl = csk->ttl;
+	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
+	l4kwqe3->pmtu = csk->mtu;
+	l4kwqe3->rcv_buf = csk->rcv_buf;
+	l4kwqe3->snd_buf = csk->snd_buf;
+	l4kwqe3->seed = csk->seed;
+
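+	/* CONNECT1 always leads the chain.  For IPv6, CONNECT2 carries the
+	 * remaining address words and CONNECT3 follows; for IPv4, only
+	 * CONNECT1 and CONNECT3 are submitted.
+	 */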
+	wqes[0] = (struct kwqe *) l4kwqe1;
+	if (test_bit(SK_F_IPV6, &csk->flags)) {
+		wqes[1] = (struct kwqe *) l4kwqe2;
+		wqes[2] = (struct kwqe *) l4kwqe3;
+		num_wqes = 3;
+
+		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
+		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
+		l4kwqe2->flags =
+			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
+			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
+		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
+		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
+		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
+		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
+		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
+		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
+		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
+			       sizeof(struct tcphdr);
+	} else {
+		wqes[1] = (struct kwqe *) l4kwqe3;
+		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
+			       sizeof(struct tcphdr);
+	}
+
+	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
+	l4kwqe1->flags =
+		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
+		 L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT;
+	l4kwqe1->cid = csk->cid;
+	l4kwqe1->pg_cid = csk->pg_cid;
+	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
+	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
+	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
+	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
+	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
+	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
+	if (csk->tcp_flags & SK_TCP_NAGLE)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
+	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
+	if (csk->tcp_flags & SK_TCP_SACK)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
+	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
+
+	l4kwqe1->tcp_flags = tcp_flags;
+
+	return dev->submit_kwqes(dev, wqes, num_wqes);
+}
+
+static int cnic_cm_close_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_close_req *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
+	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_abort_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_reset_req *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
+	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
+			  u32 l5_cid, struct cnic_sock **csk, void *context)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_sock *csk1;
+
+	if (l5_cid >= MAX_CM_SK_TBL_SZ)
+		return -EINVAL;
+
+	csk1 = &cp->csk_tbl[l5_cid];
+	if (atomic_read(&csk1->ref_count))
+		return -EAGAIN;
+
+	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
+		return -EBUSY;
+
+	csk1->dev = dev;
+	csk1->cid = cid;
+	csk1->l5_cid = l5_cid;
+	csk1->ulp_type = ulp_type;
+	csk1->context = context;
+
+	csk1->ka_timeout = DEF_KA_TIMEOUT;
+	csk1->ka_interval = DEF_KA_INTERVAL;
+	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
+	csk1->tos = DEF_TOS;
+	csk1->ttl = DEF_TTL;
+	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
+	csk1->rcv_buf = DEF_RCV_BUF;
+	csk1->snd_buf = DEF_SND_BUF;
+	csk1->seed = DEF_SEED;
+
+	*csk = csk1;
+	return 0;
+}
+
+static void cnic_cm_cleanup(struct cnic_sock *csk)
+{
+	if (csk->src_port) {
+		struct cnic_dev *dev = csk->dev;
+		struct cnic_local *cp = dev->cnic_priv;
+
+		cnic_free_id(&cp->csk_port_tbl, csk->src_port);
+		csk->src_port = 0;
+	}
+}
+
+static void cnic_close_conn(struct cnic_sock *csk)
+{
+	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
+		cnic_cm_upload_pg(csk);
+		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+	}
+	cnic_cm_cleanup(csk);
+}
+
+static int cnic_cm_destroy(struct cnic_sock *csk)
+{
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	csk_hold(csk);
+	clear_bit(SK_F_INUSE, &csk->flags);
+	smp_mb__after_clear_bit();
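+	/* Wait for all other holders to drop their references before
+	 * cleaning up; we still hold one reference ourselves.
+	 */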
+	while (atomic_read(&csk->ref_count) != 1)
+		msleep(1);
+	cnic_cm_cleanup(csk);
+
+	csk->flags = 0;
+	csk_put(csk);
+	return 0;
+}
+
+static inline u16 cnic_get_vlan(struct net_device *dev,
+				struct net_device **vlan_dev)
+{
+	if (dev->priv_flags & IFF_802_1Q_VLAN) {
+		*vlan_dev = vlan_dev_real_dev(dev);
+		return vlan_dev_vlan_id(dev);
+	}
+	*vlan_dev = dev;
+	return 0;
+}
+
+static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
+			     struct dst_entry **dst)
+{
+	struct flowi fl;
+	int err;
+	struct rtable *rt;
+
+	memset(&fl, 0, sizeof(fl));
+	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
+
+	err = ip_route_output_key(&init_net, &rt, &fl);
+	if (!err)
+		*dst = &rt->u.dst;
+	return err;
+}
+
+static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
+			     struct dst_entry **dst)
+{
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	struct flowi fl;
+
+	memset(&fl, 0, sizeof(fl));
+	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
+	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
+		fl.oif = dst_addr->sin6_scope_id;
+
+	*dst = ip6_route_output(&init_net, NULL, &fl);
+	if (*dst)
+		return 0;
+#endif
+
+	return -ENETUNREACH;
+}
+
+static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
+					   int ulp_type)
+{
+	struct cnic_dev *dev = NULL;
+	struct dst_entry *dst;
+	struct net_device *netdev = NULL;
+	int err = -ENETUNREACH;
+
+	if (dst_addr->sin_family == AF_INET)
+		err = cnic_get_v4_route(dst_addr, &dst);
+	else if (dst_addr->sin_family == AF_INET6) {
+		struct sockaddr_in6 *dst_addr6 =
+			(struct sockaddr_in6 *) dst_addr;
+
+		err = cnic_get_v6_route(dst_addr6, &dst);
+	} else
+		return NULL;
+
+	if (err)
+		return NULL;
+
+	if (!dst->dev)
+		goto done;
+
+	cnic_get_vlan(dst->dev, &netdev);
+
+	dev = cnic_from_netdev(netdev);
+
+done:
+	dst_release(dst);
+	if (dev)
+		cnic_put(dev);
+	return dev;
+}
+
+static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
+}
+
+static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+	int is_v6, err, rc = -ENETUNREACH;
+	struct dst_entry *dst;
+	struct net_device *realdev;
+	u32 local_port;
+
+	if (saddr->local.v6.sin6_family == AF_INET6 &&
+	    saddr->remote.v6.sin6_family == AF_INET6)
+		is_v6 = 1;
+	else if (saddr->local.v4.sin_family == AF_INET &&
+		 saddr->remote.v4.sin_family == AF_INET)
+		is_v6 = 0;
+	else
+		return -EINVAL;
+
+	clear_bit(SK_F_IPV6, &csk->flags);
+
+	if (is_v6) {
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+		set_bit(SK_F_IPV6, &csk->flags);
+		err = cnic_get_v6_route(&saddr->remote.v6, &dst);
+		if (err)
+			return err;
+
+		if (!dst || dst->error || !dst->dev)
+			goto err_out;
+
+		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
+		       sizeof(struct in6_addr));
+		csk->dst_port = saddr->remote.v6.sin6_port;
+		local_port = saddr->local.v6.sin6_port;
+#else
+		return rc;
+#endif
+
+	} else {
+		err = cnic_get_v4_route(&saddr->remote.v4, &dst);
+		if (err)
+			return err;
+
+		if (!dst || dst->error || !dst->dev)
+			goto err_out;
+
+		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
+		csk->dst_port = saddr->remote.v4.sin_port;
+		local_port = saddr->local.v4.sin_port;
+	}
+
+	csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
+	if (realdev != dev->netdev)
+		goto err_out;
+
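+	/* Try to reserve the caller-supplied local port if it falls within
+	 * the CNIC range; otherwise allocate a free one from the port table.
+	 */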
+	if (local_port >= CNIC_LOCAL_PORT_MIN &&
+	    local_port < CNIC_LOCAL_PORT_MAX) {
+		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
+			local_port = 0;
+	} else
+		local_port = 0;
+
+	if (!local_port) {
+		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
+		if (local_port == -1) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+	}
+	csk->src_port = local_port;
+
+	csk->mtu = dst_mtu(dst);
+	rc = 0;
+
+err_out:
+	dst_release(dst);
+	return rc;
+}
+
+static void cnic_init_csk_state(struct cnic_sock *csk)
+{
+	csk->state = 0;
+	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+	clear_bit(SK_F_CLOSING, &csk->flags);
+}
+
+static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	int err = 0;
+
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
+		return -EINVAL;
+
+	cnic_init_csk_state(csk);
+
+	err = cnic_get_route(csk, saddr);
+	if (err)
+		goto err_out;
+
+	err = cnic_resolve_addr(csk, saddr);
+	if (!err)
+		return 0;
+
+err_out:
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	return err;
+}
+
+static int cnic_cm_abort(struct cnic_sock *csk)
+{
+	struct cnic_local *cp = csk->dev->cnic_priv;
+	u32 opcode;
+
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (cnic_abort_prep(csk))
+		return cnic_cm_abort_req(csk);
+
+	/* Getting here means that we haven't started connect, or
+	 * connect was not successful.
+	 */
+
+	csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+		opcode = csk->state;
+	else
+		opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
+	cp->close_conn(csk, opcode);
+
+	return 0;
+}
+
+static int cnic_cm_close(struct cnic_sock *csk)
+{
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (cnic_close_prep(csk)) {
+		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+		return cnic_cm_close_req(csk);
+	}
+	return 0;
+}
+
+static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
+			   u8 opcode)
+{
+	struct cnic_ulp_ops *ulp_ops;
+	int ulp_type = csk->ulp_type;
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+	if (ulp_ops) {
+		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
+			ulp_ops->cm_connect_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+			ulp_ops->cm_close_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
+			ulp_ops->cm_remote_abort(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
+			ulp_ops->cm_abort_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
+			ulp_ops->cm_remote_close(csk);
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_cm_set_pg(struct cnic_sock *csk)
+{
+	if (cnic_offld_prep(csk)) {
+		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+			cnic_cm_update_pg(csk);
+		else
+			cnic_cm_offload_pg(csk);
+	}
+	return 0;
+}
+
+static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 l5_cid = kcqe->pg_host_opaque;
+	u8 opcode = kcqe->op_code;
+	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+
+	csk_hold(csk);
+	if (!cnic_in_use(csk))
+		goto done;
+
+	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		goto done;
+	}
+	csk->pg_cid = kcqe->pg_cid;
+	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+	cnic_cm_conn_req(csk);
+
+done:
+	csk_put(csk);
+}
+
+static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
+	u8 opcode = l4kcqe->op_code;
+	u32 l5_cid;
+	struct cnic_sock *csk;
+
+	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
+	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+		cnic_cm_process_offld_pg(dev, l4kcqe);
+		return;
+	}
+
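+	/* Opcodes with the 0x80 bit set are ramrod completions; they carry
+	 * the L5 connection id in the cid field rather than conn_id.
+	 */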
+	l5_cid = l4kcqe->conn_id;
+	if (opcode & 0x80)
+		l5_cid = l4kcqe->cid;
+	if (l5_cid >= MAX_CM_SK_TBL_SZ)
+		return;
+
+	csk = &cp->csk_tbl[l5_cid];
+	csk_hold(csk);
+
+	if (!cnic_in_use(csk)) {
+		csk_put(csk);
+		return;
+	}
+
+	switch (opcode) {
+	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
+		if (l4kcqe->status == 0)
+			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
+
+		smp_mb__before_clear_bit();
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		cnic_cm_upcall(cp, csk, opcode);
+		break;
+
+	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+		if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
+			csk->state = opcode;
+		/* fall through */
+	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+		cp->close_conn(csk, opcode);
+		break;
+
+	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
+		cnic_cm_upcall(cp, csk, opcode);
+		break;
+	}
+	csk_put(csk);
+}
+
+static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
+{
+	struct cnic_dev *dev = data;
+	int i;
+
+	for (i = 0; i < num; i++)
+		cnic_cm_process_kcqe(dev, kcqe[i]);
+}
+
+static struct cnic_ulp_ops cm_ulp_ops = {
+	.indicate_kcqes		= cnic_cm_indicate_kcqe,
+};
+
+static void cnic_cm_free_mem(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	kfree(cp->csk_tbl);
+	cp->csk_tbl = NULL;
+	cnic_free_id_tbl(&cp->csk_port_tbl);
+}
+
+static int cnic_cm_alloc_mem(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
+			      GFP_KERNEL);
+	if (!cp->csk_tbl)
+		return -ENOMEM;
+
+	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
+			     CNIC_LOCAL_PORT_MIN)) {
+		cnic_cm_free_mem(dev);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
+{
+	if ((opcode == csk->state) ||
+	    (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
+	     csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
+		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
+			return 1;
+	}
+	return 0;
+}
+
+static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	if (cnic_ready_to_close(csk, opcode)) {
+		cnic_close_conn(csk);
+		cnic_cm_upcall(cp, csk, opcode);
+	}
+}
+
+static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
+{
+}
+
+static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
+{
+	u32 seed;
+
+	get_random_bytes(&seed, 4);
+	cnic_ctx_wr(dev, 45, 0, seed);
+	return 0;
+}
+
+static int cnic_cm_open(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int err;
+
+	err = cnic_cm_alloc_mem(dev);
+	if (err)
+		return err;
+
+	err = cp->start_cm(dev);
+
+	if (err)
+		goto err_out;
+
+	dev->cm_create = cnic_cm_create;
+	dev->cm_destroy = cnic_cm_destroy;
+	dev->cm_connect = cnic_cm_connect;
+	dev->cm_abort = cnic_cm_abort;
+	dev->cm_close = cnic_cm_close;
+	dev->cm_select_dev = cnic_cm_select_dev;
+
+	cp->ulp_handle[CNIC_ULP_L4] = dev;
+	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
+	return 0;
+
+err_out:
+	cnic_cm_free_mem(dev);
+	return err;
+}
+
+static int cnic_cm_shutdown(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i;
+
+	cp->stop_cm(dev);
+
+	if (!cp->csk_tbl)
+		return 0;
+
+	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
+		struct cnic_sock *csk = &cp->csk_tbl[i];
+
+		clear_bit(SK_F_INUSE, &csk->flags);
+		cnic_cm_cleanup(csk);
+	}
+	cnic_cm_free_mem(dev);
+
+	return 0;
+}
+
+static void cnic_init_context(struct cnic_dev *dev, u32 cid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 cid_addr;
+	int i;
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5709)
+		return;
+
+	cid_addr = GET_CID_ADDR(cid);
+
+	for (i = 0; i < CTX_SIZE; i += 4)
+		cnic_ctx_wr(dev, cid_addr, i, 0);
+}
+
+static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret = 0, i;
+	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
+
+	if (CHIP_NUM(cp) != CHIP_NUM_5709)
+		return 0;
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		int j;
+		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
+		u32 val;
+
+		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
+
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+			(u64) cp->ctx_arr[i].mapping >> 32);
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
+			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
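+		/* Poll briefly until the controller clears the write request
+		 * bit, indicating the page table entry has been accepted.
+		 */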
+		for (j = 0; j < 10; j++) {
+
+			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+				break;
+			udelay(5);
+		}
+		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+	return ret;
+}
+
+static void cnic_free_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		cp->disable_int_sync(dev);
+		tasklet_disable(&cp->cnic_irq_task);
+		free_irq(ethdev->irq_arr[0].vector, dev);
+	}
+}
+
+static int cnic_init_bnx2_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		int err, i = 0;
+		int sblk_num = cp->status_blk_num;
+		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+			   BNX2_HC_SB_CONFIG_1;
+
+		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
+		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
+		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
+
+		cp->bnx2_status_blk = cp->status_blk;
+		cp->last_status_idx = cp->bnx2_status_blk->status_idx;
+		tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
+			     (unsigned long) dev);
+		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
+				  "cnic", dev);
+		if (err) {
+			tasklet_disable(&cp->cnic_irq_task);
+			return err;
+		}
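+		/* Kick the status block with coalesce-now commands until the
+		 * completion producer index returns to zero.
+		 */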
+		while (cp->bnx2_status_blk->status_completion_producer_index &&
+		       i < 10) {
+			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
+				1 << (11 + sblk_num));
+			udelay(10);
+			i++;
+			barrier();
+		}
+		if (cp->bnx2_status_blk->status_completion_producer_index) {
+			cnic_free_irq(dev);
+			goto failed;
+		}
+
+	} else {
+		struct status_block *sblk = cp->status_blk;
+		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
+		int i = 0;
+
+		while (sblk->status_completion_producer_index && i < 10) {
+			CNIC_WR(dev, BNX2_HC_COMMAND,
+				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+			udelay(10);
+			i++;
+			barrier();
+		}
+		if (sblk->status_completion_producer_index)
+			goto failed;
+
+	}
+	return 0;
+
+failed:
+	printk(KERN_ERR PFX "%s: KCQ index not resetting to 0.\n",
+	       dev->netdev->name);
+	return -EBUSY;
+}
+
+static void cnic_enable_bnx2_int(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		return;
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		return;
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
+	synchronize_irq(ethdev->irq_arr[0].vector);
+}
+
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 cid_addr, tx_cid, sb_id;
+	u32 val, offset0, offset1, offset2, offset3;
+	int i;
+	struct tx_bd *txbd;
+	dma_addr_t buf_map;
+	struct status_block *s_blk = cp->status_blk;
+
+	sb_id = cp->status_blk_num;
+	tx_cid = 20;
+	cnic_init_context(dev, tx_cid);
+	cnic_init_context(dev, tx_cid + 1);
+	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *sblk = cp->status_blk;
+
+		tx_cid = TX_TSS_CID + sb_id - 1;
+		cnic_init_context(dev, tx_cid);
+		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
+			(TX_TSS_CID << 7));
+		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
+	}
+	cp->tx_cons = *cp->tx_cons_ptr;
+
+	cid_addr = GET_CID_ADDR(tx_cid);
+	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
+
+		for (i = 0; i < PHY_CTX_SIZE; i += 4)
+			cnic_ctx_wr(dev, cid_addr2, i, 0);
+
+		offset0 = BNX2_L2CTX_TYPE_XI;
+		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+	} else {
+		offset0 = BNX2_L2CTX_TYPE;
+		offset1 = BNX2_L2CTX_CMD_TYPE;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+	}
+	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+	cnic_ctx_wr(dev, cid_addr, offset0, val);
+
+	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+	cnic_ctx_wr(dev, cid_addr, offset1, val);
+
+	txbd = (struct tx_bd *) cp->l2_ring;
+
+	buf_map = cp->l2_buf_map;
+	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
+		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
+		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+	}
+	val = (u64) cp->l2_ring_map >> 32;
+	cnic_ctx_wr(dev, cid_addr, offset2, val);
+	txbd->tx_bd_haddr_hi = val;
+
+	val = (u64) cp->l2_ring_map & 0xffffffff;
+	cnic_ctx_wr(dev, cid_addr, offset3, val);
+	txbd->tx_bd_haddr_lo = val;
+}
+
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 cid_addr, sb_id, val, coal_reg, coal_val;
+	int i;
+	struct rx_bd *rxbd;
+	struct status_block *s_blk = cp->status_blk;
+
+	sb_id = cp->status_blk_num;
+	cnic_init_context(dev, 2);
+	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
+	coal_reg = BNX2_HC_COMMAND;
+	coal_val = CNIC_RD(dev, coal_reg);
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *sblk = cp->status_blk;
+
+		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
+		coal_reg = BNX2_HC_COALESCE_NOW;
+		coal_val = 1 << (11 + sb_id);
+	}
+	i = 0;
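+	/* Force status block updates until the RX consumer index has been
+	 * populated, or give up after 10 attempts.
+	 */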
+	while (*cp->rx_cons_ptr == 0 && i < 10) {
+		CNIC_WR(dev, coal_reg, coal_val);
+		udelay(10);
+		i++;
+		barrier();
+	}
+	cp->rx_cons = *cp->rx_cons_ptr;
+
+	cid_addr = GET_CID_ADDR(2);
+	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
+	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+
+	if (sb_id == 0)
+		val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
+	else
+		val = BNX2_L2CTX_STATUSB_NUM(sb_id);
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
+
+	rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
+	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
+		dma_addr_t buf_map;
+		int n = (i % cp->l2_rx_ring_size) + 1;
+
+		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
+		rxbd->rx_bd_len = cp->l2_single_buf_size;
+		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
+		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+	}
+	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+	rxbd->rx_bd_haddr_hi = val;
+
+	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+	rxbd->rx_bd_haddr_lo = val;
+
+	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
+	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
+}
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
+{
+	struct kwqe *wqes[1], l2kwqe;
+
+	memset(&l2kwqe, 0, sizeof(l2kwqe));
+	wqes[0] = &l2kwqe;
+	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
+			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
+			       KWQE_OPCODE_SHIFT) | 2;
+	dev->submit_kwqes(dev, wqes, 1);
+}
+
+static void cnic_set_bnx2_mac(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 val;
+
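+	/* Read the iSCSI MAC address from the per-function shared memory
+	 * region and program it into the EMAC MAC match registers.
+	 */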
+	val = cp->func << 2;
+
+	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
+
+	val = cnic_reg_rd_ind(dev, cp->shmem_base +
+			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
+	dev->mac_addr[0] = (u8) (val >> 8);
+	dev->mac_addr[1] = (u8) val;
+
+	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
+
+	val = cnic_reg_rd_ind(dev, cp->shmem_base +
+			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
+	dev->mac_addr[2] = (u8) (val >> 24);
+	dev->mac_addr[3] = (u8) (val >> 16);
+	dev->mac_addr[4] = (u8) (val >> 8);
+	dev->mac_addr[5] = (u8) val;
+
+	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
+
+	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
+	if (CHIP_NUM(cp) != CHIP_NUM_5709)
+		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
+
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
+}
+
+static int cnic_start_bnx2_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct status_block *sblk = cp->status_blk;
+	u32 val;
+	int err;
+
+	cnic_set_bnx2_mac(dev);
+
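+	/* Program the MQ kernel bypass block size to match the host page
+	 * size, capped at 4K pages.
+	 */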
+	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
+	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+	if (BCM_PAGE_BITS > 12)
+		val |= (12 - 8)  << 4;
+	else
+		val |= (BCM_PAGE_BITS - 8)  << 4;
+
+	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
+
+	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
+	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
+	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
+
+	err = cnic_setup_5709_context(dev, 1);
+	if (err)
+		return err;
+
+	cnic_init_context(dev, KWQ_CID);
+	cnic_init_context(dev, KCQ_CID);
+
+	cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+	cp->max_kwq_idx = MAX_KWQ_IDX;
+	cp->kwq_prod_idx = 0;
+	cp->kwq_con_idx = 0;
+	cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
+		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
+	else
+		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
+
+	/* Initialize the kernel work queue context. */
+	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
+
+	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+	val = (u32) cp->kwq_info.pgtbl_map;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+	cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+	cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+	cp->kcq_prod_idx = 0;
+
+	/* Initialize the kernel complete queue context. */
+	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
+
+	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+	val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+	val = (u32) cp->kcq_info.pgtbl_map;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+	cp->int_num = 0;
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		u32 sb_id = cp->status_blk_num;
+		u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
+
+		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
+		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+		cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+	}
+
+	/* Enable Command Scheduler notification when we write to the
+	 * host producer index of the kernel contexts. */
+	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
+
+	/* Enable Command Scheduler notification when we write to either
+	 * the Send Queue or Receive Queue producer indexes of the kernel
+	 * bypass contexts. */
+	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
+	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
+
+	/* Notify COM when the driver posts an application buffer. */
+	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
+
+	/* Set the CP and COM doorbells.  These two processors poll the
+	 * doorbell for a non-zero value before running.  This must be done
+	 * after setting up the kernel queue contexts. */
+	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
+	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
+
+	cnic_init_bnx2_tx_ring(dev);
+	cnic_init_bnx2_rx_ring(dev);
+
+	err = cnic_init_bnx2_irq(dev);
+	if (err) {
+		printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
+		       dev->netdev->name);
+		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+		return err;
+	}
+
+	return 0;
+}
+
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EALREADY;
+
+	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+	if (err) {
+		printk(KERN_ERR PFX "%s: register_cnic failed\n",
+		       dev->netdev->name);
+		goto err2;
+	}
+
+	dev->regview = ethdev->io_base;
+	cp->chip_id = ethdev->chip_id;
+	pci_dev_get(dev->pcidev);
+	cp->func = PCI_FUNC(dev->pcidev->devfn);
+	cp->status_blk = ethdev->irq_arr[0].status_blk;
+	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
+
+	err = cp->alloc_resc(dev);
+	if (err) {
+		printk(KERN_ERR PFX "%s: allocate resource failure\n",
+		       dev->netdev->name);
+		goto err1;
+	}
+
+	err = cp->start_hw(dev);
+	if (err)
+		goto err1;
+
+	err = cnic_cm_open(dev);
+	if (err)
+		goto err1;
+
+	set_bit(CNIC_F_CNIC_UP, &dev->flags);
+
+	cp->enable_int(dev);
+
+	return 0;
+
+err1:
+	ethdev->drv_unregister_cnic(dev->netdev);
+	cp->free_resc(dev);
+	pci_dev_put(dev->pcidev);
+err2:
+	return err;
+}
+
+static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	cnic_disable_bnx2_int_sync(dev);
+
+	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+
+	cnic_init_context(dev, KWQ_CID);
+	cnic_init_context(dev, KCQ_CID);
+
+	cnic_setup_5709_context(dev, 0);
+	cnic_free_irq(dev);
+
+	ethdev->drv_unregister_cnic(dev->netdev);
+
+	cnic_free_resc(dev);
+}
+
+static void cnic_stop_hw(struct cnic_dev *dev)
+{
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
+		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+		synchronize_rcu();
+		cnic_cm_shutdown(dev);
+		cp->stop_hw(dev);
+		pci_dev_put(dev->pcidev);
+	}
+}
+
+static void cnic_free_dev(struct cnic_dev *dev)
+{
+	int i = 0;
+
+	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
+		msleep(100);
+		i++;
+	}
+	if (atomic_read(&dev->ref_count) != 0)
+		printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
+				    " to zero.\n", dev->netdev->name);
+
+	printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
+	dev_put(dev->netdev);
+	kfree(dev);
+}
+
+static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
+				       struct pci_dev *pdev)
+{
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	int alloc_size;
+
+	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
+
+	cdev = kzalloc(alloc_size, GFP_KERNEL);
+	if (cdev == NULL) {
+		printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
+		       dev->name);
+		return NULL;
+	}
+
+	cdev->netdev = dev;
+	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
+	cdev->register_device = cnic_register_device;
+	cdev->unregister_device = cnic_unregister_device;
+	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+
+	cp = cdev->cnic_priv;
+	cp->dev = cdev;
+	cp->uio_dev = -1;
+	cp->l2_single_buf_size = 0x400;
+	cp->l2_rx_ring_size = 3;
+
+	spin_lock_init(&cp->cnic_ulp_lock);
+
+	printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
+
+	return cdev;
+}
+
+static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
+{
+	struct pci_dev *pdev;
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	struct cnic_eth_dev *ethdev = NULL;
+	struct cnic_eth_dev *(*probe)(void *) = NULL;
+
+	probe = __symbol_get("bnx2_cnic_probe");
+	if (probe) {
+		ethdev = (*probe)(dev);
+		symbol_put_addr(probe);
+	}
+	if (!ethdev)
+		return NULL;
+
+	pdev = ethdev->pdev;
+	if (!pdev)
+		return NULL;
+
+	dev_hold(dev);
+	pci_dev_get(pdev);
+	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
+		u8 rev;
+
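+		/* Only 5709 chips at revision 0x10 or later are supported
+		 * by the cnic driver.
+		 */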
+		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+		if (rev < 0x10) {
+			pci_dev_put(pdev);
+			goto cnic_err;
+		}
+	}
+	pci_dev_put(pdev);
+
+	cdev = cnic_alloc_dev(dev, pdev);
+	if (cdev == NULL)
+		goto cnic_err;
+
+	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
+	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
+
+	cp = cdev->cnic_priv;
+	cp->ethdev = ethdev;
+	cdev->pcidev = pdev;
+
+	cp->cnic_ops = &cnic_bnx2_ops;
+	cp->start_hw = cnic_start_bnx2_hw;
+	cp->stop_hw = cnic_stop_bnx2_hw;
+	cp->setup_pgtbl = cnic_setup_page_tbl;
+	cp->alloc_resc = cnic_alloc_bnx2_resc;
+	cp->free_resc = cnic_free_resc;
+	cp->start_cm = cnic_cm_init_bnx2_hw;
+	cp->stop_cm = cnic_cm_stop_bnx2_hw;
+	cp->enable_int = cnic_enable_bnx2_int;
+	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
+	cp->close_conn = cnic_close_bnx2_conn;
+	cp->next_idx = cnic_bnx2_next_idx;
+	cp->hw_idx = cnic_bnx2_hw_idx;
+	return cdev;
+
+cnic_err:
+	dev_put(dev);
+	return NULL;
+}
+
+static struct cnic_dev *is_cnic_dev(struct net_device *dev)
+{
+	struct ethtool_drvinfo drvinfo;
+	struct cnic_dev *cdev = NULL;
+
+	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
+		memset(&drvinfo, 0, sizeof(drvinfo));
+		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+
+		if (!strcmp(drvinfo.driver, "bnx2"))
+			cdev = init_bnx2_cnic(dev);
+		if (cdev) {
+			write_lock(&cnic_dev_lock);
+			list_add(&cdev->list, &cnic_dev_list);
+			write_unlock(&cnic_dev_lock);
+		}
+	}
+	return cdev;
+}
+
+/*
+ * netdev event handler
+ */
+static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
+							 void *ptr)
+{
+	struct net_device *netdev = ptr;
+	struct cnic_dev *dev;
+	int if_type;
+	int new_dev = 0;
+
+	dev = cnic_from_netdev(netdev);
+
+	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
+		/* Check for the hot-plug device */
+		dev = is_cnic_dev(netdev);
+		if (dev) {
+			new_dev = 1;
+			cnic_hold(dev);
+		}
+	}
+	if (dev) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (new_dev)
+			cnic_ulp_init(dev);
+		else if (event == NETDEV_UNREGISTER)
+			cnic_ulp_exit(dev);
+		else if (event == NETDEV_UP) {
+			mutex_lock(&cnic_lock);
+			if (!cnic_start_hw(dev))
+				cnic_ulp_start(dev);
+			mutex_unlock(&cnic_lock);
+		}
+
+		rcu_read_lock();
+		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+			struct cnic_ulp_ops *ulp_ops;
+			void *ctx;
+
+			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+			if (!ulp_ops || !ulp_ops->indicate_netevent)
+				continue;
+
+			ctx = cp->ulp_handle[if_type];
+
+			ulp_ops->indicate_netevent(ctx, event);
+		}
+		rcu_read_unlock();
+
+		if (event == NETDEV_GOING_DOWN) {
+			mutex_lock(&cnic_lock);
+			cnic_ulp_stop(dev);
+			cnic_stop_hw(dev);
+			mutex_unlock(&cnic_lock);
+		} else if (event == NETDEV_UNREGISTER) {
+			write_lock(&cnic_dev_lock);
+			list_del_init(&dev->list);
+			write_unlock(&cnic_dev_lock);
+
+			cnic_put(dev);
+			cnic_free_dev(dev);
+			goto done;
+		}
+		cnic_put(dev);
+	}
+done:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block cnic_netdev_notifier = {
+	.notifier_call = cnic_netdev_event
+};
+
+static void cnic_release(void)
+{
+	struct cnic_dev *dev;
+
+	while (!list_empty(&cnic_dev_list)) {
+		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
+		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+			cnic_ulp_stop(dev);
+			cnic_stop_hw(dev);
+		}
+
+		cnic_ulp_exit(dev);
+		list_del_init(&dev->list);
+		cnic_free_dev(dev);
+	}
+}
+
+static int __init cnic_init(void)
+{
+	int rc = 0;
+
+	printk(KERN_INFO "%s", version);
+
+	rc = register_netdevice_notifier(&cnic_netdev_notifier);
+	if (rc) {
+		cnic_release();
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __exit cnic_exit(void)
+{
+	unregister_netdevice_notifier(&cnic_netdev_notifier);
+	cnic_release();
+	return;
+}
+
+module_init(cnic_init);
+module_exit(cnic_exit);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
new file mode 100644
index 0000000..5192d4a
--- /dev/null
+++ b/drivers/net/cnic.h
@@ -0,0 +1,299 @@
+/* cnic.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_H
+#define CNIC_H
+
+#define KWQ_PAGE_CNT	4
+#define KCQ_PAGE_CNT	16
+
+#define KWQ_CID 		24
+#define KCQ_CID 		25
+
+/*
+ *	krnlq_context definition
+ */
+#define L5_KRNLQ_FLAGS	0x00000000
+#define L5_KRNLQ_SIZE	0x00000000
+#define L5_KRNLQ_TYPE	0x00000000
+#define KRNLQ_FLAGS_PG_SZ					(0xf<<0)
+#define KRNLQ_FLAGS_PG_SZ_256					(0<<0)
+#define KRNLQ_FLAGS_PG_SZ_512					(1<<0)
+#define KRNLQ_FLAGS_PG_SZ_1K					(2<<0)
+#define KRNLQ_FLAGS_PG_SZ_2K					(3<<0)
+#define KRNLQ_FLAGS_PG_SZ_4K					(4<<0)
+#define KRNLQ_FLAGS_PG_SZ_8K					(5<<0)
+#define KRNLQ_FLAGS_PG_SZ_16K					(6<<0)
+#define KRNLQ_FLAGS_PG_SZ_32K					(7<<0)
+#define KRNLQ_FLAGS_PG_SZ_64K					(8<<0)
+#define KRNLQ_FLAGS_PG_SZ_128K					(9<<0)
+#define KRNLQ_FLAGS_PG_SZ_256K					(10<<0)
+#define KRNLQ_FLAGS_PG_SZ_512K					(11<<0)
+#define KRNLQ_FLAGS_PG_SZ_1M					(12<<0)
+#define KRNLQ_FLAGS_PG_SZ_2M					(13<<0)
+#define KRNLQ_FLAGS_QE_SELF_SEQ					(1<<15)
+#define KRNLQ_SIZE_TYPE_SIZE	((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
+#define KRNLQ_TYPE_TYPE						(0xf<<28)
+#define KRNLQ_TYPE_TYPE_EMPTY					(0<<28)
+#define KRNLQ_TYPE_TYPE_KRNLQ					(6<<28)
+
+#define L5_KRNLQ_HOST_QIDX		0x00000004
+#define L5_KRNLQ_HOST_FW_QIDX		0x00000008
+#define L5_KRNLQ_NX_QE_SELF_SEQ 	0x0000000c
+#define L5_KRNLQ_QE_SELF_SEQ_MAX	0x0000000c
+#define L5_KRNLQ_NX_QE_HADDR_HI 	0x00000010
+#define L5_KRNLQ_NX_QE_HADDR_LO 	0x00000014
+#define L5_KRNLQ_PGTBL_PGIDX		0x00000018
+#define L5_KRNLQ_NX_PG_QIDX 		0x00000018
+#define L5_KRNLQ_PGTBL_NPAGES		0x0000001c
+#define L5_KRNLQ_QIDX_INCR		0x0000001c
+#define L5_KRNLQ_PGTBL_HADDR_HI 	0x00000020
+#define L5_KRNLQ_PGTBL_HADDR_LO 	0x00000024
+
+#define BNX2_PG_CTX_MAP			0x1a0034
+#define BNX2_ISCSI_CTX_MAP		0x1a0074
+
+struct cnic_redirect_entry {
+	struct dst_entry *old_dst;
+	struct dst_entry *new_dst;
+};
+
+#define MAX_COMPLETED_KCQE	64
+
+#define MAX_CNIC_L5_CONTEXT	256
+
+#define MAX_CM_SK_TBL_SZ	MAX_CNIC_L5_CONTEXT
+
+#define MAX_ISCSI_TBL_SZ	256
+
+#define CNIC_LOCAL_PORT_MIN	60000
+#define CNIC_LOCAL_PORT_MAX	61000
+#define CNIC_LOCAL_PORT_RANGE	(CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
+
+#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
+#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
+#define MAX_KWQE_CNT (KWQE_CNT - 1)
+#define MAX_KCQE_CNT (KCQE_CNT - 1)
+
+#define MAX_KWQ_IDX	((KWQ_PAGE_CNT * KWQE_CNT) - 1)
+#define MAX_KCQ_IDX	((KCQ_PAGE_CNT * KCQE_CNT) - 1)
+
+#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
+
+#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
+
+#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) ==		\
+		(MAX_KCQE_CNT - 1)) ?					\
+		(x) + 2 : (x) + 1
+
+#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA(cp, x)						\
+	&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
+
+#define DEF_IPID_COUNT		0xc001
+
+#define DEF_KA_TIMEOUT		10000
+#define DEF_KA_INTERVAL		300000
+#define DEF_KA_MAX_PROBE_COUNT	3
+#define DEF_TOS			0
+#define DEF_TTL			0xfe
+#define DEF_SND_SEQ_SCALE	0
+#define DEF_RCV_BUF		0xffff
+#define DEF_SND_BUF		0xffff
+#define DEF_SEED		0
+#define DEF_MAX_RT_TIME		500
+#define DEF_MAX_DA_COUNT	2
+#define DEF_SWS_TIMER		1000
+#define DEF_MAX_CWND		0xffff
+
+struct cnic_ctx {
+	u32		cid;
+	void		*ctx;
+	dma_addr_t	mapping;
+};
+
+#define BNX2_MAX_CID		0x2000
+
+struct cnic_dma {
+	int		num_pages;
+	void		**pg_arr;
+	dma_addr_t	*pg_map_arr;
+	int		pgtbl_size;
+	u32		*pgtbl;
+	dma_addr_t	pgtbl_map;
+};
+
+struct cnic_id_tbl {
+	spinlock_t	lock;
+	u32		start;
+	u32		max;
+	u32		next;
+	unsigned long	*table;
+};
+
+#define CNIC_KWQ16_DATA_SIZE	128
+
+struct kwqe_16_data {
+	u8	data[CNIC_KWQ16_DATA_SIZE];
+};
+
+struct cnic_iscsi {
+	struct cnic_dma		task_array_info;
+	struct cnic_dma		r2tq_info;
+	struct cnic_dma		hq_info;
+};
+
+struct cnic_context {
+	u32			cid;
+	struct kwqe_16_data	*kwqe_data;
+	dma_addr_t		kwqe_data_mapping;
+	wait_queue_head_t	waitq;
+	int			wait_cond;
+	unsigned long		timestamp;
+	u32			ctx_flags;
+#define	CTX_FL_OFFLD_START	0x00000001
+	u8			ulp_proto_id;
+	union {
+		struct cnic_iscsi	*iscsi;
+	} proto;
+};
+
+struct cnic_local {
+
+	spinlock_t cnic_ulp_lock;
+	void *ulp_handle[MAX_CNIC_ULP_TYPE];
+	unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
+#define ULP_F_INIT	0
+#define ULP_F_START	1
+	struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+
+	/* protected by ulp_lock */
+	u32 cnic_local_flags;
+#define	CNIC_LCL_FL_KWQ_INIT	0x00000001
+
+	struct cnic_dev *dev;
+
+	struct cnic_eth_dev *ethdev;
+
+	void		*l2_ring;
+	dma_addr_t	l2_ring_map;
+	int		l2_ring_size;
+	int		l2_rx_ring_size;
+
+	void		*l2_buf;
+	dma_addr_t	l2_buf_map;
+	int		l2_buf_size;
+	int		l2_single_buf_size;
+
+	u16		*rx_cons_ptr;
+	u16		*tx_cons_ptr;
+	u16		rx_cons;
+	u16		tx_cons;
+
+	u32 kwq_cid_addr;
+	u32 kcq_cid_addr;
+
+	struct cnic_dma		kwq_info;
+	struct kwqe		**kwq;
+
+	struct cnic_dma		kwq_16_data_info;
+
+	u16		max_kwq_idx;
+
+	u16		kwq_prod_idx;
+	u32		kwq_io_addr;
+
+	u16		*kwq_con_idx_ptr;
+	u16		kwq_con_idx;
+
+	struct cnic_dma	kcq_info;
+	struct kcqe	**kcq;
+
+	u16		kcq_prod_idx;
+	u32		kcq_io_addr;
+
+	void				*status_blk;
+	struct status_block_msix	*bnx2_status_blk;
+	struct host_status_block	*bnx2x_status_blk;
+
+	u32				status_blk_num;
+	u32				int_num;
+	u32				last_status_idx;
+	struct tasklet_struct		cnic_irq_task;
+
+	struct kcqe		*completed_kcq[MAX_COMPLETED_KCQE];
+
+	struct cnic_sock	*csk_tbl;
+	struct cnic_id_tbl	csk_port_tbl;
+
+	struct cnic_dma		conn_buf_info;
+	struct cnic_dma		gbl_buf_info;
+
+	struct cnic_iscsi	*iscsi_tbl;
+	struct cnic_context	*ctx_tbl;
+	struct cnic_id_tbl	cid_tbl;
+	int			max_iscsi_conn;
+	atomic_t		iscsi_conn;
+
+	/* per connection parameters */
+	int			num_iscsi_tasks;
+	int			num_ccells;
+	int			task_array_size;
+	int			r2tq_size;
+	int			hq_size;
+	int			num_cqs;
+
+	struct cnic_ctx		*ctx_arr;
+	int			ctx_blks;
+	int			ctx_blk_size;
+	int			cids_per_blk;
+
+	u32			chip_id;
+	int			func;
+	u32			shmem_base;
+
+	u32			uio_dev;
+	struct uio_info		*cnic_uinfo;
+
+	struct cnic_ops		*cnic_ops;
+	int			(*start_hw)(struct cnic_dev *);
+	void			(*stop_hw)(struct cnic_dev *);
+	void			(*setup_pgtbl)(struct cnic_dev *,
+					       struct cnic_dma *);
+	int			(*alloc_resc)(struct cnic_dev *);
+	void			(*free_resc)(struct cnic_dev *);
+	int			(*start_cm)(struct cnic_dev *);
+	void			(*stop_cm)(struct cnic_dev *);
+	void			(*enable_int)(struct cnic_dev *);
+	void			(*disable_int_sync)(struct cnic_dev *);
+	void			(*ack_int)(struct cnic_dev *);
+	void			(*close_conn)(struct cnic_sock *, u32 opcode);
+	u16			(*next_idx)(u16);
+	u16			(*hw_idx)(u16);
+};
+
+struct bnx2x_bd_chain_next {
+	u32	addr_lo;
+	u32	addr_hi;
+	u8	reserved[8];
+};
+
+#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN		(ISCSI_KCQE_OPCODE_UPDATE_CONN)
+#define ISCSI_RAMROD_CMD_ID_INIT		(ISCSI_KCQE_OPCODE_INIT)
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+#endif
+
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 0000000..cee80f6
--- /dev/null
+++ b/drivers/net/cnic_defs.h
@@ -0,0 +1,580 @@
+
+/* cnic_defs.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef CNIC_DEFS_H
+#define CNIC_DEFS_H
+
+/* KWQ (kernel work queue) request op codes */
+#define L2_KWQE_OPCODE_VALUE_FLUSH                  (4)
+
+#define L4_KWQE_OPCODE_VALUE_CONNECT1               (50)
+#define L4_KWQE_OPCODE_VALUE_CONNECT2               (51)
+#define L4_KWQE_OPCODE_VALUE_CONNECT3               (52)
+#define L4_KWQE_OPCODE_VALUE_RESET                  (53)
+#define L4_KWQE_OPCODE_VALUE_CLOSE                  (54)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET          (60)
+#define L4_KWQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+#define L5CM_RAMROD_CMD_ID_BASE			(0x80)
+#define L5CM_RAMROD_CMD_ID_TCP_CONNECT		(L5CM_RAMROD_CMD_ID_BASE + 3)
+#define L5CM_RAMROD_CMD_ID_CLOSE		(L5CM_RAMROD_CMD_ID_BASE + 12)
+#define L5CM_RAMROD_CMD_ID_ABORT		(L5CM_RAMROD_CMD_ID_BASE + 13)
+#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE	(L5CM_RAMROD_CMD_ID_BASE + 14)
+#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD	(L5CM_RAMROD_CMD_ID_BASE + 15)
+
+/* KCQ (kernel completion queue) response op codes */
+#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP             (53)
+#define L4_KCQE_OPCODE_VALUE_RESET_COMP             (54)
+#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE          (55)
+#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE       (56)
+#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED         (57)
+#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED         (58)
+#define L4_KCQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KCQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+/* KCQ (kernel completion queue) completion status */
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS		    (0)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT        (0x93)
+
+#define L4_LAYER_CODE (4)
+#define L2_LAYER_CODE (2)
+
+/*
+ * L4 KCQ CQE
+ */
+struct l4_kcq {
+	u32 cid;
+	u32 pg_cid;
+	u32 conn_id;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u16 status;
+	u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved1;
+	u16 status;
+#endif
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * L4 KCQ CQE PG upload
+ */
+struct l4_kcq_upload_pg {
+	u32 pg_cid;
+#if defined(__BIG_ENDIAN)
+	u16 pg_status;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u16 pg_status;
+#endif
+	u32 reserved1[5];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * Request to gracefully close the connection
+ */
+struct l4_kwq_close_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * The first request to be passed in order to establish a connection in option2
+ */
+struct l4_kwq_connect_req1 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 pg_cid;
+	u32 src_ip;
+	u32 dst_ip;
+#if defined(__BIG_ENDIAN)
+	u16 dst_port;
+	u16 src_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 src_port;
+	u16 dst_port;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 rsrv1[3];
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+	u8 rsrv1[3];
+#endif
+	u32 rsrv2;
+};
+
+
+/*
+ * The second (optional) request to be passed in order to establish a
+ * connection in option2 - for IPv6 only
+ */
+struct l4_kwq_connect_req2 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 rsrv;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rsrv;
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 reserved2;
+	u32 src_ip_v6_2;
+	u32 src_ip_v6_3;
+	u32 src_ip_v6_4;
+	u32 dst_ip_v6_2;
+	u32 dst_ip_v6_3;
+	u32 dst_ip_v6_4;
+};
+
+
+/*
+ * The third (and last) request to be passed in order to establish a
+ * connection in option2
+ */
+struct l4_kwq_connect_req3 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 ka_timeout;
+	u32 ka_interval;
+#if defined(__BIG_ENDIAN)
+	u8 snd_seq_scale;
+	u8 ttl;
+	u8 tos;
+	u8 ka_max_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_max_probe_count;
+	u8 tos;
+	u8 ttl;
+	u8 snd_seq_scale;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 pmtu;
+	u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mss;
+	u16 pmtu;
+#endif
+	u32 rcv_buf;
+	u32 snd_buf;
+	u32 seed;
+};
+
+
+/*
+ * a KWQE request to offload a PG connection
+ */
+struct l4_kwq_offload_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 l2hdr_nbytes;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 l2hdr_nbytes;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa0;
+	u8 sa1;
+	u8 sa2;
+	u8 sa3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sa3;
+	u8 sa2;
+	u8 sa1;
+	u8 sa0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa4;
+	u8 sa5;
+	u16 etype;
+#elif defined(__LITTLE_ENDIAN)
+	u16 etype;
+	u8 sa5;
+	u8 sa4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 vlan_tag;
+	u16 ipid_start;
+#elif defined(__LITTLE_ENDIAN)
+	u16 ipid_start;
+	u16 vlan_tag;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ipid_count;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 ipid_count;
+#endif
+	u32 host_opaque;
+};
+
+
+/*
+ * Abortively close the connection request
+ */
+struct l4_kwq_reset_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * a KWQE request to update a PG connection
+ */
+struct l4_kwq_update_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 pg_cid;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+	u8 pg_unused_a;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u8 pg_unused_a;
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserverd3;
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u16 reserverd3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+	u32 reserved4;
+	u32 reserved5;
+};
+
+
+/*
+ * a KWQE request to upload a PG or L4 context
+ */
+struct l4_kwq_upload {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+#endif /* CNIC_DEFS_H */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
new file mode 100644
index 0000000..0638096
--- /dev/null
+++ b/drivers/net/cnic_if.h
@@ -0,0 +1,299 @@
+/* cnic_if.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_IF_H
+#define CNIC_IF_H
+
+#define CNIC_MODULE_VERSION	"2.0.0"
+#define CNIC_MODULE_RELDATE	"May 21, 2009"
+
+#define CNIC_ULP_RDMA		0
+#define CNIC_ULP_ISCSI		1
+#define CNIC_ULP_L4		2
+#define MAX_CNIC_ULP_TYPE_EXT	2
+#define MAX_CNIC_ULP_TYPE	3
+
+struct kwqe {
+	u32 kwqe_op_flag;
+
+#define KWQE_OPCODE_MASK	0x00ff0000
+#define KWQE_OPCODE_SHIFT	16
+#define KWQE_FLAGS_LAYER_SHIFT	28
+#define KWQE_OPCODE(x)		((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
+
+	u32 kwqe_info0;
+	u32 kwqe_info1;
+	u32 kwqe_info2;
+	u32 kwqe_info3;
+	u32 kwqe_info4;
+	u32 kwqe_info5;
+	u32 kwqe_info6;
+};
+
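The opcode occupies bits 16-23 of kwqe_op_flag and the layer code starts at bit 28. As a rough, illustrative sketch only, a caller could compose the word from the masks above like this:

static inline u32 kwqe_build_op_flag(u32 opcode, u32 layer)
{
	/* opcode into bits 16-23, layer code into bits 28-30 */
	return (opcode << KWQE_OPCODE_SHIFT) |
	       (layer << KWQE_FLAGS_LAYER_SHIFT);
}
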
+struct kwqe_16 {
+	u32 kwqe_info0;
+	u32 kwqe_info1;
+	u32 kwqe_info2;
+	u32 kwqe_info3;
+};
+
+struct kcqe {
+	u32 kcqe_info0;
+	u32 kcqe_info1;
+	u32 kcqe_info2;
+	u32 kcqe_info3;
+	u32 kcqe_info4;
+	u32 kcqe_info5;
+	u32 kcqe_info6;
+	u32 kcqe_op_flag;
+		#define KCQE_RAMROD_COMPLETION		(0x1<<27) /* Everest */
+		#define KCQE_FLAGS_LAYER_MASK		(0x7<<28)
+		#define KCQE_FLAGS_LAYER_MASK_MISC	(0<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L2	(2<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L3	(3<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L4	(4<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
+		#define KCQE_FLAGS_NEXT 		(1<<31)
+		#define KCQE_FLAGS_OPCODE_MASK		(0xff<<16)
+		#define KCQE_FLAGS_OPCODE_SHIFT		(16)
+		#define KCQE_OPCODE(op)			\
+		(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
+};
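A completion handler would go the other way, pulling the layer and opcode back out of kcqe_op_flag. An illustrative sketch using the masks above:

/* Classify a completion by layer code, then by opcode. */
static void example_handle_kcqe(struct kcqe *kcqe)
{
	u32 layer = kcqe->kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
	u32 opcode = KCQE_OPCODE(kcqe->kcqe_op_flag);

	if (layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
		pr_debug("iSCSI KCQE, opcode 0x%x\n", opcode);
}
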
+
+#define MAX_CNIC_CTL_DATA	64
+#define MAX_DRV_CTL_DATA	64
+
+#define CNIC_CTL_STOP_CMD		1
+#define CNIC_CTL_START_CMD		2
+#define CNIC_CTL_COMPLETION_CMD		3
+
+#define DRV_CTL_IO_WR_CMD		0x101
+#define DRV_CTL_IO_RD_CMD		0x102
+#define DRV_CTL_CTX_WR_CMD		0x103
+#define DRV_CTL_CTXTBL_WR_CMD		0x104
+#define DRV_CTL_COMPLETION_CMD		0x105
+
+struct cnic_ctl_completion {
+	u32	cid;
+};
+
+struct drv_ctl_completion {
+	u32	comp_count;
+};
+
+struct cnic_ctl_info {
+	int	cmd;
+	union {
+		struct cnic_ctl_completion comp;
+		char bytes[MAX_CNIC_CTL_DATA];
+	} data;
+};
+
+struct drv_ctl_io {
+	u32		cid_addr;
+	u32		offset;
+	u32		data;
+	dma_addr_t	dma_addr;
+};
+
+struct drv_ctl_info {
+	int	cmd;
+	union {
+		struct drv_ctl_completion comp;
+		struct drv_ctl_io io;
+		char bytes[MAX_DRV_CTL_DATA];
+	} data;
+};
+
+struct cnic_ops {
+	struct module	*cnic_owner;
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+	int		(*cnic_handler)(void *, void *);
+	int		(*cnic_ctl)(void *, struct cnic_ctl_info *);
+};
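As noted in the comment, calls through these pointers are made under RCU. A hedged sketch of such an invocation, assuming the ops pointer is published with rcu_assign_pointer(); the function, pointer and argument names here are hypothetical:

static int example_call_handler(struct cnic_ops **ops_ptr,
				void *handle, void *data)
{
	struct cnic_ops *ops;
	int rc = -ENODEV;

	rcu_read_lock();
	ops = rcu_dereference(*ops_ptr);
	if (ops)
		rc = ops->cnic_handler(handle, data);
	rcu_read_unlock();
	return rc;
}
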
+
+#define MAX_CNIC_VEC	8
+
+struct cnic_irq {
+	unsigned int	vector;
+	void		*status_blk;
+	u32		status_blk_num;
+	u32		irq_flags;
+#define CNIC_IRQ_FL_MSIX		0x00000001
+};
+
+struct cnic_eth_dev {
+	struct module	*drv_owner;
+	u32		drv_state;
+#define CNIC_DRV_STATE_REGD		0x00000001
+#define CNIC_DRV_STATE_USING_MSIX	0x00000002
+	u32		chip_id;
+	u32		max_kwqe_pending;
+	struct pci_dev	*pdev;
+	void __iomem	*io_base;
+
+	u32		ctx_tbl_offset;
+	u32		ctx_tbl_len;
+	int		ctx_blk_size;
+	u32		starting_cid;
+	u32		max_iscsi_conn;
+	u32		max_fcoe_conn;
+	u32		max_rdma_conn;
+	u32		reserved0[2];
+
+	int		num_irq;
+	struct cnic_irq	irq_arr[MAX_CNIC_VEC];
+	int		(*drv_register_cnic)(struct net_device *,
+					     struct cnic_ops *, void *);
+	int		(*drv_unregister_cnic)(struct net_device *);
+	int		(*drv_submit_kwqes_32)(struct net_device *,
+					       struct kwqe *[], u32);
+	int		(*drv_submit_kwqes_16)(struct net_device *,
+					       struct kwqe_16 *[], u32);
+	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+	unsigned long	reserved1[2];
+};
+
+struct cnic_sockaddr {
+	union {
+		struct sockaddr_in	v4;
+		struct sockaddr_in6	v6;
+	} local;
+	union {
+		struct sockaddr_in	v4;
+		struct sockaddr_in6	v6;
+	} remote;
+};
+
+struct cnic_sock {
+	struct cnic_dev *dev;
+	void	*context;
+	u32	src_ip[4];
+	u32	dst_ip[4];
+	u16	src_port;
+	u16	dst_port;
+	u16	vlan_id;
+	unsigned char old_ha[6];
+	unsigned char ha[6];
+	u32	mtu;
+	u32	cid;
+	u32	l5_cid;
+	u32	pg_cid;
+	int	ulp_type;
+
+	u32	ka_timeout;
+	u32	ka_interval;
+	u8	ka_max_probe_count;
+	u8	tos;
+	u8	ttl;
+	u8	snd_seq_scale;
+	u32	rcv_buf;
+	u32	snd_buf;
+	u32	seed;
+
+	unsigned long	tcp_flags;
+#define SK_TCP_NO_DELAY_ACK	0x1
+#define SK_TCP_KEEP_ALIVE	0x2
+#define SK_TCP_NAGLE		0x4
+#define SK_TCP_TIMESTAMP	0x8
+#define SK_TCP_SACK		0x10
+#define SK_TCP_SEG_SCALING	0x20
+	unsigned long	flags;
+#define SK_F_INUSE		0
+#define SK_F_OFFLD_COMPLETE	1
+#define SK_F_OFFLD_SCHED	2
+#define SK_F_PG_OFFLD_COMPLETE	3
+#define SK_F_CONNECT_START	4
+#define SK_F_IPV6		5
+#define SK_F_CLOSING		7
+
+	atomic_t ref_count;
+	u32 state;
+	struct kwqe kwqe1;
+	struct kwqe kwqe2;
+	struct kwqe kwqe3;
+};
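The SK_F_* values are bit numbers rather than masks, so they are meant for the atomic bitops on the unsigned long flags word. An illustrative sketch:

/* Mark an offload in flight, then complete it; the helpers take bit numbers. */
static void example_mark_offload(struct cnic_sock *csk)
{
	set_bit(SK_F_OFFLD_SCHED, &csk->flags);
	/* ... once the firmware acks the offload ... */
	set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);

	if (test_bit(SK_F_IPV6, &csk->flags))
		pr_debug("offloaded socket is IPv6\n");
}
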
+
+struct cnic_dev {
+	struct net_device	*netdev;
+	struct pci_dev		*pcidev;
+	void __iomem		*regview;
+	struct list_head	list;
+
+	int (*register_device)(struct cnic_dev *dev, int ulp_type,
+			       void *ulp_ctx);
+	int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
+	int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
+				u32 num_wqes);
+	int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
+				u32 num_wqes);
+
+	int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
+			 void *);
+	int (*cm_destroy)(struct cnic_sock *);
+	int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
+	int (*cm_abort)(struct cnic_sock *);
+	int (*cm_close)(struct cnic_sock *);
+	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
+	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
+				 char *data, u16 data_size);
+	unsigned long	flags;
+#define CNIC_F_CNIC_UP		1
+#define CNIC_F_BNX2_CLASS	3
+#define CNIC_F_BNX2X_CLASS	4
+	atomic_t	ref_count;
+	u8		mac_addr[6];
+
+	int		max_iscsi_conn;
+	int		max_fcoe_conn;
+	int		max_rdma_conn;
+
+	void		*cnic_priv;
+};
+
+#define CNIC_WR(dev, off, val)		writel(val, dev->regview + off)
+#define CNIC_WR16(dev, off, val)	writew(val, dev->regview + off)
+#define CNIC_WR8(dev, off, val)		writeb(val, dev->regview + off)
+#define CNIC_RD(dev, off)		readl(dev->regview + off)
+#define CNIC_RD16(dev, off)		readw(dev->regview + off)
+
+struct cnic_ulp_ops {
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+
+	void (*cnic_init)(struct cnic_dev *dev);
+	void (*cnic_exit)(struct cnic_dev *dev);
+	void (*cnic_start)(void *ulp_ctx);
+	void (*cnic_stop)(void *ulp_ctx);
+	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
+				u32 num_cqes);
+	void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
+	void (*cm_connect_complete)(struct cnic_sock *);
+	void (*cm_close_complete)(struct cnic_sock *);
+	void (*cm_abort_complete)(struct cnic_sock *);
+	void (*cm_remote_close)(struct cnic_sock *);
+	void (*cm_remote_abort)(struct cnic_sock *);
+	void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
+				  char *data, u16 data_size);
+	struct module *owner;
+};
+
+extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+
+extern int cnic_unregister_driver(int ulp_type);
+
+#endif
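For context, a ULP such as bnx2i would fill in a struct cnic_ulp_ops and register it for its type. A minimal, hedged sketch; the example_* callbacks are placeholders, not functions defined anywhere in this series:

static struct cnic_ulp_ops example_ulp_ops = {
	.cnic_init	= example_init,		/* placeholder callbacks */
	.cnic_exit	= example_exit,
	.indicate_kcqes	= example_indicate_kcqes,
	.owner		= THIS_MODULE,
};

static int __init example_module_init(void)
{
	return cnic_register_driver(CNIC_ULP_ISCSI, &example_ulp_ops);
}

static void __exit example_module_exit(void)
{
	cnic_unregister_driver(CNIC_ULP_ISCSI);
}
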
-- 
1.5.6.GIT

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 4/4] bnx2i: Add bnx2i iSCSI driver.
  2009-05-23 21:11 [PATCH 0/4] Add bnx2i driver Michael Chan
                   ` (2 preceding siblings ...)
       [not found] ` <1243113110-29635-1-git-send-email-mchan-dY08KVG/lbpWk0Htik3J/w@public.gmane.org>
@ 2009-05-23 21:11 ` Michael Chan
  2009-05-26 16:37   ` Grant Grundler
  3 siblings, 1 reply; 11+ messages in thread
From: Michael Chan @ 2009-05-23 21:11 UTC (permalink / raw)
  To: James.Bottomley, michaelc; +Cc: davem, linux-scsi, open-iscsi, anilgv, benli

New iSCSI driver for Broadcom BNX2 devices.  The driver interfaces with
the CNIC driver to access the hardware.

Signed-off-by: Anil Veerabhadrappa <anilgv@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
---
 drivers/scsi/Kconfig                      |    1 +
 drivers/scsi/Makefile                     |    1 +
 drivers/scsi/bnx2i/57xx_iscsi_constants.h |  155 ++
 drivers/scsi/bnx2i/57xx_iscsi_hsi.h       | 1509 ++++++++++++++++++
 drivers/scsi/bnx2i/Kconfig                |    7 +
 drivers/scsi/bnx2i/Makefile               |    3 +
 drivers/scsi/bnx2i/bnx2i.h                |  776 ++++++++++
 drivers/scsi/bnx2i/bnx2i_hwi.c            | 2412 +++++++++++++++++++++++++++++
 drivers/scsi/bnx2i/bnx2i_init.c           |  434 ++++++
 drivers/scsi/bnx2i/bnx2i_iscsi.c          | 2082 +++++++++++++++++++++++++
 drivers/scsi/bnx2i/bnx2i_sysfs.c          |  142 ++
 11 files changed, 7522 insertions(+), 0 deletions(-)
 create mode 100644 drivers/scsi/bnx2i/57xx_iscsi_constants.h
 create mode 100644 drivers/scsi/bnx2i/57xx_iscsi_hsi.h
 create mode 100644 drivers/scsi/bnx2i/Kconfig
 create mode 100644 drivers/scsi/bnx2i/Makefile
 create mode 100644 drivers/scsi/bnx2i/bnx2i.h
 create mode 100644 drivers/scsi/bnx2i/bnx2i_hwi.c
 create mode 100644 drivers/scsi/bnx2i/bnx2i_init.c
 create mode 100644 drivers/scsi/bnx2i/bnx2i_iscsi.c
 create mode 100644 drivers/scsi/bnx2i/bnx2i_sysfs.c

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 759e150..6a19ed9 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -354,6 +354,7 @@ config ISCSI_TCP
 	 http://open-iscsi.org
 
 source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/bnx2i/Kconfig"
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 8795c30..25429ea 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -129,6 +129,7 @@ obj-$(CONFIG_SCSI_STEX)		+= stex.o
 obj-$(CONFIG_SCSI_MVSAS)	+= mvsas/
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
+obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644
index 0000000..2fceb19
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -0,0 +1,155 @@
+/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef __57XX_ISCSI_CONSTANTS_H_
+#define __57XX_ISCSI_CONSTANTS_H_
+
+/*
+ * This file defines HSI constants for the iSCSI flows.
+ */
+
+/* iSCSI request op codes */
+#define ISCSI_OPCODE_CLEANUP_REQUEST    (7)
+
+/* iSCSI response/messages op codes */
+#define ISCSI_OPCODE_CLEANUP_RESPONSE 		(0x27)
+#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION    (0)
+
+/* iSCSI task types */
+#define ISCSI_TASK_TYPE_READ    (0)
+#define ISCSI_TASK_TYPE_WRITE   (1)
+#define ISCSI_TASK_TYPE_MPATH   (2)
+
+/* initial CQ sequence numbers */
+#define ISCSI_INITIAL_SN    (1)
+
+/* KWQ (kernel work queue) layer codes */
+#define ISCSI_KWQE_LAYER_CODE   (6)
+
+/* KWQ (kernel work queue) request op codes */
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
+#define ISCSI_KWQE_OPCODE_UPDATE_CONN   (2)
+#define ISCSI_KWQE_OPCODE_DESTROY_CONN  (3)
+#define ISCSI_KWQE_OPCODE_INIT1         (4)
+#define ISCSI_KWQE_OPCODE_INIT2         (5)
+
+/* KCQ (kernel completion queue) response op codes */
+#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN  (0x10)
+#define ISCSI_KCQE_OPCODE_UPDATE_CONN   (0x12)
+#define ISCSI_KCQE_OPCODE_DESTROY_CONN  (0x13)
+#define ISCSI_KCQE_OPCODE_INIT          (0x14)
+#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK	(0x15)
+#define ISCSI_KCQE_OPCODE_TCP_RESET     (0x16)
+#define ISCSI_KCQE_OPCODE_TCP_SYN       (0x17)
+#define ISCSI_KCQE_OPCODE_TCP_FIN       (0X18)
+#define ISCSI_KCQE_OPCODE_TCP_ERROR     (0x19)
+#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define ISCSI_KCQE_OPCODE_ISCSI_ERROR   (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS                            (0x0)
+#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE                     (0x1)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE                  (0x2)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE                   (0x3)
+#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR                          (0x4)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR                        (0x5)
+#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR                       (0x6)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE     (0xa)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE                (0xb)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN               (0xc)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT                   (0xd)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN                (0xe)
+
+/* Response */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN            (0xf)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T              (0x10)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO  (0x2c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG  (0x2d)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0                 (0x11)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1                 (0x12)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2                 (0x13)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3                 (0x14)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4                 (0x15)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5                 (0x16)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6                 (0x17)
+
+/* Data-In */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN        (0x18)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN       (0x19)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO            (0x1a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV          (0x1b)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN                (0x1c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN      (0x1d)
+
+/* R2T */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF            (0x1f)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN                   (0x20)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN                 (0x21)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED       (0x24)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV           (0x25)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN         (0x26)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
+
+/* TMF */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN        (0x28)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN         (0x29)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN         (0x2a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP   (0x2b)
+
+/* IP/TCP processing errors: */
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT               (0x40)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS                (0x41)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG               (0x42)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS                (0x43)
+
+/* iSCSI licensing errors */
+/* general iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED                (0x50)
+/* additional LOM specific iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED              (0x51)
+
+/* SQ/RQ/CQ DB structure sizes */
+#define ISCSI_SQ_DB_SIZE    (16)
+#define ISCSI_RQ_DB_SIZE    (16)
+#define ISCSI_CQ_DB_SIZE    (80)
+
+#define ISCSI_SQN_TO_NOTIFY_NOT_VALID                                   0xFFFF
+
+/* Page size codes (for flags field in connection offload request) */
+#define ISCSI_PAGE_SIZE_256     (0)
+#define ISCSI_PAGE_SIZE_512     (1)
+#define ISCSI_PAGE_SIZE_1K      (2)
+#define ISCSI_PAGE_SIZE_2K      (3)
+#define ISCSI_PAGE_SIZE_4K      (4)
+#define ISCSI_PAGE_SIZE_8K      (5)
+#define ISCSI_PAGE_SIZE_16K     (6)
+#define ISCSI_PAGE_SIZE_32K     (7)
+#define ISCSI_PAGE_SIZE_64K     (8)
+#define ISCSI_PAGE_SIZE_128K    (9)
+#define ISCSI_PAGE_SIZE_256K    (10)
+#define ISCSI_PAGE_SIZE_512K    (11)
+#define ISCSI_PAGE_SIZE_1M      (12)
+#define ISCSI_PAGE_SIZE_2M      (13)
+#define ISCSI_PAGE_SIZE_4M      (14)
+#define ISCSI_PAGE_SIZE_8M      (15)
+
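Each code n corresponds to a page size of (256 << n) bytes, so a driver can derive the code for the connection-offload flags from its page size. An illustrative helper:

/* Map a page size in bytes to the code above, e.g. 4096 -> ISCSI_PAGE_SIZE_4K. */
static u8 example_page_size_to_code(u32 page_size)
{
	u8 code = 0;

	while (code < ISCSI_PAGE_SIZE_8M && (256u << code) < page_size)
		code++;
	return code;
}
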
+/* iSCSI PDU related defines */
+#define ISCSI_HEADER_SIZE   (48)
+#define ISCSI_DIGEST_SHIFT  (2)
+#define ISCSI_DIGEST_SIZE   (4)
+
+#define B577XX_ISCSI_CONNECTION_TYPE    3
+
+#endif /* __57XX_ISCSI_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644
index 0000000..36af1af
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -0,0 +1,1509 @@
+/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef __57XX_ISCSI_HSI_LINUX_LE__
+#define __57XX_ISCSI_HSI_LINUX_LE__
+
+/*
+ * iSCSI Async CQE
+ */
+struct bnx2i_async_msg {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved5;
+	u8 err_code;
+	u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved4;
+	u8 err_code;
+	u16 reserved5;
+#endif
+	u32 reserved6;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u8 async_event;
+	u8 async_vcode;
+	u16 param1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 param1;
+	u8 async_vcode;
+	u8 async_event;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 param2;
+	u16 param3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 param3;
+	u16 param2;
+#endif
+	u32 reserved7[3];
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Buffer Descriptor (BD)
+ */
+struct iscsi_bd {
+	u32 buffer_addr_hi;
+	u32 buffer_addr_lo;
+#if defined(__BIG_ENDIAN)
+	u16 reserved0;
+	u16 buffer_length;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buffer_length;
+	u16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+#elif defined(__LITTLE_ENDIAN)
+	u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+	u16 reserved3;
+#endif
+};
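The BD carries a 64-bit DMA address split into hi/lo halves plus first/last chain markers. A hedged sketch of filling a single-entry chain:

/* Describe one DMA buffer with a lone BD that is both first and last. */
static void example_fill_single_bd(struct iscsi_bd *bd, dma_addr_t addr, u16 len)
{
	bd->buffer_addr_lo = (u32) addr;
	bd->buffer_addr_hi = (u32) ((u64) addr >> 32);
	bd->buffer_length  = len;
	bd->flags = ISCSI_BD_FIRST_IN_BD_CHAIN | ISCSI_BD_LAST_IN_BD_CHAIN;
}
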
+
+
+/*
+ * iSCSI Cleanup SQ WQE
+ */
+struct bnx2i_cleanup_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2[3];
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+	u16 reserved3;
+#endif
+	u32 reserved4[10];
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved6;
+	u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved5;
+	u8 reserved6;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup CQE
+ */
+struct bnx2i_cleanup_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 status;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 status;
+	u8 op_code;
+#endif
+	u32 reserved1[3];
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5[7];
+#if defined(__BIG_ENDIAN)
+	u16 reserved6;
+	u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+	u16 reserved6;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * SCSI read/write SQ WQE
+ */
+struct bnx2i_cmd_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+	u8 op_code;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ud_buffer_offset;
+	u16 sd_buffer_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sd_buffer_offset;
+	u16 ud_buffer_offset;
+#endif
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+	u32 total_data_transfer_length;
+	u32 cmd_sn;
+	u32 reserved3;
+	u32 cdb[4];
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 sd_start_bd_index;
+	u8 ud_start_bd_index;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 ud_start_bd_index;
+	u8 sd_start_bd_index;
+	u8 cq_index;
+#endif
+};
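The 16-bit itt packs a 14-bit task index together with a 2-bit type (see the ISCSI_TASK_TYPE_* values in 57xx_iscsi_constants.h). An illustrative sketch of building it:

/* Pack a firmware task index and type (e.g. ISCSI_TASK_TYPE_WRITE) into itt. */
static u16 example_build_itt(u16 task_idx, u8 task_type)
{
	return (task_idx & ISCSI_CMD_REQUEST_INDEX) |
	       ((u16) task_type << ISCSI_CMD_REQUEST_TYPE_SHIFT);
}
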
+
+
+/*
+ * task statistics for write response
+ */
+struct bnx2i_write_resp_task_stat {
+	u32 num_data_ins;
+};
+
+/*
+ * task statistics for read response
+ */
+struct bnx2i_read_resp_task_stat {
+#if defined(__BIG_ENDIAN)
+	u16 num_data_outs;
+	u16 num_r2ts;
+#elif defined(__LITTLE_ENDIAN)
+	u16 num_r2ts;
+	u16 num_data_outs;
+#endif
+};
+
+/*
+ * task statistics for iSCSI cmd response
+ */
+union bnx2i_cmd_resp_task_stat {
+	struct bnx2i_write_resp_task_stat write_stat;
+	struct bnx2i_read_resp_task_stat read_stat;
+};
+
+/*
+ * SCSI Command CQE
+ */
+struct bnx2i_cmd_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+	u8 response;
+	u8 status;
+#elif defined(__LITTLE_ENDIAN)
+	u8 status;
+	u8 response;
+	u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved2;
+	u32 residual_count;
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5[5];
+	union bnx2i_cmd_resp_task_stat task_stat;
+	u32 reserved6;
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+
+
+/*
+ * firmware middle-path request SQ WQE
+ */
+struct bnx2i_fw_mp_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+	u16 hdr_opaque1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hdr_opaque1;
+	u8 op_attr;
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 hdr_opaque2[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved0;
+	u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+	u16 reserved0;
+#endif
+	u32 hdr_opaque3[4];
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 reserved3;
+	u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+	u8 reserved3;
+	u16 reserved4;
+#endif
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved6;
+	u8 reserved5;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved5;
+	u8 reserved6;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * firmware response - CQE: used only by firmware
+ */
+struct bnx2i_fw_response {
+	u32 hdr_dword1[2];
+	u32 hdr_exp_cmd_sn;
+	u32 hdr_max_cmd_sn;
+	u32 hdr_ttt;
+	u32 hdr_res_cnt;
+	u32 cqe_flags;
+#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
+#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
+#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
+#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
+#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
+#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
+	u32 stat_sn;
+	u32 hdr_dword2[2];
+	u32 hdr_dword3[2];
+	u32 task_stat;
+	u32 reserved0;
+	u32 hdr_itt;
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI KCQ CQE parameters
+ */
+union iscsi_kcqe_params {
+	u32 reserved0[4];
+};
+
+/*
+ * iSCSI KCQ CQE
+ */
+struct iscsi_kcqe {
+	u32 iscsi_conn_id;
+	u32 completion_status;
+	u32 iscsi_conn_context_id;
+	union iscsi_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+#endif
+};
+
+
+
+/*
+ * iSCSI KWQE header
+ */
+struct iscsi_kwqe_header {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+	u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+	u8 op_code;
+	u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * iSCSI firmware init request 1
+ */
+struct iscsi_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u8 reserved0;
+	u8 num_cqs;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_cqs;
+	u8 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 dummy_buffer_addr_lo;
+	u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 num_ccells_per_conn;
+	u16 num_tasks_per_conn;
+#elif defined(__LITTLE_ENDIAN)
+	u16 num_tasks_per_conn;
+	u16 num_ccells_per_conn;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 sq_wqes_per_page;
+	u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_num_wqes;
+	u16 sq_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cq_log_wqes_per_page;
+	u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+	u16 cq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_num_wqes;
+	u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+	u8 cq_log_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 cq_num_pages;
+	u16 sq_num_pages;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_num_pages;
+	u16 cq_num_pages;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 rq_buffer_size;
+	u16 rq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rq_num_wqes;
+	u16 rq_buffer_size;
+#endif
+};
+
+/*
+ * iSCSI firmware init request 2
+ */
+struct iscsi_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 max_cq_sqn;
+#elif defined(__LITTLE_ENDIAN)
+	u16 max_cq_sqn;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 error_bit_map[2];
+	u32 reserved1[5];
+};
+
+/*
+ * Initial iSCSI connection offload request 1
+ */
+struct iscsi_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 sq_page_table_addr_lo;
+	u32 sq_page_table_addr_hi;
+	u32 cq_page_table_addr_lo;
+	u32 cq_page_table_addr_hi;
+	u32 reserved0[3];
+};
+
+/*
+ * iSCSI Page Table Entry (PTE)
+ */
+struct iscsi_pte {
+	u32 hi;
+	u32 lo;
+};
+
+/*
+ * Initial iSCSI connection offload request 2
+ */
+struct iscsi_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 rq_page_table_addr_lo;
+	u32 rq_page_table_addr_hi;
+	struct iscsi_pte sq_first_pte;
+	struct iscsi_pte cq_first_pte;
+	u32 num_additional_wqes;
+};
+
+
+/*
+ * Initial iSCSI connection offload request 3
+ */
+struct iscsi_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 reserved1;
+	struct iscsi_pte qp_first_pte[3];
+};
+
+
+/*
+ * iSCSI connection update request
+ */
+struct iscsi_kwqe_conn_update {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 session_error_recovery_level;
+	u8 max_outstanding_r2ts;
+	u8 reserved2;
+	u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+	u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+	u8 reserved2;
+	u8 max_outstanding_r2ts;
+	u8 session_error_recovery_level;
+#endif
+	u32 context_id;
+	u32 max_send_pdu_length;
+	u32 max_recv_pdu_length;
+	u32 first_burst_length;
+	u32 max_burst_length;
+	u32 exp_stat_sn;
+};
+
+/*
+ * iSCSI destroy connection request
+ */
+struct iscsi_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+	struct iscsi_kwqe_header hdr;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	struct iscsi_kwqe_header hdr;
+#endif
+	u32 context_id;
+	u32 reserved1[6];
+};
+
+/*
+ * iSCSI KWQ WQE
+ */
+union iscsi_kwqe {
+	struct iscsi_kwqe_init1 init1;
+	struct iscsi_kwqe_init2 init2;
+	struct iscsi_kwqe_conn_offload1 conn_offload1;
+	struct iscsi_kwqe_conn_offload2 conn_offload2;
+	struct iscsi_kwqe_conn_update conn_update;
+	struct iscsi_kwqe_conn_destroy conn_destroy;
+};
+
+/*
+ * iSCSI Login SQ WQE
+ */
+struct bnx2i_login_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+	u8 version_max;
+	u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+	u8 version_min;
+	u8 version_max;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+	u16 isid_hi;
+	u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tsih;
+	u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 reserved4;
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 reserved8;
+	u8 reserved7;
+	u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+	u8 reserved7;
+	u16 reserved8;
+#endif
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved10;
+	u8 reserved9;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved9;
+	u8 reserved10;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Login CQE
+ */
+struct bnx2i_login_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+	u8 version_max;
+	u8 version_active;
+#elif defined(__LITTLE_ENDIAN)
+	u8 version_active;
+	u8 version_max;
+	u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u8 err_code;
+	u8 reserved2;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved2;
+	u8 err_code;
+	u16 reserved3;
+#endif
+	u32 stat_sn;
+	u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+	u16 isid_hi;
+	u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tsih;
+	u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 status_class;
+	u8 status_detail;
+	u16 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved4;
+	u8 status_detail;
+	u8 status_class;
+#endif
+	u32 reserved5[3];
+#if defined(__BIG_ENDIAN)
+	u16 reserved6;
+	u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+	u16 reserved6;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Logout SQ WQE
+ */
+struct bnx2i_logout_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 reserved4[5];
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved6;
+	u8 reserved5;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved5;
+	u8 reserved6;
+	u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Logout CQE
+ */
+struct bnx2i_logout_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u8 response;
+	u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved0;
+	u8 response;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved5;
+	u8 err_code;
+	u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved4;
+	u8 err_code;
+	u16 reserved5;
+#endif
+	u32 reserved6[3];
+#if defined(__BIG_ENDIAN)
+	u16 time_to_wait;
+	u16 time_to_retain;
+#elif defined(__LITTLE_ENDIAN)
+	u16 time_to_retain;
+	u16 time_to_wait;
+#endif
+	u32 reserved7[3];
+#if defined(__BIG_ENDIAN)
+	u16 reserved8;
+	u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+	u16 reserved8;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Nop-In CQE
+ */
+struct bnx2i_nop_in_msg {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 ttt;
+	u32 reserved2;
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5;
+	u32 lun[2];
+	u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI NOP-OUT SQ WQE
+ */
+struct bnx2i_nop_out_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+	u16 reserved2;
+#endif
+	u32 ttt;
+	u32 cmd_sn;
+	u32 reserved3[2];
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u8 reserved6;
+	u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+	u8 reserved6;
+	u16 reserved7;
+#endif
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved9;
+	u8 reserved8;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved8;
+	u8 reserved9;
+	u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Reject CQE
+ */
+struct bnx2i_reject_msg {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u8 reason;
+	u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved0;
+	u8 reason;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5[8];
+	u32 cq_req_sn;
+};
+
+/*
+ * bnx2i iSCSI TMF SQ WQE
+ */
+struct bnx2i_tmf_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+	u16 reserved1;
+#endif
+	u32 ref_itt;
+	u32 cmd_sn;
+	u32 reserved2;
+	u32 ref_cmd_sn;
+	u32 reserved3[3];
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved5;
+	u8 reserved4;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved4;
+	u8 reserved5;
+	u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Text SQ WQE
+ */
+struct bnx2i_text_request {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 lun[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+	u16 reserved3;
+#endif
+	u32 ttt;
+	u32 cmd_sn;
+	u32 reserved4[2];
+	u32 resp_bd_list_addr_lo;
+	u32 resp_bd_list_addr_hi;
+	u32 resp_buffer;
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
+	u32 zero_fill;
+	u32 bd_list_addr_lo;
+	u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u8 cq_index;
+	u8 reserved7;
+	u8 reserved6;
+	u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+	u8 num_bds;
+	u8 reserved6;
+	u8 reserved7;
+	u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI SQ WQE
+ */
+union iscsi_request {
+	struct bnx2i_cmd_request cmd;
+	struct bnx2i_tmf_request tmf;
+	struct bnx2i_nop_out_request nop_out;
+	struct bnx2i_login_request login_req;
+	struct bnx2i_text_request text;
+	struct bnx2i_logout_request logout_req;
+	struct bnx2i_cleanup_request cleanup;
+};
+
+
+/*
+ * iSCSI TMF CQE
+ */
+struct bnx2i_tmf_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 reserved1;
+	u8 response;
+	u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved0;
+	u8 response;
+	u8 reserved1;
+	u8 op_code;
+#endif
+	u32 reserved2;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+	u16 reserved5;
+	u8 err_code;
+	u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved4;
+	u8 err_code;
+	u16 reserved5;
+#endif
+	u32 reserved6[7];
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+/*
+ * iSCSI Text CQE
+ */
+struct bnx2i_text_response {
+#if defined(__BIG_ENDIAN)
+	u8 op_code;
+	u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+	u8 op_code;
+#endif
+	u32 data_length;
+	u32 exp_cmd_sn;
+	u32 max_cmd_sn;
+	u32 ttt;
+	u32 reserved2;
+#if defined(__BIG_ENDIAN)
+	u16 reserved4;
+	u8 err_code;
+	u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 reserved3;
+	u8 err_code;
+	u16 reserved4;
+#endif
+	u32 reserved5;
+	u32 lun[2];
+	u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+	u16 reserved7;
+	u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+	u16 reserved7;
+#endif
+	u32 cq_req_sn;
+};
+
+/*
+ * iSCSI CQE
+ */
+union iscsi_response {
+	struct bnx2i_cmd_response cmd;
+	struct bnx2i_tmf_response tmf;
+	struct bnx2i_login_response login_resp;
+	struct bnx2i_text_response text;
+	struct bnx2i_logout_response logout_resp;
+	struct bnx2i_cleanup_response cleanup;
+	struct bnx2i_reject_msg reject;
+	struct bnx2i_async_msg async;
+	struct bnx2i_nop_in_msg nop_in;
+};
+
+#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644
index 0000000..820d428
--- /dev/null
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -0,0 +1,7 @@
+config SCSI_BNX2_ISCSI
+	tristate "Broadcom NetXtreme II iSCSI support"
+	select SCSI_ISCSI_ATTRS
+	select CNIC
+	---help---
+	This driver supports iSCSI offload for the Broadcom NetXtreme II
+	devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644
index 0000000..b5802bd
--- /dev/null
+++ b/drivers/scsi/bnx2i/Makefile
@@ -0,0 +1,3 @@
+bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
+
+obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644
index 0000000..7eaf0a1
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -0,0 +1,776 @@
+/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#ifndef _BNX2I_H_
+#define _BNX2I_H_
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/kfifo.h>
+#include <linux/netdevice.h>
+#include <linux/completion.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "../../net/cnic_if.h"
+#include "57xx_iscsi_hsi.h"
+#include "57xx_iscsi_constants.h"
+
+#define BNX2_ISCSI_DRIVER_NAME		"bnx2i"
+
+#define BNX2I_MAX_ADAPTERS		8
+
+#define ISCSI_MAX_CONNS_PER_HBA		128
+#define ISCSI_MAX_SESS_PER_HBA		ISCSI_MAX_CONNS_PER_HBA
+#define ISCSI_MAX_CMDS_PER_SESS		128
+
+/* Total active commands across all connections supported by devices */
+#define ISCSI_MAX_CMDS_PER_HBA_5708	(28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_5709	(128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_57710	(256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+
+#define ISCSI_MAX_BDS_PER_CMD		32
+
+#define MAX_PAGES_PER_CTRL_STRUCT_POOL	8
+#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS	4
+
+#define BNX2I_STATSN_UPDATE_SIGNATURE	0xFABCAFE
+
+/* 5706/08 hardware limits the maximum buffer size per BD it can handle */
+#define MAX_BD_LENGTH			65535
+#define BD_SPLIT_SIZE			32768
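
Since a single BD cannot describe more than MAX_BD_LENGTH bytes on 5706/08, a
large buffer presumably gets carved into BD_SPLIT_SIZE-sized pieces. A rough
stand-alone sketch of the resulting BD count (the buffer length below is made
up for illustration):

    #include <stdio.h>

    #define BD_SPLIT_SIZE   32768

    /* Number of BDs needed if a buffer is split into BD_SPLIT_SIZE chunks. */
    static unsigned int bds_needed(unsigned int len)
    {
            return (len + BD_SPLIT_SIZE - 1) / BD_SPLIT_SIZE;
    }

    int main(void)
    {
            printf("%u BDs for a 100000 byte buffer\n", bds_needed(100000));
            return 0;
    }
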
+
+/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
+#define BNX2I_SQ_WQES_MIN 		16
+#define BNX2I_570X_SQ_WQES_MAX 		128
+#define BNX2I_5770X_SQ_WQES_MAX 	512
+#define BNX2I_570X_SQ_WQES_DEFAULT 	128
+#define BNX2I_5770X_SQ_WQES_DEFAULT 	256
+
+#define BNX2I_570X_CQ_WQES_MAX 		128
+#define BNX2I_5770X_CQ_WQES_MAX 	512
+
+#define BNX2I_RQ_WQES_MIN 		16
+#define BNX2I_RQ_WQES_MAX 		32
+#define BNX2I_RQ_WQES_DEFAULT 		16
+
+/* CCELLs per conn */
+#define BNX2I_CCELLS_MIN		16
+#define BNX2I_CCELLS_MAX		96
+#define BNX2I_CCELLS_DEFAULT		64
+
+#define ITT_INVALID_SIGNATURE		0xFFFF
+
+#define ISCSI_CMD_CLEANUP_TIMEOUT	100
+
+#define BNX2I_CONN_CTX_BUF_SIZE		16384
+
+#define BNX2I_SQ_WQE_SIZE		64
+#define BNX2I_RQ_WQE_SIZE		256
+#define BNX2I_CQE_SIZE			64
+
+#define MB_KERNEL_CTX_SHIFT		8
+#define MB_KERNEL_CTX_SIZE		(1 << MB_KERNEL_CTX_SHIFT)
+
+#define CTX_SHIFT			7
+#define GET_CID_NUM(cid_addr)		((cid_addr) >> CTX_SHIFT)
+
+#define CTX_OFFSET 			0x10000
+#define MAX_CID_CNT			0x4000
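
CTX_SHIFT above implies each connection context occupies 1 << 7 = 128 bytes of
context space, so GET_CID_NUM() is simply the context address divided by 128.
A small sanity-check sketch (the values used are made up):

    #include <assert.h>
    #include <stdint.h>

    #define CTX_SHIFT 7
    #define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT)

    int main(void)
    {
            uint32_t cid_addr = 5u << CTX_SHIFT;    /* 6th 128-byte context slot */

            assert(GET_CID_NUM(cid_addr) == 5);
            return 0;
    }
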
+
+/* 5709 context registers */
+#define BNX2_MQ_CONFIG2			0x00003d00
+#define BNX2_MQ_CONFIG2_CONT_SZ		(0x7L<<4)
+#define BNX2_MQ_CONFIG2_FIRST_L4L5	(0x1fL<<8)
+
+/* 57710's BAR2 is mapped to doorbell registers */
+#define BNX2X_DOORBELL_PCI_BAR		2
+#define BNX2X_MAX_CQS			8
+
+#define CNIC_ARM_CQE			1
+#define CNIC_DISARM_CQE			0
+
+#define REG_RD(__hba, offset)				\
+		readl(__hba->regview + offset)
+#define REG_WR(__hba, offset, val)			\
+		writel(val, __hba->regview + offset)
+
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf:            driver buffer used to stage payload associated with
+ *                      the login request
+ * @req_dma_addr:       dma address for iscsi login request payload buffer
+ * @req_buf_size:       actual login request payload length
+ * @req_wr_ptr:         pointer into login request buffer when next data is
+ *                      to be written
+ * @resp_hdr:           iscsi header where iscsi login response header is to
+ *                      be recreated
+ * @resp_buf:           buffer to stage login response payload
+ * @resp_dma_addr:      login response payload buffer dma address
+ * @resp_buf_size:      login response payload length
+ * @resp_wr_ptr:        pointer into login response buffer when next data is
+ *                      to be written
+ * @req_bd_tbl:         iscsi login request payload BD table
+ * @req_bd_dma:         login request BD table dma address
+ * @resp_bd_tbl:        iscsi login response payload BD table
+ * @resp_bd_dma:        login response BD table dma address
+ *
+ * following structure defines buffer info for generic pdus such as iSCSI Login,
+ *	Logout and NOP
+ */
+struct generic_pdu_resc {
+	char *req_buf;
+	dma_addr_t req_dma_addr;
+	u32 req_buf_size;
+	char *req_wr_ptr;
+	struct iscsi_hdr resp_hdr;
+	char *resp_buf;
+	dma_addr_t resp_dma_addr;
+	u32 resp_buf_size;
+	char *resp_wr_ptr;
+	char *req_bd_tbl;
+	dma_addr_t req_bd_dma;
+	char *resp_bd_tbl;
+	dma_addr_t resp_bd_dma;
+};
+
+
+/**
+ * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
+ *
+ * @link:               list head to link elements
+ * @max_ptrs:           maximum pointers that can be stored in this page
+ * @num_valid:          number of valid pointers in this page
+ * @page:               base address for page pointer array
+ *
+ * structure to track DMA'able memory allocated for command BD tables
+ */
+struct bd_resc_page {
+	struct list_head link;
+	u32 max_ptrs;
+	u32 num_valid;
+	void *page[1];
+};
+
+
+/**
+ * struct io_bdt - I/O buffer descriptor table
+ *
+ * @bd_tbl:             BD table's virtual address
+ * @bd_tbl_dma:         BD table's dma address
+ * @bd_valid:           num valid BD entries
+ *
+ * IO BD table
+ */
+struct io_bdt {
+	struct iscsi_bd *bd_tbl;
+	dma_addr_t bd_tbl_dma;
+	u16 bd_valid;
+};
+
+
+/**
+ * bnx2i_cmd - iscsi command structure
+ *
+ * @scsi_cmd:           SCSI-ML task pointer corresponding to this iscsi cmd
+ * @sg:                 SG list
+ * @io_tbl:             buffer descriptor (BD) table
+ * @bd_tbl_dma:         buffer descriptor (BD) table's dma address
+ */
+struct bnx2i_cmd {
+	struct iscsi_hdr hdr;
+	struct bnx2i_conn *conn;
+	struct scsi_cmnd *scsi_cmd;
+	struct scatterlist *sg;
+	struct io_bdt io_tbl;
+	dma_addr_t bd_tbl_dma;
+	struct bnx2i_cmd_request req;
+};
+
+
+/**
+ * struct bnx2i_conn - iscsi connection structure
+ *
+ * @cls_conn:              pointer to iscsi cls conn
+ * @hba:                   adapter structure pointer
+ * @exp_statsn:            iscsi expected statsn
+ * @iscsi_conn_cid:        iscsi conn id
+ * @fw_cid:                firmware iscsi context id
+ * @ep:                    endpoint structure pointer
+ * @gen_pdu:               login/nopout/logout pdu resources
+ * @violation_notified:    bit mask used to track iscsi error/warning messages
+ *                         already printed out
+ *
+ * iSCSI connection structure
+ */
+struct bnx2i_conn {
+	struct iscsi_cls_conn *cls_conn;
+	struct bnx2i_hba *hba;
+	struct completion cmd_cleanup_cmpl;
+	int is_bound;
+
+	u32 exp_statsn;
+	u32 iscsi_conn_cid;
+#define BNX2I_CID_RESERVED	0x5AFF
+	u32 fw_cid;
+
+	struct timer_list poll_timer;
+	/*
+	 * Queue Pair (QP) related structure elements.
+	 */
+	struct bnx2i_endpoint *ep;
+
+	/*
+	 * Buffer for login negotiation process
+	 */
+	struct generic_pdu_resc gen_pdu;
+	u64 violation_notified;
+};
+
+
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base:           queue base memory
+ * @cid_que:                queue memory pointer
+ * @cid_q_prod_idx:         producer index
+ * @cid_q_cons_idx:         consumer index
+ * @cid_q_max_idx:          max index, used to detect wrap around condition
+ * @cid_free_cnt:           queue size
+ * @conn_cid_tbl:           iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+	void *cid_que_base;
+	u32 *cid_que;
+	u32 cid_q_prod_idx;
+	u32 cid_q_cons_idx;
+	u32 cid_q_max_idx;
+	u32 cid_free_cnt;
+	struct bnx2i_conn **conn_cid_tbl;
+};
+
+/**
+ * struct bnx2i_hba - bnx2i adapter structure
+ *
+ * @link:                  list head to link elements
+ * @cnic:                  pointer to cnic device
+ * @pcidev:                pointer to pci dev
+ * @netdev:                pointer to netdev structure
+ * @regview:               mapped PCI register space
+ * @age:                   age, incremented by every recovery
+ * @cnic_dev_type:         cnic device type, 5706/5708/5709/57710
+ * @mail_queue_access:     mailbox queue access mode, applicable to 5709 only
+ * @reg_with_cnic:         indicates whether the device is registered with CNIC
+ * @adapter_state:         adapter state, UP, GOING_DOWN, LINK_DOWN
+ * @mtu_supported:         Ethernet MTU supported
+ * @shost:                 scsi host pointer
+ * @max_sqes:              SQ size
+ * @max_rqes:              RQ size
+ * @max_cqes:              CQ size
+ * @num_ccell:             number of command cells per connection
+ * @ofld_conns_active:     active connection list
+ * @max_active_conns:      max offload connections supported by this device
+ * @cid_que:               iscsi cid queue
+ * @ep_rdwr_lock:          read / write lock to synchronize various ep lists
+ * @ep_ofld_list:          connection list for pending offload completion
+ * @ep_destroy_list:       connection list for pending destroy completion
+ * @mp_bd_tbl:             BD table to be used with middle path requests
+ * @mp_bd_dma:             DMA address of 'mp_bd_tbl' memory buffer
+ * @dummy_buffer:          Dummy buffer to be used with zero length scsicmd reqs
+ * @dummy_buf_dma:         DMA address of 'dummy_buffer' memory buffer
+ * @lock:                  lock to synchronize access to hba structure
+ * @pci_did:               PCI device ID
+ * @pci_vid:               PCI vendor ID
+ * @pci_sdid:              PCI subsystem device ID
+ * @pci_svid:              PCI subsystem vendor ID
+ * @pci_func:              PCI function number in system pci tree
+ * @pci_devno:             PCI device number in system pci tree
+ * @num_wqe_sent:          statistic counter, total wqe's sent
+ * @num_cqe_rcvd:          statistic counter, total cqe's received
+ * @num_intr_claimed:      statistic counter, total interrupts claimed
+ * @link_changed_count:    statistic counter, num of link change notifications
+ *                         received
+ * @ipaddr_changed_count:  statistic counter, num times IP address changed while
+ *                         at least one connection is offloaded
+ * @num_sess_opened:       statistic counter, total num sessions opened
+ * @num_conn_opened:       statistic counter, total num conns opened on this hba
+ * @ctx_ccell_tasks:       captures number of ccells and tasks supported by
+ *                         currently offloaded connection, used to decode
+ *                         context memory
+ *
+ * Adapter Data Structure
+ */
+struct bnx2i_hba {
+	struct list_head link;
+	struct cnic_dev *cnic;
+	struct pci_dev *pcidev;
+	struct net_device *netdev;
+	void __iomem *regview;
+
+	u32 age;
+	unsigned long cnic_dev_type;
+		#define BNX2I_NX2_DEV_5706		0x0
+		#define BNX2I_NX2_DEV_5708		0x1
+		#define BNX2I_NX2_DEV_5709		0x2
+		#define BNX2I_NX2_DEV_57710		0x3
+	u32 mail_queue_access;
+		#define BNX2I_MQ_KERNEL_MODE		0x0
+		#define BNX2I_MQ_KERNEL_BYPASS_MODE	0x1
+		#define BNX2I_MQ_BIN_MODE		0x2
+	unsigned long  reg_with_cnic;
+		#define BNX2I_CNIC_REGISTERED		1
+
+	unsigned long  adapter_state;
+		#define ADAPTER_STATE_UP		0
+		#define ADAPTER_STATE_GOING_DOWN	1
+		#define ADAPTER_STATE_LINK_DOWN		2
+		#define ADAPTER_STATE_INIT_FAILED	31
+	unsigned int mtu_supported;
+		#define BNX2I_MAX_MTU_SUPPORTED		1500
+
+	struct Scsi_Host *shost;
+
+	u32 max_sqes;
+	u32 max_rqes;
+	u32 max_cqes;
+	u32 num_ccell;
+
+	int ofld_conns_active;
+
+	int max_active_conns;
+	struct iscsi_cid_queue cid_que;
+
+	rwlock_t ep_rdwr_lock;
+	struct list_head ep_ofld_list;
+	struct list_head ep_destroy_list;
+
+	/*
+	 * BD table to be used with MP (Middle Path) requests.
+	 */
+	char *mp_bd_tbl;
+	dma_addr_t mp_bd_dma;
+	char *dummy_buffer;
+	dma_addr_t dummy_buf_dma;
+
+	spinlock_t lock;	/* protects hba structure access */
+	spinlock_t net_dev_lock;/* sync net device access */
+
+	/*
+	 * PCI related info.
+	 */
+	u16 pci_did;
+	u16 pci_vid;
+	u16 pci_sdid;
+	u16 pci_svid;
+	u16 pci_func;
+	u16 pci_devno;
+
+	/*
+	 * Following are a bunch of statistics useful during development
+	 * and later stage for score boarding.
+	 */
+	u32 num_wqe_sent;
+	u32 num_cqe_rcvd;
+	u32 num_intr_claimed;
+	u32 link_changed_count;
+	u32 ipaddr_changed_count;
+	u32 num_sess_opened;
+	u32 num_conn_opened;
+	unsigned int ctx_ccell_tasks;
+};
+
+
+/*******************************************************************************
+ * 	QP [ SQ / RQ / CQ ] info.
+ ******************************************************************************/
+
+/*
+ * SQ/RQ/CQ generic structure definition
+ */
+struct 	sqe {
+	u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
+};
+
+struct 	rqe {
+	u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
+};
+
+struct 	cqe {
+	u8 cqe_byte[BNX2I_CQE_SIZE];
+};
+
+
+enum {
+#if defined(__LITTLE_ENDIAN)
+	CNIC_EVENT_COAL_INDEX	= 0x0,
+	CNIC_SEND_DOORBELL	= 0x4,
+	CNIC_EVENT_CQ_ARM	= 0x7,
+	CNIC_RECV_DOORBELL	= 0x8
+#elif defined(__BIG_ENDIAN)
+	CNIC_EVENT_COAL_INDEX	= 0x2,
+	CNIC_SEND_DOORBELL	= 0x6,
+	CNIC_EVENT_CQ_ARM	= 0x4,
+	CNIC_RECV_DOORBELL	= 0xa
+#endif
+};
+
+
+/*
+ * CQ DB
+ */
+struct bnx2x_iscsi_cq_pend_cmpl {
+	/* CQ producer, updated by Ustorm */
+	u16 ustrom_prod;
+	/* CQ pending completion counter */
+	u16 pend_cntr;
+};
+
+
+struct bnx2i_5771x_cq_db {
+	struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
+	/* CQ pending completion ITT array */
+	u16 itt[BNX2X_MAX_CQS];
+	/* Cstorm CQ sequence to notify array, updated by driver */
+	u16 sqn[BNX2X_MAX_CQS];
+	u32 reserved[4];	/* 16 byte alignment */
+};
+
+
+struct bnx2i_5771x_sq_rq_db {
+	u16 prod_idx;
+	u8 reserved0[14]; /* Pad structure size to 16 bytes */
+};
+
+
+struct bnx2i_5771x_dbell_hdr {
+	u8 header;
+	/* 1 for rx doorbell, 0 for tx doorbell */
+#define B577XX_DOORBELL_HDR_RX				(0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT			0
+	/* 0 for normal doorbell, 1 for advertise wnd doorbell */
+#define B577XX_DOORBELL_HDR_DB_TYPE			(0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT		1
+	/* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
+#define B577XX_DOORBELL_HDR_DPM_SIZE			(0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT		2
+	/* connection type */
+#define B577XX_DOORBELL_HDR_CONN_TYPE			(0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT		4
+};
+
+struct bnx2i_5771x_dbell {
+	struct bnx2i_5771x_dbell_hdr dbell;
+	u8 pad[3];
+
+};
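
The 8-bit doorbell header above packs the rx/tx flag, doorbell type, DPM size
and connection type into one byte; bnx2i_ring_577xx_doorbell() later builds it
from just the connection type. A user-space sketch of that packing (shift
names shortened, and the connection-type value of 3 is only an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* Shift values copied from the B577XX_DOORBELL_HDR_* defines above. */
    #define DB_RX_SHIFT         0
    #define DB_TYPE_SHIFT       1
    #define DB_DPM_SIZE_SHIFT   2
    #define DB_CONN_TYPE_SHIFT  4

    static uint8_t make_db_hdr(uint8_t rx, uint8_t type, uint8_t dpm, uint8_t conn)
    {
            return (uint8_t)((rx << DB_RX_SHIFT) | (type << DB_TYPE_SHIFT) |
                             (dpm << DB_DPM_SIZE_SHIFT) |
                             (conn << DB_CONN_TYPE_SHIFT));
    }

    int main(void)
    {
            /* tx doorbell, normal type, smallest DPM size, conn type 3 (assumed) */
            printf("header = 0x%02x\n", make_db_hdr(0, 0, 0, 3));   /* 0x30 */
            return 0;
    }
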
+
+/**
+ * struct qp_info - QP (shared queue region) attributes structure
+ *
+ * @ctx_base:           ioremapped pci register base to access doorbell register
+ *                      pertaining to this offloaded connection
+ * @sq_virt:            virtual address of send queue (SQ) region
+ * @sq_phys:            DMA address of SQ memory region
+ * @sq_mem_size:        SQ size
+ * @sq_prod_qe:         SQ producer entry pointer
+ * @sq_cons_qe:         SQ consumer entry pointer
+ * @sq_first_qe:        virtual address of first entry in SQ
+ * @sq_last_qe:         virtual address of last entry in SQ
+ * @sq_prod_idx:        SQ producer index
+ * @sq_cons_idx:        SQ consumer index
+ * @sqe_left:           number of SQ entries left
+ * @sq_pgtbl_virt:      page table describing buffer constituting SQ region
+ * @sq_pgtbl_phys:      dma address of 'sq_pgtbl_virt'
+ * @sq_pgtbl_size:      SQ page table size
+ * @cq_virt:            virtual address of completion queue (CQ) region
+ * @cq_phys:            DMA address of CQ memory region
+ * @cq_mem_size:        CQ size
+ * @cq_prod_qe:         CQ producer entry pointer
+ * @cq_cons_qe:         CQ consumer entry pointer
+ * @cq_first_qe:        virtual address of first entry in CQ
+ * @cq_last_qe:         virtual address of last entry in CQ
+ * @cq_prod_idx:        CQ producer index
+ * @cq_cons_idx:        CQ consumer index
+ * @cqe_left:           number of CQ entries left
+ * @cqe_size:           size of each CQ entry
+ * @cqe_exp_seq_sn:     next expected CQE sequence number
+ * @cq_pgtbl_virt:      page table describing buffer constituting CQ region
+ * @cq_pgtbl_phys:      dma address of 'cq_pgtbl_virt'
+ * @cq_pgtbl_size:    	CQ page table size
+ * @rq_virt:            virtual address of receive queue (RQ) region
+ * @rq_phys:            DMA address of RQ memory region
+ * @rq_mem_size:        RQ size
+ * @rq_prod_qe:         RQ producer entry pointer
+ * @rq_cons_qe:         RQ consumer entry pointer
+ * @rq_first_qe:        virtual address of first entry in RQ
+ * @rq_last_qe:         virtual address of last entry in RQ
+ * @rq_prod_idx:        RQ producer index
+ * @rq_cons_idx:        RQ consumer index
+ * @rqe_left:           number of RQ entries left
+ * @rq_pgtbl_virt:      page table describing buffer constituting RQ region
+ * @rq_pgtbl_phys:      dma address of 'rq_pgtbl_virt'
+ * @rq_pgtbl_size:      RQ page table size
+ *
+ * queue pair (QP) is a per-connection shared data structure which is used
+ *	to send work requests (SQ), receive completion notifications (CQ)
+ *	and receive asynchronous / scsi sense info (RQ). 'qp_info' structure
+ *	below holds queue memory, consumer/producer indexes and page table
+ *	information
+ */
+struct qp_info {
+	void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE			0x40
+
+#define BNX2I_570x_QUE_DB_SIZE		0
+#define BNX2I_5771x_QUE_DB_SIZE		16
+	struct sqe *sq_virt;
+	dma_addr_t sq_phys;
+	u32 sq_mem_size;
+
+	struct sqe *sq_prod_qe;
+	struct sqe *sq_cons_qe;
+	struct sqe *sq_first_qe;
+	struct sqe *sq_last_qe;
+	u16 sq_prod_idx;
+	u16 sq_cons_idx;
+	u32 sqe_left;
+
+	void *sq_pgtbl_virt;
+	dma_addr_t sq_pgtbl_phys;
+	u32 sq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */
+
+	struct cqe *cq_virt;
+	dma_addr_t cq_phys;
+	u32 cq_mem_size;
+
+	struct cqe *cq_prod_qe;
+	struct cqe *cq_cons_qe;
+	struct cqe *cq_first_qe;
+	struct cqe *cq_last_qe;
+	u16 cq_prod_idx;
+	u16 cq_cons_idx;
+	u32 cqe_left;
+	u32 cqe_size;
+	u32 cqe_exp_seq_sn;
+
+	void *cq_pgtbl_virt;
+	dma_addr_t cq_pgtbl_phys;
+	u32 cq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */
+
+	struct rqe *rq_virt;
+	dma_addr_t rq_phys;
+	u32 rq_mem_size;
+
+	struct rqe *rq_prod_qe;
+	struct rqe *rq_cons_qe;
+	struct rqe *rq_first_qe;
+	struct rqe *rq_last_qe;
+	u16 rq_prod_idx;
+	u16 rq_cons_idx;
+	u32 rqe_left;
+
+	void *rq_pgtbl_virt;
+	dma_addr_t rq_pgtbl_phys;
+	u32 rq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */
+};
+
+
+
+/*
+ * CID handles
+ */
+struct ep_handles {
+	u32 fw_cid;
+	u32 drv_iscsi_cid;
+	u16 pg_cid;
+	u16 rsvd;
+};
+
+
+enum {
+	EP_STATE_IDLE                   = 0x0,
+	EP_STATE_PG_OFLD_START          = 0x1,
+	EP_STATE_PG_OFLD_COMPL          = 0x2,
+	EP_STATE_OFLD_START             = 0x4,
+	EP_STATE_OFLD_COMPL             = 0x8,
+	EP_STATE_CONNECT_START          = 0x10,
+	EP_STATE_CONNECT_COMPL          = 0x20,
+	EP_STATE_ULP_UPDATE_START       = 0x40,
+	EP_STATE_ULP_UPDATE_COMPL       = 0x80,
+	EP_STATE_DISCONN_START          = 0x100,
+	EP_STATE_DISCONN_COMPL          = 0x200,
+	EP_STATE_CLEANUP_START          = 0x400,
+	EP_STATE_CLEANUP_CMPL           = 0x800,
+	EP_STATE_TCP_FIN_RCVD           = 0x1000,
+	EP_STATE_TCP_RST_RCVD           = 0x2000,
+	EP_STATE_PG_OFLD_FAILED         = 0x1000000,
+	EP_STATE_ULP_UPDATE_FAILED      = 0x2000000,
+	EP_STATE_CLEANUP_FAILED         = 0x4000000,
+	EP_STATE_OFLD_FAILED            = 0x8000000,
+	EP_STATE_CONNECT_FAILED         = 0x10000000,
+	EP_STATE_DISCONN_TIMEDOUT       = 0x20000000,
+};
+
+/**
+ * struct bnx2i_endpoint - representation of tcp connection in NX2 world
+ *
+ * @link:               list head to link elements
+ * @hba:                adapter to which this connection belongs
+ * @conn:               iscsi connection this EP is linked to
+ * @cm_sk:              cnic sock struct
+ * @hba_age:            age to detect if 'iscsid' issues ep_disconnect()
+ *                      after HBA reset is completed by bnx2i/cnic/bnx2
+ *                      modules
+ * @state:              tracks offload connection state machine
+ * @qp:                 QP information
+ * @ids:                contains chip allocated *context id* & driver assigned
+ *                      *iscsi cid*
+ * @ofld_timer:         offload timer to detect timeout
+ * @ofld_wait:          wait queue
+ *
+ * Endpoint Structure - equivalent of tcp socket structure
+ */
+struct bnx2i_endpoint {
+	struct list_head link;
+	struct bnx2i_hba *hba;
+	struct bnx2i_conn *conn;
+	struct cnic_sock *cm_sk;
+	u32 hba_age;
+	u32 state;
+	unsigned long timestamp;
+	int num_active_cmds;
+
+	struct qp_info qp;
+	struct ep_handles ids;
+		#define ep_iscsi_cid	ids.drv_iscsi_cid
+		#define ep_cid		ids.fw_cid
+		#define ep_pg_cid	ids.pg_cid
+	struct timer_list ofld_timer;
+	wait_queue_head_t ofld_wait;
+};
+
+
+
+/* Global variables */
+extern unsigned int error_mask1, error_mask2;
+extern u64 iscsi_error_mask;
+extern unsigned int en_tcp_dack;
+extern unsigned int event_coal_div;
+
+extern struct scsi_transport_template *bnx2i_scsi_xport_template;
+extern struct iscsi_transport bnx2i_iscsi_transport;
+extern struct cnic_ulp_ops bnx2i_cnic_cb;
+
+extern unsigned int sq_size;
+extern unsigned int rq_size;
+
+extern struct device_attribute *bnx2i_dev_attributes[];
+
+
+
+/*
+ * Function Prototypes
+ */
+extern void bnx2i_identify_device(struct bnx2i_hba *hba);
+extern void bnx2i_register_device(struct bnx2i_hba *hba);
+extern void bnx2i_check_nx2_dev_busy(void);
+
+extern void bnx2i_ulp_init(struct cnic_dev *dev);
+extern void bnx2i_ulp_exit(struct cnic_dev *dev);
+extern void bnx2i_start(void *handle);
+extern void bnx2i_stop(void *handle);
+extern void bnx2i_reg_dev_all(void);
+extern void bnx2i_unreg_dev_all(void);
+extern struct bnx2i_hba *get_adapter_list_head(void);
+
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+					  u16 iscsi_cid);
+
+int bnx2i_alloc_ep_pool(void);
+void bnx2i_release_ep_pool(void);
+struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
+struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
+
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
+
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
+void bnx2i_free_hba(struct bnx2i_hba *hba);
+
+void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
+void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
+
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
+
+void bnx2i_drop_session(struct iscsi_cls_session *session);
+
+extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
+extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
+				  struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
+				  struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
+				    struct bnx2i_cmd *cmnd);
+extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
+				   struct iscsi_task *mtask, u32 ttt,
+				   char *datap, int data_len, int unsol);
+extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
+				   struct iscsi_task *mtask);
+extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
+				       struct bnx2i_cmd *cmd);
+extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep);
+extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
+extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+				    struct bnx2i_endpoint *ep);
+
+extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
+			       struct bnx2i_endpoint *ep);
+extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
+			       struct bnx2i_endpoint *ep);
+extern void bnx2i_ep_ofld_timer(unsigned long data);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
+		struct bnx2i_hba *hba, u32 iscsi_cid);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
+		struct bnx2i_hba *hba, u32 iscsi_cid);
+
+extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
+extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
+
+/* Debug related function prototypes */
+extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
+
+#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644
index 0000000..d404e5f
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -0,0 +1,2412 @@
+/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+/**
+ * bnx2i_get_cid_num - get cid from ep
+ * @ep: 	endpoint pointer
+ *
+ * Only applicable to 57710 family of devices
+ */
+static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
+{
+	u32 cid;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		cid = ep->ep_cid;
+	else
+		cid = GET_CID_NUM(ep->ep_cid);
+	return cid;
+}
+
+
+/**
+ * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type
+ * @hba: 		Adapter for which adjustments are to be made
+ *
+ * Only applicable to 57710 family of devices
+ */
+static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
+{
+	u32 num_elements_per_pg;
+
+	if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
+	    test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
+	    test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+		if (!is_power_of_2(hba->max_sqes))
+			hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
+
+		if (!is_power_of_2(hba->max_rqes))
+			hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
+	}
+
+	/* Adjust each queue size if the user selection does not
+	 * yield integral num of page buffers
+	 */
+	/* adjust SQ */
+	num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	if (hba->max_sqes < num_elements_per_pg)
+		hba->max_sqes = num_elements_per_pg;
+	else if (hba->max_sqes % num_elements_per_pg)
+		hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
+				 ~(num_elements_per_pg - 1);
+
+	/* adjust CQ */
+	num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+	if (hba->max_cqes < num_elements_per_pg)
+		hba->max_cqes = num_elements_per_pg;
+	else if (hba->max_cqes % num_elements_per_pg)
+		hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
+				 ~(num_elements_per_pg - 1);
+
+	/* adjust RQ */
+	num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+	if (hba->max_rqes < num_elements_per_pg)
+		hba->max_rqes = num_elements_per_pg;
+	else if (hba->max_rqes % num_elements_per_pg)
+		hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
+				 ~(num_elements_per_pg - 1);
+}
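
The rounding above is the usual power-of-two alignment idiom: each queue size
is bumped up to a whole number of pages worth of entries (on a 4K-page system,
4096 / 64 = 64 SQ or CQ entries per page). A small stand-alone sketch of the
same arithmetic, using illustrative numbers:

    #include <stdio.h>

    /* Round count up to a multiple of per_pg (per_pg must be a power of two),
     * never going below one full page of entries.
     */
    static unsigned int round_to_page_multiple(unsigned int count,
                                               unsigned int per_pg)
    {
            if (count < per_pg)
                    return per_pg;
            return (count + per_pg - 1) & ~(per_pg - 1);
    }

    int main(void)
    {
            printf("%u\n", round_to_page_multiple(100, 64));        /* -> 128 */
            return 0;
    }
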
+
+
+/**
+ * bnx2i_get_link_state - get network interface link state
+ * @hba:	adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+static void bnx2i_get_link_state(struct bnx2i_hba *hba)
+{
+	if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+	else
+		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_iscsi_license_error - displays iscsi license related error message
+ * @hba:		adapter instance pointer
+ * @error_code:		error classification
+ *
+ * Puts out an error log when driver is unable to offload iscsi connection
+ *	due to license restrictions
+ */
+static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
+{
+	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
+		/* iSCSI offload not supported on this device */
+		printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
+				hba->netdev->name);
+	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
+		/* iSCSI offload not supported on this LOM device */
+		printk(KERN_ERR "bnx2i: LOM is not enabled to "
+				"offload iSCSI connections, dev=%s\n",
+				hba->netdev->name);
+	set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
+ * @ep:		endpoint (transport identifier) structure
+ * @action:	action, ARM or DISARM. For now only ARM_CQE is used
+ *
+ * Arming the CQ will enable the chip to generate global EQ events in order to
+ *	interrupt the driver. An EQ event is generated when the CQ index is hit
+ *	or when at least 1 CQE is outstanding and the on-chip timer expires
+ */
+void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
+{
+	struct bnx2i_5771x_cq_db *cq_db;
+	u16 cq_index;
+
+	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		return;
+
+	if (action == CNIC_ARM_CQE) {
+		cq_index = ep->qp.cqe_exp_seq_sn +
+			   ep->num_active_cmds / event_coal_div;
+		cq_index %= (ep->qp.cqe_size * 2 + 1);
+		if (!cq_index) {
+			cq_index = 1;
+			cq_db = (struct bnx2i_5771x_cq_db *)
+					ep->qp.cq_pgtbl_virt;
+			cq_db->sqn[0] = cq_index;
+		}
+	}
+}
+
+
+/**
+ * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
+ * @conn:		iscsi connection on which RQ event occurred
+ * @ptr:		driver buffer to which RQ buffer contents is to
+ *			be copied
+ * @len:		length of valid data inside RQ buf
+ *
+ * Copies RQ buffer contents from shared (DMA'able) memory region to
+ *	driver buffer. RQ is used to DMA unsolicited iscsi pdus and
+ *	scsi sense info
+ */
+void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
+{
+	if (!bnx2i_conn->ep->qp.rqe_left)
+		return;
+
+	bnx2i_conn->ep->qp.rqe_left--;
+	memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
+	if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
+		bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
+		bnx2i_conn->ep->qp.rq_cons_idx = 0;
+	} else {
+		bnx2i_conn->ep->qp.rq_cons_qe++;
+		bnx2i_conn->ep->qp.rq_cons_idx++;
+	}
+}
+
+
+static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
+{
+	struct bnx2i_5771x_dbell dbell;
+	u32 msg;
+
+	memset(&dbell, 0, sizeof(dbell));
+	dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
+			      B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
+	msg = *((u32 *)&dbell);
+	/* TODO : get doorbell register mapping */
+	writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
+}
+
+
+/**
+ * bnx2i_put_rq_buf - Replenish RQ buffers, ring on-chip doorbell if required
+ * @conn:	iscsi connection on which RQ buffers are being posted
+ * @count:	number of RQ buffers being posted to chip
+ *
+ * No need to ring hardware doorbell for 57710 family of devices
+ */
+void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
+{
+	struct bnx2i_5771x_sq_rq_db *rq_db;
+	u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
+	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
+	ep->qp.rqe_left += count;
+	ep->qp.rq_prod_idx &= 0x7FFF;
+	ep->qp.rq_prod_idx += count;
+
+	if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
+		ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
+		if (!hi_bit)
+			ep->qp.rq_prod_idx |= 0x8000;
+	} else
+		ep->qp.rq_prod_idx |= hi_bit;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
+		rq_db->prod_idx = ep->qp.rq_prod_idx;
+		/* no need to ring hardware doorbell for 57710 */
+	} else {
+		writew(ep->qp.rq_prod_idx,
+		       ep->qp.ctx_base + CNIC_RECV_DOORBELL);
+	}
+	mmiowb();
+}
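
The RQ producer index above keeps the actual index in the low 15 bits and
toggles bit 15 each time the index wraps past max_rqes, which is how the chip
tells a full ring apart from an empty one. A small sketch of that update,
assuming a max_rqes of 16 purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t advance_rq_prod(uint16_t prod, uint16_t count,
                                    uint16_t max_rqes)
    {
            uint16_t hi_bit = prod & 0x8000;
            uint16_t idx = (uint16_t)((prod & 0x7FFF) + count);

            if (idx > max_rqes) {
                    idx %= max_rqes;
                    hi_bit ^= 0x8000;       /* wrapped: flip the phase bit */
            }
            return idx | hi_bit;
    }

    int main(void)
    {
            printf("0x%04x\n", advance_rq_prod(15, 2, 16));  /* wraps -> 0x8001 */
            return 0;
    }
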
+
+
+/**
+ * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
+ * @conn: 		iscsi connection to which new SQ entries belong
+ * @count: 		number of SQ WQEs to post
+ *
+ * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
+ *	of devices. For 5706/5708/5709 new SQ WQE count is written into the
+ *	doorbell register
+ */
+static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
+{
+	struct bnx2i_5771x_sq_rq_db *sq_db;
+	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
+	ep->num_active_cmds++;
+	wmb();	/* flush SQ WQE memory before the doorbell is rung */
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
+		sq_db->prod_idx = ep->qp.sq_prod_idx;
+		bnx2i_ring_577xx_doorbell(bnx2i_conn);
+	} else
+		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
+
+	mmiowb(); /* flush posted PCI writes */
+}
+
+
+/**
+ * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
+ * @conn:	iscsi connection to which new SQ entries belong
+ * @count:	number of SQ WQEs to post
+ *
+ * this routine will update SQ driver parameters and ring the doorbell
+ */
+static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
+					      int count)
+{
+	int tmp_cnt;
+
+	if (count == 1) {
+		if (bnx2i_conn->ep->qp.sq_prod_qe ==
+		    bnx2i_conn->ep->qp.sq_last_qe)
+			bnx2i_conn->ep->qp.sq_prod_qe =
+						bnx2i_conn->ep->qp.sq_first_qe;
+		else
+			bnx2i_conn->ep->qp.sq_prod_qe++;
+	} else {
+		if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
+		    bnx2i_conn->ep->qp.sq_last_qe)
+			bnx2i_conn->ep->qp.sq_prod_qe += count;
+		else {
+			tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
+				bnx2i_conn->ep->qp.sq_prod_qe;
+			bnx2i_conn->ep->qp.sq_prod_qe =
+				&bnx2i_conn->ep->qp.sq_first_qe[count -
+								(tmp_cnt + 1)];
+		}
+	}
+	bnx2i_conn->ep->qp.sq_prod_idx += count;
+	/* Ring the doorbell */
+	bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
+}
+
+
+/**
+ * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
+ * @conn:	iscsi connection
+ * @cmd:	driver command structure which is requesting
+ *		a WQE to be sent to chip for further processing
+ *
+ * prepare and post an iSCSI Login request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
+			   struct iscsi_task *task)
+{
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_login_request *login_wqe;
+	struct iscsi_login *login_hdr;
+	u32 dword;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+	login_hdr = (struct iscsi_login *)task->hdr;
+	login_wqe = (struct bnx2i_login_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+
+	login_wqe->op_code = login_hdr->opcode;
+	login_wqe->op_attr = login_hdr->flags;
+	login_wqe->version_max = login_hdr->max_version;
+	login_wqe->version_min = login_hdr->min_version;
+	login_wqe->data_length = ntoh24(login_hdr->dlength);
+	login_wqe->isid_lo = *((u32 *) login_hdr->isid);
+	login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
+	login_wqe->tsih = login_hdr->tsih;
+	login_wqe->itt = task->itt |
+		(ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
+	login_wqe->cid = login_hdr->cid;
+
+	login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+	login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+
+	login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+	login_wqe->resp_bd_list_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+	dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
+		 (bnx2i_conn->gen_pdu.resp_buf_size <<
+		  ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+	login_wqe->resp_buffer = dword;
+	login_wqe->flags = 0;
+	login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+	login_wqe->bd_list_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+	login_wqe->num_bds = 1;
+	login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
+ * @conn:	iscsi connection
+ * @mtask:	driver command structure which is requesting
+ *		a WQE to be sent to chip for further processing
+ *
+ * prepare and post an iSCSI task management request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
+			 struct iscsi_task *mtask)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_tm *tmfabort_hdr;
+	struct scsi_cmnd *ref_sc;
+	struct iscsi_task *ctask;
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_tmf_request *tmfabort_wqe;
+	u32 dword;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+	tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
+	tmfabort_wqe = (struct bnx2i_tmf_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+
+	tmfabort_wqe->op_code = tmfabort_hdr->opcode;
+	tmfabort_wqe->op_attr = 0;
+	tmfabort_wqe->op_attr =
+		ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
+	tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
+	tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
+
+	tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
+	tmfabort_wqe->reserved2 = 0;
+	tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
+
+	ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
+	if (!ctask || !ctask->sc)
+		/*
+		 * the iscsi layer must have completed the cmd while this
+		 * was starting up.
+		 */
+		return 0;
+	ref_sc = ctask->sc;
+
+	if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
+		dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+	else
+		dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+	tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
+	tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
+
+	tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+	tmfabort_wqe->bd_list_addr_hi = (u32)
+				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+	tmfabort_wqe->num_bds = 1;
+	tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
+ * @conn:	iscsi connection
+ * @cmd:	driver command structure which is requesting
+ *		a WQE to be sent to chip for further processing
+ *
+ * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
+			     struct bnx2i_cmd *cmd)
+{
+	struct bnx2i_cmd_request *scsi_cmd_wqe;
+
+	scsi_cmd_wqe = (struct bnx2i_cmd_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+	memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
+	scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
+ * @conn:		iscsi connection
+ * @cmd:		driver command structure which is requesting
+ *			a WQE to be sent to chip for further processing
+ * @ttt:		TTT to be used when building pdu header
+ * @datap:		payload buffer pointer
+ * @data_len:		payload data length
+ * @unsol:		indicates whether nopout pdu is unsolicited pdu or
+ *			in response to target's NOPIN w/ TTT != FFFFFFFF
+ *
+ * prepare and post a nopout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
+			    struct iscsi_task *task, u32 ttt,
+			    char *datap, int data_len, int unsol)
+{
+	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_nop_out_request *nopout_wqe;
+	struct iscsi_nopout *nopout_hdr;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+	nopout_hdr = (struct iscsi_nopout *)task->hdr;
+	nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+	nopout_wqe->op_code = nopout_hdr->opcode;
+	nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
+	memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		u32 tmp = nopout_hdr->lun[0];
+		/* 57710 requires LUN field to be swapped */
+		nopout_hdr->lun[0] = nopout_hdr->lun[1];
+		nopout_hdr->lun[1] = tmp;
+	}
+
+	nopout_wqe->itt = ((u16)task->itt |
+			   (ISCSI_TASK_TYPE_MPATH <<
+			    ISCSI_TMF_REQUEST_TYPE_SHIFT));
+	nopout_wqe->ttt = ttt;
+	nopout_wqe->flags = 0;
+	if (!unsol)
+		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+	else if (nopout_hdr->itt == RESERVED_ITT)
+		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+
+	nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+	nopout_wqe->data_length = data_len;
+	if (data_len) {
+		/* handle payload data, not required in first release */
+		printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
+	} else {
+		nopout_wqe->bd_list_addr_lo = (u32)
+					bnx2i_conn->hba->mp_bd_dma;
+		nopout_wqe->bd_list_addr_hi =
+			(u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+		nopout_wqe->num_bds = 1;
+	}
+	nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+
+/**
+ * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
+ * @conn:	iscsi connection
+ * @cmd:	driver command structure which is requesting
+ *		a WQE to be sent to chip for further processing
+ *
+ * prepare and post logout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
+			    struct iscsi_task *task)
+{
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct bnx2i_logout_request *logout_wqe;
+	struct iscsi_logout *logout_hdr;
+
+	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+	logout_hdr = (struct iscsi_logout *)task->hdr;
+
+	logout_wqe = (struct bnx2i_logout_request *)
+						bnx2i_conn->ep->qp.sq_prod_qe;
+	memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
+
+	logout_wqe->op_code = logout_hdr->opcode;
+	logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+	logout_wqe->op_attr =
+			logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
+	logout_wqe->itt = ((u16)task->itt |
+			   (ISCSI_TASK_TYPE_MPATH <<
+			    ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
+	logout_wqe->data_length = 0;
+	logout_wqe->cid = 0;
+
+	logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+	logout_wqe->bd_list_addr_hi = (u32)
+				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+	logout_wqe->num_bds = 1;
+	logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+	return 0;
+}
+
+
+/**
+ * bnx2i_update_iscsi_conn - post iSCSI conn update request to hardware
+ * @conn:	iscsi connection which requires iscsi parameter update
+ *
+ * sends down iSCSI Conn Update request to move iSCSI conn to FFP
+ */
+void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
+{
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_hba *hba = bnx2i_conn->hba;
+	struct kwqe *kwqe_arr[2];
+	struct iscsi_kwqe_conn_update *update_wqe;
+	struct iscsi_kwqe_conn_update conn_update_kwqe;
+
+	update_wqe = &conn_update_kwqe;
+
+	update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
+	update_wqe->hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	/* 5771x requires conn context id to be passed as is */
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
+		update_wqe->context_id = bnx2i_conn->ep->ep_cid;
+	else
+		update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
+	update_wqe->conn_flags = 0;
+	if (conn->hdrdgst_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
+	if (conn->datadgst_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
+	if (conn->session->initial_r2t_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
+	if (conn->session->imm_data_en)
+		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
+
+	update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
+	update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
+	update_wqe->first_burst_length = conn->session->first_burst;
+	update_wqe->max_burst_length = conn->session->max_burst;
+	update_wqe->exp_stat_sn = bnx2i_conn->exp_statsn;
+	update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
+	update_wqe->session_error_recovery_level = conn->session->erl;
+	iscsi_conn_printk(KERN_ALERT, conn,
+			  "bnx2i: conn update - MBL 0x%x FBL 0x%x"
+			  " MRDSL_I 0x%x MRDSL_T 0x%x\n",
+			  update_wqe->max_burst_length,
+			  update_wqe->first_burst_length,
+			  update_wqe->max_recv_pdu_length,
+			  update_wqe->max_send_pdu_length);
+
+	kwqe_arr[0] = (struct kwqe *) update_wqe;
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
+ * @data:	endpoint (transport handle) structure pointer
+ *
+ * routine to handle connection offload/destroy request timeout
+ */
+void bnx2i_ep_ofld_timer(unsigned long data)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
+
+	if (ep->state == EP_STATE_OFLD_START) {
+		printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
+		ep->state = EP_STATE_OFLD_FAILED;
+	} else if (ep->state == EP_STATE_DISCONN_START) {
+		printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
+		ep->state = EP_STATE_DISCONN_TIMEDOUT;
+	} else if (ep->state == EP_STATE_CLEANUP_START) {
+		printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
+		ep->state = EP_STATE_CLEANUP_FAILED;
+	}
+
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+static int bnx2i_power_of2(u32 val)
+{
+	u32 power = 0;
+	if (val & (val - 1))
+		return power;
+	val--;
+	while (val) {
+		val = val >> 1;
+		power++;
+	}
+	return power;
+}
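
bnx2i_power_of2() above returns log2(val) for exact powers of two and 0
otherwise (note that an input of 1 also yields 0); a quick user-space check of
that behaviour:

    #include <assert.h>

    static unsigned int power_of2(unsigned int val)
    {
            unsigned int power = 0;

            if (val & (val - 1))
                    return power;           /* not a power of two */
            val--;
            while (val) {
                    val >>= 1;
                    power++;
            }
            return power;
    }

    int main(void)
    {
            assert(power_of2(64) == 6);
            assert(power_of2(48) == 0);
            return 0;
    }
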
+
+
+/**
+ * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
+ * @hba:	adapter structure pointer
+ * @cmd:	driver command structure which is requesting
+ *		a WQE to be sent to chip for further processing
+ *
+ * prepares and posts an iSCSI command cleanup request SQ WQE
+ */
+void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+	struct bnx2i_cleanup_request *cmd_cleanup;
+
+	cmd_cleanup =
+		(struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
+	memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
+
+	cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
+	cmd_cleanup->itt = cmd->req.itt;
+	cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+	bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
+}
+
+
+/**
+ * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
+ * @hba:	adapter structure pointer
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts DESTROY_CONN KWQE to initiate
+ * 	iscsi connection context clean-up process
+ */
+void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	struct kwqe *kwqe_arr[2];
+	struct iscsi_kwqe_conn_destroy conn_cleanup;
+
+	memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
+
+	conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
+	conn_cleanup.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+	/* 5771x requires conn context id to be passed as is */
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		conn_cleanup.context_id = ep->ep_cid;
+	else
+		conn_cleanup.context_id = (ep->ep_cid >> 7);
+
+	conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
+
+	kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
+ * @hba: 		adapter structure pointer
+ * @ep: 		endpoint (transport identifier) structure
+ *
+ * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
+					  struct bnx2i_endpoint *ep)
+{
+	struct kwqe *kwqe_arr[2];
+	struct iscsi_kwqe_conn_offload1 ofld_req1;
+	struct iscsi_kwqe_conn_offload2 ofld_req2;
+	dma_addr_t dma_addr;
+	int num_kwqes = 2;
+	u32 *ptbl;
+
+	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+	ofld_req1.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+	dma_addr = ep->qp.sq_pgtbl_phys;
+	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	dma_addr = ep->qp.cq_pgtbl_phys;
+	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+	ofld_req2.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	dma_addr = ep->qp.rq_pgtbl_phys;
+	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+
+	ofld_req2.sq_first_pte.hi = *ptbl++;
+	ofld_req2.sq_first_pte.lo = *ptbl;
+
+	ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+	ofld_req2.cq_first_pte.hi = *ptbl++;
+	ofld_req2.cq_first_pte.lo = *ptbl;
+
+	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+	ofld_req2.num_additional_wqes = 0;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+
+/**
+ * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
+ * @hba: 		adapter structure pointer
+ * @ep: 		endpoint (transport identifier) structure
+ *
+ * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
+					   struct bnx2i_endpoint *ep)
+{
+	struct kwqe *kwqe_arr[5];
+	struct iscsi_kwqe_conn_offload1 ofld_req1;
+	struct iscsi_kwqe_conn_offload2 ofld_req2;
+	struct iscsi_kwqe_conn_offload3 ofld_req3[1];
+	dma_addr_t dma_addr;
+	int num_kwqes = 2;
+	u32 *ptbl;
+
+	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+	ofld_req1.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+	dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
+	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
+	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+	ofld_req2.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
+	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+	ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+	ofld_req2.sq_first_pte.hi = *ptbl++;
+	ofld_req2.sq_first_pte.lo = *ptbl;
+
+	ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+	ofld_req2.cq_first_pte.hi = *ptbl++;
+	ofld_req2.cq_first_pte.lo = *ptbl;
+
+	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+
+	ofld_req2.num_additional_wqes = 1;
+	memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
+	ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+	ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
+	ofld_req3[0].qp_first_pte[0].lo = *ptbl;
+
+	kwqe_arr[2] = (struct kwqe *) ofld_req3;
+	/* need if we decide to go with multiple KCQE's per conn */
+	num_kwqes += 1;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+/**
+ * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
+ *
+ * @hba: 		adapter structure pointer
+ * @ep: 		endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		bnx2i_5771x_send_conn_ofld_req(hba, ep);
+	else
+		bnx2i_570x_send_conn_ofld_req(hba, ep);
+}
+
+
+/**
+ * setup_qp_page_tables - iscsi QP page table setup function
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * Sets up page tables for SQ/RQ/CQ. 1G/sec (5706/5708/5709) devices require
+ * 	64-bit addresses in big endian format, whereas 10G/sec (57710) devices
+ * 	require page table entries in little endian format
+ */
+static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
+{
+	int num_pages;
+	u32 *ptbl;
+	dma_addr_t page;
+	int cnic_dev_10g;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+		cnic_dev_10g = 1;
+	else
+		cnic_dev_10g = 0;
+
+	/* SQ page table */
+	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
+	num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+	page = ep->qp.sq_phys;
+
+	if (cnic_dev_10g)
+		ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+	else
+		ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+	while (num_pages--) {
+		if (cnic_dev_10g) {
+			/* PTE is written in little endian format for 57710 */
+			*ptbl = (u32) page;
+			ptbl++;
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			page += PAGE_SIZE;
+		} else {
+			/* PTE is written in big endian format for
+			 * 5706/5708/5709 devices */
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			*ptbl = (u32) page;
+			ptbl++;
+			page += PAGE_SIZE;
+		}
+	}
+
+	/* RQ page table */
+	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
+	num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+	page = ep->qp.rq_phys;
+
+	if (cnic_dev_10g)
+		ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+	else
+		ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
+	while (num_pages--) {
+		if (cnic_dev_10g) {
+			/* PTE is written in little endian format for 57710 */
+			*ptbl = (u32) page;
+			ptbl++;
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			page += PAGE_SIZE;
+		} else {
+			/* PTE is written in big endian format for
+			 * 5706/5708/5709 devices */
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			*ptbl = (u32) page;
+			ptbl++;
+			page += PAGE_SIZE;
+		}
+	}
+
+	/* CQ page table */
+	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
+	num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+	page = ep->qp.cq_phys;
+
+	if (cnic_dev_10g)
+		ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+	else
+		ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+	while (num_pages--) {
+		if (cnic_dev_10g) {
+			/* PTE is written in little endian format for 57710 */
+			*ptbl = (u32) page;
+			ptbl++;
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			page += PAGE_SIZE;
+		} else {
+			/* PTE is written in big endian format for
+			 * 5706/5708/5709 devices */
+			*ptbl = (u32) ((u64) page >> 32);
+			ptbl++;
+			*ptbl = (u32) page;
+			ptbl++;
+			page += PAGE_SIZE;
+		}
+	}
+}
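
As an aside, the two PTE layouts above differ only in which 32-bit half of the
DMA address is written first; here is a minimal standalone sketch (editor's
illustration, not part of the patch; function names are made up) of the two
orderings:

/* Illustrative sketch: split a 64-bit DMA address into two 32-bit
 * page-table words, in the two orderings used by setup_qp_page_tables(). */
#include <stdint.h>

static void pte_write_5771x(uint32_t *ptbl, uint64_t page)
{
	ptbl[0] = (uint32_t) page;		/* low half first (little endian layout) */
	ptbl[1] = (uint32_t) (page >> 32);	/* then high half */
}

static void pte_write_570x(uint32_t *ptbl, uint64_t page)
{
	ptbl[0] = (uint32_t) (page >> 32);	/* high half first (big endian layout) */
	ptbl[1] = (uint32_t) page;		/* then low half */
}
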
+
+
+/**
+ * bnx2i_alloc_qp_resc - allocates required resources for QP.
+ * @hba:	adapter structure pointer
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
+ *	memory for SQ/RQ/CQ and page tables. EP structure elements such
+ *	as producer/consumer indexes/pointers, queue sizes and page table
+ *	contents are set up
+ */
+int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	struct bnx2i_5771x_cq_db *cq_db;
+
+	ep->hba = hba;
+	ep->conn = NULL;
+	ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
+
+	/* Allocate page table memory for SQ which is page aligned */
+	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
+	ep->qp.sq_mem_size =
+		(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	ep->qp.sq_pgtbl_size =
+		(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+	ep->qp.sq_pgtbl_size =
+		(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	ep->qp.sq_pgtbl_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+				   &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
+	if (!ep->qp.sq_pgtbl_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
+				  ep->qp.sq_pgtbl_size);
+		goto mem_alloc_err;
+	}
+
+	/* Allocate memory area for actual SQ element */
+	ep->qp.sq_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+				   &ep->qp.sq_phys, GFP_KERNEL);
+	if (!ep->qp.sq_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
+				  ep->qp.sq_mem_size);
+		goto mem_alloc_err;
+	}
+
+	memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
+	ep->qp.sq_first_qe = ep->qp.sq_virt;
+	ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
+	ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
+	ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
+	ep->qp.sq_prod_idx = 0;
+	ep->qp.sq_cons_idx = 0;
+	ep->qp.sqe_left = hba->max_sqes;
+
+	/* Allocate page table memory for CQ which is page aligned */
+	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
+	ep->qp.cq_mem_size =
+		(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	ep->qp.cq_pgtbl_size =
+		(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+	ep->qp.cq_pgtbl_size =
+		(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	ep->qp.cq_pgtbl_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+				   &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
+	if (!ep->qp.cq_pgtbl_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
+				  ep->qp.cq_pgtbl_size);
+		goto mem_alloc_err;
+	}
+
+	/* Allocate memory area for actual CQ element */
+	ep->qp.cq_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+				   &ep->qp.cq_phys, GFP_KERNEL);
+	if (!ep->qp.cq_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
+				  ep->qp.cq_mem_size);
+		goto mem_alloc_err;
+	}
+	memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
+
+	ep->qp.cq_first_qe = ep->qp.cq_virt;
+	ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
+	ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
+	ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
+	ep->qp.cq_prod_idx = 0;
+	ep->qp.cq_cons_idx = 0;
+	ep->qp.cqe_left = hba->max_cqes;
+	ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+	ep->qp.cqe_size = hba->max_cqes;
+
+	/* Invalidate all EQ CQE index, req only for 57710 */
+	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+	memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
+
+	/* Allocate page table memory for RQ which is page aligned */
+	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
+	ep->qp.rq_mem_size =
+		(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	ep->qp.rq_pgtbl_size =
+		(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+	ep->qp.rq_pgtbl_size =
+		(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	ep->qp.rq_pgtbl_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+				   &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
+	if (!ep->qp.rq_pgtbl_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
+				  ep->qp.rq_pgtbl_size);
+		goto mem_alloc_err;
+	}
+
+	/* Allocate memory area for actual RQ element */
+	ep->qp.rq_virt =
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+				   &ep->qp.rq_phys, GFP_KERNEL);
+	if (!ep->qp.rq_virt) {
+		printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
+				  ep->qp.rq_mem_size);
+		goto mem_alloc_err;
+	}
+
+	ep->qp.rq_first_qe = ep->qp.rq_virt;
+	ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
+	ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
+	ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
+	ep->qp.rq_prod_idx = 0x8000;
+	ep->qp.rq_cons_idx = 0;
+	ep->qp.rqe_left = hba->max_rqes;
+
+	setup_qp_page_tables(ep);
+
+	return 0;
+
+mem_alloc_err:
+	bnx2i_free_qp_resc(hba, ep);
+	return -ENOMEM;
+}
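
For reference, the repeated "(size + (PAGE_SIZE - 1)) & PAGE_MASK" expression
above simply rounds a byte count up to a whole number of pages. A small
standalone sketch (editor's illustration, not part of the patch; assumes a 4K
page for the example values):

/* Illustrative sketch: round a byte count up to a page boundary,
 * mirroring the alignment math in bnx2i_alloc_qp_resc(). */
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

static unsigned long round_up_to_page(unsigned long sz)
{
	return (sz + (EX_PAGE_SIZE - 1)) & EX_PAGE_MASK;
}

int main(void)
{
	printf("%lu\n", round_up_to_page(8192));	/* already page aligned: 8192 */
	printf("%lu\n", round_up_to_page(100));		/* rounds up to one page: 4096 */
	return 0;
}
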
+
+
+
+/**
+ * bnx2i_free_qp_resc - free memory resources held by QP
+ * @hba:	adapter structure pointer
+ * @ep:	endpoint (transport identifier) structure
+ *
+ * Free QP resources - SQ/RQ/CQ memory and page tables.
+ */
+void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+	if (ep->qp.ctx_base) {
+		iounmap(ep->qp.ctx_base);
+		ep->qp.ctx_base = NULL;
+	}
+	/* Free SQ mem */
+	if (ep->qp.sq_pgtbl_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+				  ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
+		ep->qp.sq_pgtbl_virt = NULL;
+		ep->qp.sq_pgtbl_phys = 0;
+	}
+	if (ep->qp.sq_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+				  ep->qp.sq_virt, ep->qp.sq_phys);
+		ep->qp.sq_virt = NULL;
+		ep->qp.sq_phys = 0;
+	}
+
+	/* Free RQ mem */
+	if (ep->qp.rq_pgtbl_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+				  ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
+		ep->qp.rq_pgtbl_virt = NULL;
+		ep->qp.rq_pgtbl_phys = 0;
+	}
+	if (ep->qp.rq_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+				  ep->qp.rq_virt, ep->qp.rq_phys);
+		ep->qp.rq_virt = NULL;
+		ep->qp.rq_phys = 0;
+	}
+
+	/* Free CQ mem */
+	if (ep->qp.cq_pgtbl_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+				  ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
+		ep->qp.cq_pgtbl_virt = NULL;
+		ep->qp.cq_pgtbl_phys = 0;
+	}
+	if (ep->qp.cq_virt) {
+		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+				  ep->qp.cq_virt, ep->qp.cq_phys);
+		ep->qp.cq_virt = NULL;
+		ep->qp.cq_phys = 0;
+	}
+}
+
+
+/**
+ * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
+ * @hba:	adapter structure pointer
+ *
+ * Send down the iscsi_init KWQEs which initiate the initial handshake with
+ * 	the f/w. This results in iSCSI support validation and on-chip context
+ * 	manager initialization.  Firmware completes this handshake with a CQE
+ * 	carrying the result of iscsi support validation. Parameters carried by
+ * 	the iscsi init request determine the number of offloaded connections and
+ * 	the tolerance level for iscsi protocol violations this hba/chip can support
+ */
+int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
+{
+	struct kwqe *kwqe_arr[3];
+	struct iscsi_kwqe_init1 iscsi_init;
+	struct iscsi_kwqe_init2 iscsi_init2;
+	int rc = 0;
+	u64 mask64;
+
+	bnx2i_adjust_qp_size(hba);
+
+	iscsi_init.flags =
+		ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+	if (en_tcp_dack)
+		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
+	iscsi_init.reserved0 = 0;
+	iscsi_init.num_cqs = 1;
+	iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
+	iscsi_init.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+	iscsi_init.dummy_buffer_addr_hi =
+		(u32) ((u64) hba->dummy_buf_dma >> 32);
+
+	hba->ctx_ccell_tasks =
+			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
+	iscsi_init.num_ccells_per_conn = hba->num_ccell;
+	iscsi_init.num_tasks_per_conn = hba->max_sqes;
+	iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	iscsi_init.sq_num_wqes = hba->max_sqes;
+	iscsi_init.cq_log_wqes_per_page =
+		(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+	iscsi_init.cq_num_wqes = hba->max_cqes;
+	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
+				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
+				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
+	iscsi_init.rq_num_wqes = hba->max_rqes;
+
+
+	iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
+	iscsi_init2.hdr.flags =
+		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+	iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
+	mask64 = 0x0ULL;
+	mask64 |= (
+		/* CISCO MDS */
+		(1UL <<
+		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
+		/* HP MSA1510i */
+		(1UL <<
+		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
+		/* EMC */
+		(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
+	if (error_mask1)
+		iscsi_init2.error_bit_map[0] = error_mask1;
+	else
+		iscsi_init2.error_bit_map[0] = (u32) mask64;
+
+	if (error_mask2)
+		iscsi_init2.error_bit_map[1] = error_mask2;
+	else
+		iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
+
+	iscsi_error_mask = mask64;
+
+	kwqe_arr[0] = (struct kwqe *) &iscsi_init;
+	kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
+
+	if (hba->cnic && hba->cnic->submit_kwqes)
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
+	return rc;
+}
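
A hedged sketch of how the 64-bit default mask built above ends up in the two
32-bit error_bit_map words when the error_mask1/error_mask2 module parameters
are left at zero (editor's illustration, not part of the patch):

/* Illustrative sketch: pick either the module-parameter override or the
 * corresponding half of the 64-bit default mask, as done above. */
#include <stdint.h>

static void fill_error_bit_map(uint32_t bit_map[2], uint64_t mask64,
			       uint32_t mask1, uint32_t mask2)
{
	bit_map[0] = mask1 ? mask1 : (uint32_t) mask64;			/* low 32 bits  */
	bit_map[1] = mask2 ? mask2 : (uint32_t) (mask64 >> 32);		/* high 32 bits */
}
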
+
+
+/**
+ * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process SCSI CMD Response CQE & complete the request to SCSI-ML
+ */
+static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+				       struct bnx2i_conn *bnx2i_conn,
+				       struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct bnx2i_cmd_response *resp_cqe;
+	struct bnx2i_cmd *bnx2i_cmd;
+	struct iscsi_task *task;
+	struct iscsi_cmd_rsp *hdr;
+	u32 datalen = 0;
+
+	resp_cqe = (struct bnx2i_cmd_response *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
+	if (!task)
+		goto fail;
+
+	bnx2i_cmd = task->dd_data;
+
+	if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
+		conn->datain_pdus_cnt +=
+			resp_cqe->task_stat.read_stat.num_data_outs;
+		conn->rxdata_octets +=
+			bnx2i_cmd->req.total_data_transfer_length;
+	} else {
+		conn->dataout_pdus_cnt +=
+			resp_cqe->task_stat.read_stat.num_data_outs;
+		conn->r2t_pdus_cnt +=
+			resp_cqe->task_stat.read_stat.num_r2ts;
+		conn->txdata_octets +=
+			bnx2i_cmd->req.total_data_transfer_length;
+	}
+	bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
+
+	hdr = (struct iscsi_cmd_rsp *)task->hdr;
+	resp_cqe = (struct bnx2i_cmd_response *)cqe;
+	hdr->opcode = resp_cqe->op_code;
+	hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
+	hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
+	hdr->response = resp_cqe->response;
+	hdr->cmd_status = resp_cqe->status;
+	hdr->flags = resp_cqe->response_flags;
+	hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
+
+	if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
+		goto done;
+
+	if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
+		datalen = resp_cqe->data_length;
+		if (datalen < 2)
+			goto done;
+
+		if (datalen > BNX2I_RQ_WQE_SIZE) {
+			iscsi_conn_printk(KERN_ERR, conn,
+					  "sense data len %d > RQ sz\n",
+					  datalen);
+			datalen = BNX2I_RQ_WQE_SIZE;
+		} else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
+			iscsi_conn_printk(KERN_ERR, conn,
+					  "sense data len %d > conn data\n",
+					  datalen);
+			datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
+		}
+
+		bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
+		bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
+	}
+
+done:
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+			     conn->data, datalen);
+fail:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_process_login_resp - this function handles iscsi login response
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process Login Response CQE & complete it to open-iscsi user daemon
+ */
+static int bnx2i_process_login_resp(struct iscsi_session *session,
+				    struct bnx2i_conn *bnx2i_conn,
+				    struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_login_response *login;
+	struct iscsi_login_rsp *resp_hdr;
+	int pld_len;
+	int pad_len;
+
+	login = (struct bnx2i_login_response *) cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+	if (!task)
+		goto done;
+
+	resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = login->op_code;
+	resp_hdr->flags = login->response_flags;
+	resp_hdr->max_version = login->version_max;
+	resp_hdr->active_version = login->version_active;
+	resp_hdr->hlength = 0;
+
+	hton24(resp_hdr->dlength, login->data_length);
+	memcpy(resp_hdr->isid, &login->isid_lo, 6);
+	resp_hdr->tsih = cpu_to_be16(login->tsih);
+	resp_hdr->itt = task->hdr->itt;
+	resp_hdr->statsn = cpu_to_be32(login->stat_sn);
+	resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
+	resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
+	resp_hdr->status_class = login->status_class;
+	resp_hdr->status_detail = login->status_detail;
+	pld_len = login->data_length;
+	bnx2i_conn->gen_pdu.resp_wr_ptr =
+					bnx2i_conn->gen_pdu.resp_buf + pld_len;
+
+	pad_len = 0;
+	if (pld_len & 0x3)
+		pad_len = 4 - (pld_len % 4);
+
+	if (pad_len) {
+		int i = 0;
+		for (i = 0; i < pad_len; i++) {
+			bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+			bnx2i_conn->gen_pdu.resp_wr_ptr++;
+		}
+	}
+
+	/*
+	 * check if this is the first login response for this connection.
+	 * If yes, we need to copy initial StatSN to connection structure.
+	 */
+	if (bnx2i_conn->exp_statsn == BNX2I_STATSN_UPDATE_SIGNATURE)
+		bnx2i_conn->exp_statsn = be32_to_cpu(resp_hdr->statsn) + 1;
+
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+		bnx2i_conn->gen_pdu.resp_buf,
+		bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
+done:
+	spin_unlock(&session->lock);
+	return 0;
+}
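
The pad handling above follows the usual iSCSI rule of padding the data
segment to a 4-byte boundary with zero bytes; a one-line sketch of the same
computation (editor's illustration, not part of the patch):

/* Illustrative sketch: zero-pad bytes needed to reach the next 4-byte
 * boundary, equivalent to the pad_len logic above. */
static int iscsi_pad_bytes(int pld_len)
{
	return (pld_len & 0x3) ? (4 - (pld_len & 0x3)) : 0;
}
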
+
+/**
+ * bnx2i_process_tmf_resp - this function handles iscsi TMF response
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI TMF Response CQE and wake up the driver eh thread.
+ */
+static int bnx2i_process_tmf_resp(struct iscsi_session *session,
+				  struct bnx2i_conn *bnx2i_conn,
+				  struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_tmf_response *tmf_cqe;
+	struct iscsi_tm_rsp *resp_hdr;
+
+	tmf_cqe = (struct bnx2i_tmf_response *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
+	if (!task)
+		goto done;
+
+	resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = tmf_cqe->op_code;
+	resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
+	resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
+	resp_hdr->itt = task->hdr->itt;
+	resp_hdr->response = tmf_cqe->response;
+
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+/**
+ * bnx2i_process_logout_resp - this function handles iscsi logout response
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI Logout Response CQE & make function call to
+ * notify the user daemon.
+ */
+static int bnx2i_process_logout_resp(struct iscsi_session *session,
+				     struct bnx2i_conn *bnx2i_conn,
+				     struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_logout_response *logout;
+	struct iscsi_logout_rsp *resp_hdr;
+
+	logout = (struct bnx2i_logout_response *) cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
+	if (!task)
+		goto done;
+
+	resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = logout->op_code;
+	resp_hdr->flags = logout->response;
+	resp_hdr->hlength = 0;
+
+	resp_hdr->itt = task->hdr->itt;
+	resp_hdr->statsn = task->hdr->exp_statsn;
+	resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
+	resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
+
+	resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
+	resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
+
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+	spin_unlock(&session->lock);
+	return 0;
+}
+
+/**
+ * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI NOPIN local completion CQE, frees ITT and command structures
+ */
+static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
+					   struct bnx2i_conn *bnx2i_conn,
+					   struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct bnx2i_nop_in_msg *nop_in;
+	struct iscsi_task *task;
+
+	nop_in = (struct bnx2i_nop_in_msg *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
+	if (task)
+		iscsi_put_task(task);
+	spin_unlock(&session->lock);
+}
+
+/**
+ * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
+ * @bnx2i_conn:	iscsi connection pointer
+ *
+ * Firmware advances RQ producer index for every unsolicited PDU even if
+ *	payload data length is '0'. This function makes corresponding
+ *	adjustments on the driver side to match this f/w behavior
+ */
+static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
+{
+	char dummy_rq_data[2];
+	bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
+	bnx2i_put_rq_buf(bnx2i_conn, 1);
+}
+
+
+/**
+ * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI target's proactive iSCSI NOPIN request
+ */
+static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
+				     struct bnx2i_conn *bnx2i_conn,
+				     struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+	struct bnx2i_nop_in_msg *nop_in;
+	struct iscsi_nopin *hdr;
+	u32 itt;
+	int tgt_async_nop = 0;
+
+	nop_in = (struct bnx2i_nop_in_msg *)cqe;
+	itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
+
+	spin_lock(&session->lock);
+	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
+	memset(hdr, 0, sizeof(struct iscsi_hdr));
+	hdr->opcode = nop_in->op_code;
+	hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
+	hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
+	hdr->ttt = cpu_to_be32(nop_in->ttt);
+
+	if (itt == (u16) RESERVED_ITT) {
+		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+		hdr->itt = RESERVED_ITT;
+		tgt_async_nop = 1;
+		goto done;
+	}
+
+	/* this is a response to one of our nop-outs */
+	task = iscsi_itt_to_task(conn, itt);
+	if (task) {
+		hdr->flags = ISCSI_FLAG_CMD_FINAL;
+		hdr->itt = task->hdr->itt;
+		hdr->ttt = cpu_to_be32(nop_in->ttt);
+		memcpy(hdr->lun, nop_in->lun, 8);
+	}
+done:
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+	spin_unlock(&session->lock);
+
+	return tgt_async_nop;
+}
+
+
+/**
+ * bnx2i_process_async_mesg - this function handles iscsi async message
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI ASYNC Message
+ */
+static void bnx2i_process_async_mesg(struct iscsi_session *session,
+				     struct bnx2i_conn *bnx2i_conn,
+				     struct cqe *cqe)
+{
+	struct bnx2i_async_msg *async_cqe;
+	struct iscsi_async *resp_hdr;
+	u8 async_event;
+
+	bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+	async_cqe = (struct bnx2i_async_msg *)cqe;
+	async_event = async_cqe->async_event;
+
+	if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
+		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+				  "async: scsi events not supported\n");
+		return;
+	}
+
+	spin_lock(&session->lock);
+	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+	resp_hdr->opcode = async_cqe->op_code;
+	resp_hdr->flags = 0x80;
+
+	memcpy(resp_hdr->lun, async_cqe->lun, 8);
+	resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
+	resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
+
+	resp_hdr->async_event = async_cqe->async_event;
+	resp_hdr->async_vcode = async_cqe->async_vcode;
+
+	resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
+	resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
+	resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
+
+	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
+			     (struct iscsi_hdr *)resp_hdr, NULL, 0);
+	spin_unlock(&session->lock);
+}
+
+
+/**
+ * bnx2i_process_reject_mesg - process iscsi reject pdu
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI REJECT message
+ */
+static void bnx2i_process_reject_mesg(struct iscsi_session *session,
+				      struct bnx2i_conn *bnx2i_conn,
+				      struct cqe *cqe)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct bnx2i_reject_msg *reject;
+	struct iscsi_reject *hdr;
+
+	reject = (struct bnx2i_reject_msg *) cqe;
+	if (reject->data_length) {
+		bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
+		bnx2i_put_rq_buf(bnx2i_conn, 1);
+	} else
+		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+	spin_lock(&session->lock);
+	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
+	memset(hdr, 0, sizeof(struct iscsi_hdr));
+	hdr->opcode = reject->op_code;
+	hdr->reason = reject->reason;
+	hton24(hdr->dlength, reject->data_length);
+	hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
+	hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
+	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
+			     reject->data_length);
+	spin_unlock(&session->lock);
+}
+
+/**
+ * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
+ * @session:		iscsi session pointer
+ * @bnx2i_conn:		iscsi connection pointer
+ * @cqe:		pointer to newly DMA'ed CQE entry for processing
+ *
+ * process command cleanup response CQE during conn shutdown or error recovery
+ */
+static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
+					   struct bnx2i_conn *bnx2i_conn,
+					   struct cqe *cqe)
+{
+	struct bnx2i_cleanup_response *cmd_clean_rsp;
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_task *task;
+
+	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
+	spin_lock(&session->lock);
+	task = iscsi_itt_to_task(conn,
+			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+	if (!task)
+		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
+			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+	spin_unlock(&session->lock);
+	complete(&bnx2i_conn->cmd_cleanup_cmpl);
+}
+
+
+
+/**
+ * bnx2i_process_new_cqes - process newly DMA'ed CQEs
+ * @bnx2i_conn:		iscsi connection
+ *
+ * this function is called by the generic KCQ handler to process all pending CQEs
+ */
+static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
+{
+	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+	struct iscsi_session *session = conn->session;
+	struct qp_info *qp = &bnx2i_conn->ep->qp;
+	struct bnx2i_nop_in_msg *nopin;
+	int tgt_async_msg;
+
+	while (1) {
+		nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
+		if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
+			break;
+
+		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
+			break;
+
+		tgt_async_msg = 0;
+
+		switch (nopin->op_code) {
+		case ISCSI_OP_SCSI_CMD_RSP:
+		case ISCSI_OP_SCSI_DATA_IN:
+			bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
+						    qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_LOGIN_RSP:
+			bnx2i_process_login_resp(session, bnx2i_conn,
+						 qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_SCSI_TMFUNC_RSP:
+			bnx2i_process_tmf_resp(session, bnx2i_conn,
+					       qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_LOGOUT_RSP:
+			bnx2i_process_logout_resp(session, bnx2i_conn,
+						  qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_NOOP_IN:
+			if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
+						     qp->cq_cons_qe))
+				tgt_async_msg = 1;
+			break;
+		case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
+			bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
+						       qp->cq_cons_qe);
+			break;
+		case ISCSI_OP_ASYNC_EVENT:
+			bnx2i_process_async_mesg(session, bnx2i_conn,
+						 qp->cq_cons_qe);
+			tgt_async_msg = 1;
+			break;
+		case ISCSI_OP_REJECT:
+			bnx2i_process_reject_mesg(session, bnx2i_conn,
+						  qp->cq_cons_qe);
+			break;
+		case ISCSI_OPCODE_CLEANUP_RESPONSE:
+			bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
+						       qp->cq_cons_qe);
+			break;
+		default:
+			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+					  nopin->op_code);
+		}
+
+		if (!tgt_async_msg)
+			bnx2i_conn->ep->num_active_cmds--;
+
+		/* clear out in production version only, till beta keep opcode
+		 * field intact, will be helpful in debugging (context dump)
+		 * nopin->op_code = 0;
+		 */
+		qp->cqe_exp_seq_sn++;
+		if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
+			qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+
+		if (qp->cq_cons_qe == qp->cq_last_qe) {
+			qp->cq_cons_qe = qp->cq_first_qe;
+			qp->cq_cons_idx = 0;
+		} else {
+			qp->cq_cons_qe++;
+			qp->cq_cons_idx++;
+		}
+	}
+	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+}
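
A small sketch of the expected-sequence-number bookkeeping above: the counter
runs through 2 * cq_size values before wrapping back to its initial value
(editor's illustration, not part of the patch; the initial value is taken from
ISCSI_INITIAL_SN):

/* Illustrative sketch: advance the expected CQE sequence number and wrap
 * back to the initial SN after 2 * cq_size entries, as done above. */
static unsigned int next_exp_cqe_sn(unsigned int sn, unsigned int cq_size,
				    unsigned int initial_sn)
{
	sn++;
	if (sn == cq_size * 2 + 1)
		sn = initial_sn;
	return sn;
}
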
+
+/**
+ * bnx2i_fastpath_notification - process global event queue (KCQ)
+ * @hba:		adapter structure pointer
+ * @new_cqe_kcqe:	pointer to newly DMA'ed KCQE entry
+ *
+ * Fast path event notification handler, KCQ entry carries context id
+ *	of the connection that has 1 or more pending CQ entries
+ */
+static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
+					struct iscsi_kcqe *new_cqe_kcqe)
+{
+	struct bnx2i_conn *conn;
+	u32 iscsi_cid;
+
+	iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
+	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+	if (!conn) {
+		printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
+		return;
+	}
+	if (!conn->ep) {
+		printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
+		return;
+	}
+
+	bnx2i_process_new_cqes(conn);
+}
+
+
+/**
+ * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
+ * @hba:		adapter structure pointer
+ * @update_kcqe:	kcqe pointer
+ *
+ * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
+ */
+static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
+					   struct iscsi_kcqe *update_kcqe)
+{
+	struct bnx2i_conn *conn;
+	u32 iscsi_cid;
+
+	iscsi_cid = update_kcqe->iscsi_conn_id;
+	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+	if (!conn) {
+		printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
+		return;
+	}
+	if (!conn->ep) {
+		printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
+		return;
+	}
+
+	if (update_kcqe->completion_status) {
+		printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
+		conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
+	} else
+		conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
+
+	wake_up_interruptible(&conn->ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_recovery_que_add_conn - add connection to recovery queue
+ * @hba:		adapter structure pointer
+ * @bnx2i_conn:		iscsi connection
+ *
+ * Add connection to recovery queue and schedule adapter eh worker
+ */
+static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
+					struct bnx2i_conn *bnx2i_conn)
+{
+	iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
+			   ISCSI_ERR_CONN_FAILED);
+}
+
+
+/**
+ * bnx2i_process_tcp_error - process error notification on a given connection
+ *
+ * @hba: 		adapter structure pointer
+ * @tcp_err: 		tcp error kcqe pointer
+ *
+ * handles tcp level error notifications from FW.
+ */
+static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
+				    struct iscsi_kcqe *tcp_err)
+{
+	struct bnx2i_conn *bnx2i_conn;
+	u32 iscsi_cid;
+
+	iscsi_cid = tcp_err->iscsi_conn_id;
+	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+	if (!bnx2i_conn) {
+		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+		return;
+	}
+
+	printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
+			  iscsi_cid, tcp_err->completion_status);
+	bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+}
+
+
+/**
+ * bnx2i_process_iscsi_error - process error notification on a given connection
+ * @hba:		adapter structure pointer
+ * @iscsi_err:		iscsi error kcqe pointer
+ *
+ * handles iscsi error notifications from the FW. Based on the mask negotiated
+ *	during the initial handshake, firmware classifies an iscsi protocol /
+ *	TCP RFC violation as either a warning or an error indication. If the
+ *	indication is of "Error" type, the driver will initiate session
+ *	recovery for that connection/session. For a "Warning" type indication,
+ *	the driver will put out a system log message (there will be only one
+ *	message for each type for the life of the session, to avoid
+ *	unnecessarily overloading the system)
+ */
+static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
+				      struct iscsi_kcqe *iscsi_err)
+{
+	struct bnx2i_conn *bnx2i_conn;
+	u32 iscsi_cid;
+	char warn_notice[] = "iscsi_warning";
+	char error_notice[] = "iscsi_error";
+	char additional_notice[64];
+	char *message;
+	int need_recovery;
+	u64 err_mask64;
+
+	iscsi_cid = iscsi_err->iscsi_conn_id;
+	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+	if (!bnx2i_conn) {
+		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+		return;
+	}
+
+	err_mask64 = (0x1ULL << iscsi_err->completion_status);
+
+	if (err_mask64 & iscsi_error_mask) {
+		need_recovery = 0;
+		message = warn_notice;
+	} else {
+		need_recovery = 1;
+		message = error_notice;
+	}
+
+	switch (iscsi_err->completion_status) {
+	case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
+		strcpy(additional_notice, "hdr digest err");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
+		strcpy(additional_notice, "data digest err");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
+		strcpy(additional_notice, "wrong opcode rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
+		strcpy(additional_notice, "AHS len > 0 rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
+		strcpy(additional_notice, "invalid ITT rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
+		strcpy(additional_notice, "wrong StatSN rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
+		strcpy(additional_notice, "wrong DataSN rcvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
+		strcpy(additional_notice, "pend R2T violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
+		strcpy(additional_notice, "ERL0, UO");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
+		strcpy(additional_notice, "ERL0, U1");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
+		strcpy(additional_notice, "ERL0, U2");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
+		strcpy(additional_notice, "ERL0, U3");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
+		strcpy(additional_notice, "ERL0, U4");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
+		strcpy(additional_notice, "ERL0, U5");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
+		strcpy(additional_notice, "ERL0, U6");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
+		strcpy(additional_notice, "invalid resi len");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
+		strcpy(additional_notice, "MRDSL violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
+		strcpy(additional_notice, "F-bit not set");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
+		strcpy(additional_notice, "invalid TTT");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
+		strcpy(additional_notice, "invalid DataSN");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
+		strcpy(additional_notice, "burst len violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
+		strcpy(additional_notice, "buf offset violation");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
+		strcpy(additional_notice, "invalid LUN field");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
+		strcpy(additional_notice, "invalid R2TSN field");
+		break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 	\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
+	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
+		strcpy(additional_notice, "invalid cmd len1");
+		break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 	\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
+	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
+		strcpy(additional_notice, "invalid cmd len2");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
+		strcpy(additional_notice,
+		       "pend r2t exceeds MaxOutstandingR2T value");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
+		strcpy(additional_notice, "TTT is rsvd");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
+		strcpy(additional_notice, "MBL violation");
+		break;
+#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO 	\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
+	case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
+		strcpy(additional_notice, "data seg len != 0");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
+		strcpy(additional_notice, "reject pdu len error");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
+		strcpy(additional_notice, "async pdu len error");
+		break;
+	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
+		strcpy(additional_notice, "nopin pdu len error");
+		break;
+#define BNX2_ERR_PEND_R2T_IN_CLEANUP			\
+	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
+	case BNX2_ERR_PEND_R2T_IN_CLEANUP:
+		strcpy(additional_notice, "pend r2t in cleanup");
+		break;
+
+	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
+		strcpy(additional_notice, "IP fragments rcvd");
+		break;
+	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
+		strcpy(additional_notice, "IP options error");
+		break;
+	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
+		strcpy(additional_notice, "urgent flag error");
+		break;
+	default:
+		printk(KERN_ALERT "iscsi_err - unknown err %x\n",
+				  iscsi_err->completion_status);
+	}
+
+	if (need_recovery) {
+		iscsi_conn_printk(KERN_ALERT,
+				  bnx2i_conn->cls_conn->dd_data,
+				  "bnx2i: %s - %s\n",
+				  message, additional_notice);
+
+		iscsi_conn_printk(KERN_ALERT,
+				  bnx2i_conn->cls_conn->dd_data,
+				  "conn_err - hostno %d conn %p, "
+				  "iscsi_cid %x cid %x\n",
+				  bnx2i_conn->hba->shost->host_no,
+				  bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
+				  bnx2i_conn->ep->ep_cid);
+		bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+	} else
+		if (!test_and_set_bit(iscsi_err->completion_status,
+				      (void *) &bnx2i_conn->violation_notified))
+			iscsi_conn_printk(KERN_ALERT,
+					  bnx2i_conn->cls_conn->dd_data,
+					  "bnx2i: %s - %s\n",
+					  message, additional_notice);
+}
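
A hedged sketch of the warning-vs-error decision above, using the mask
negotiated by bnx2i_send_fw_iscsi_init_msg() (editor's illustration, not part
of the patch; status values are assumed to be below 64 as in the codes above):

/* Illustrative sketch: completion statuses whose bit is set in the
 * negotiated error mask are only warnings; anything else needs recovery. */
#include <stdint.h>

static int iscsi_err_needs_recovery(uint64_t iscsi_error_mask, uint8_t status)
{
	return !((UINT64_C(1) << status) & iscsi_error_mask);
}
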
+
+
+/**
+ * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
+ * @hba:		adapter structure pointer
+ * @conn_destroy:	conn destroy kcqe pointer
+ *
+ * handles connection destroy completion request.
+ */
+static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
+					    struct iscsi_kcqe *conn_destroy)
+{
+	struct bnx2i_endpoint *ep;
+
+	ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
+	if (!ep) {
+		printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
+				  "offload request, unexpected completion\n");
+		return;
+	}
+
+	if (hba != ep->hba) {
+		printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+		return;
+	}
+
+	if (conn_destroy->completion_status) {
+		printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
+		ep->state = EP_STATE_CLEANUP_FAILED;
+	} else
+		ep->state = EP_STATE_CLEANUP_CMPL;
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
+ * @hba:		adapter structure pointer
+ * @ofld_kcqe:		conn offload kcqe pointer
+ *
+ * handles initial connection offload completion, ep_connect() thread is
+ *	woken-up to continue with LLP connect process
+ */
+static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
+				    struct iscsi_kcqe *ofld_kcqe)
+{
+	u32 cid_addr;
+	struct bnx2i_endpoint *ep;
+	u32 cid_num;
+
+	ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
+	if (!ep) {
+		printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
+		return;
+	}
+
+	if (hba != ep->hba) {
+		printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+		return;
+	}
+
+	if (ofld_kcqe->completion_status) {
+		if (ofld_kcqe->completion_status ==
+		    ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
+			printk(KERN_ALERT "bnx2i: unable to allocate"
+					  " iSCSI context resources\n");
+		ep->state = EP_STATE_OFLD_FAILED;
+	} else {
+		ep->state = EP_STATE_OFLD_COMPL;
+		cid_addr = ofld_kcqe->iscsi_conn_context_id;
+		cid_num = bnx2i_get_cid_num(ep);
+		ep->ep_cid = cid_addr;
+		ep->qp.ctx_base = NULL;
+	}
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+/**
+ * bnx2i_indicate_kcqe - process KCQ entries handed up by the cnic driver
+ * @context:		adapter structure pointer
+ * @kcqe:		array of pending kcqe pointers
+ * @num_cqe:		number of kcqes to process
+ *
+ * Generic KCQ event handler/dispatcher
+ */
+static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
+				u32 num_cqe)
+{
+	struct bnx2i_hba *hba = context;
+	int i = 0;
+	struct iscsi_kcqe *ikcqe = NULL;
+
+	while (i < num_cqe) {
+		ikcqe = (struct iscsi_kcqe *) kcqe[i++];
+
+		if (ikcqe->op_code ==
+		    ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
+			bnx2i_fastpath_notification(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
+			bnx2i_process_ofld_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
+			bnx2i_process_update_conn_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
+			if (ikcqe->completion_status !=
+			    ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
+				bnx2i_iscsi_license_error(hba,
+						ikcqe->completion_status);
+			else {
+				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+				bnx2i_get_link_state(hba);
+				printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
+						 "ISCSI_INIT passed\n",
+						 (u8)hba->pcidev->bus->number,
+						 hba->pci_devno,
+						 (u8)hba->pci_func);
+			}
+		} else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
+			bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
+			bnx2i_process_iscsi_error(hba, ikcqe);
+		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
+			bnx2i_process_tcp_error(hba, ikcqe);
+		else
+			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+					  ikcqe->op_code);
+	}
+}
+
+
+/**
+ * bnx2i_indicate_netevent - Generic netdev event handler
+ * @context:	adapter structure pointer
+ * @event:	event type
+ *
+ * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
+ *	NETDEV_GOING_DOWN and NETDEV_CHANGE
+ */
+static void bnx2i_indicate_netevent(void *context, unsigned long event)
+{
+	struct bnx2i_hba *hba = context;
+
+	switch (event) {
+	case NETDEV_UP:
+		if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+			bnx2i_send_fw_iscsi_init_msg(hba);
+		break;
+	case NETDEV_DOWN:
+		clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+		clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+		break;
+	case NETDEV_GOING_DOWN:
+		set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+		iscsi_host_for_each_session(hba->shost,
+					    bnx2i_drop_session);
+		break;
+	case NETDEV_CHANGE:
+		bnx2i_get_link_state(hba);
+		break;
+	default:
+		;
+	}
+}
+
+
+/**
+ * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
+ * @cm_sk: 		cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate completion of option-2 TCP connect request.
+ */
+static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+		ep->state = EP_STATE_CONNECT_FAILED;
+	else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
+		ep->state = EP_STATE_CONNECT_COMPL;
+	else
+		ep->state = EP_STATE_CONNECT_FAILED;
+
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_close_cmpl - process tcp conn close completion
+ * @cm_sk:	cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate completion of option-2 graceful TCP connect shutdown
+ */
+static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_DISCONN_COMPL;
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
+ * @cm_sk:	cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate completion of option-2 abortive TCP connect termination
+ */
+static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_DISCONN_COMPL;
+	wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_remote_close - process received TCP FIN
+ * @cm_sk:		cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to indicate
+ *	async TCP events such as FIN
+ */
+static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_TCP_FIN_RCVD;
+	if (ep->conn)
+		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+/**
+ * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
+ * @cm_sk:		cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *	indicate async TCP events (RST) sent by the peer.
+ */
+static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
+{
+	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+	ep->state = EP_STATE_TCP_RST_RCVD;
+	if (ep->conn)
+		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+
+static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
+			       char *buf, u16 buflen)
+{
+	struct bnx2i_hba *hba;
+
+	hba = bnx2i_find_hba_for_cnic(dev);
+	if (!hba)
+		return;
+
+	if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
+				   msg_type, buf, buflen))
+		printk(KERN_ALERT "bnx2i: private nl message send error\n");
+
+}
+
+
+/**
+ * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
+ *			carrying callback function pointers
+ *
+ */
+struct cnic_ulp_ops bnx2i_cnic_cb = {
+	.cnic_init = bnx2i_ulp_init,
+	.cnic_exit = bnx2i_ulp_exit,
+	.cnic_start = bnx2i_start,
+	.cnic_stop = bnx2i_stop,
+	.indicate_kcqes = bnx2i_indicate_kcqe,
+	.indicate_netevent = bnx2i_indicate_netevent,
+	.cm_connect_complete = bnx2i_cm_connect_cmpl,
+	.cm_close_complete = bnx2i_cm_close_cmpl,
+	.cm_abort_complete = bnx2i_cm_abort_cmpl,
+	.cm_remote_close = bnx2i_cm_remote_close,
+	.cm_remote_abort = bnx2i_cm_remote_abort,
+	.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
+	.owner = THIS_MODULE
+};
+
+
+/**
+ * bnx2i_map_ep_dbell_regs - map connection doorbell registers
+ * @ep: bnx2i endpoint
+ *
+ * maps connection's SQ and RQ doorbell registers. 5706/5708/5709 devices host
+ *	these registers in BAR #0, whereas on 57710 these registers are
+ *	accessed by mapping BAR #1
+ */
+int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
+{
+	u32 cid_num;
+	u32 reg_off;
+	u32 first_l4l5;
+	u32 ctx_sz;
+	u32 config2;
+	resource_size_t reg_base;
+
+	cid_num = bnx2i_get_cid_num(ep);
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+		reg_base = pci_resource_start(ep->hba->pcidev,
+					      BNX2X_DOORBELL_PCI_BAR);
+		reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
+		ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+		goto arm_cq;
+	}
+
+	reg_base = ep->hba->netdev->base_addr;
+	if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
+	    (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
+		config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
+		first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
+		ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
+		if (ctx_sz)
+			reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
+				  + PAGE_SIZE *
+				  (((cid_num - first_l4l5) / ctx_sz) + 256);
+		else
+			reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+	} else
+		/* 5709 device in normal mode and 5706/5708 devices */
+		reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+
+	ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+					  MB_KERNEL_CTX_SIZE);
+	if (!ep->qp.ctx_base)
+		return -ENOMEM;
+
+arm_cq:
+	bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
+	return 0;
+}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644
index 0000000..c15ac5f
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -0,0 +1,434 @@
+/* bnx2i_init.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
+static u32 adapter_count;
+static int bnx2i_reg_device;
+
+#define DRV_MODULE_NAME		"bnx2i"
+#define DRV_MODULE_VERSION	"2.0.1d"
+#define DRV_MODULE_RELDATE	"Mar 25, 2009"
+
+static char version[] __devinitdata =
+		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
+		" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static DEFINE_RWLOCK(bnx2i_dev_lock);
+
+unsigned int event_coal_div = 1;
+module_param(event_coal_div, int, 0664);
+MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
+
+unsigned int en_tcp_dack = 1;
+module_param(en_tcp_dack, int, 0664);
+MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
+
+unsigned int error_mask1 = 0x00;
+module_param(error_mask1, int, 0664);
+MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
+
+unsigned int error_mask2 = 0x00;
+module_param(error_mask2, int, 0664);
+MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
+
+unsigned int sq_size;
+module_param(sq_size, int, 0664);
+MODULE_PARM_DESC(sq_size, "Configure SQ size");
+
+unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
+module_param(rq_size, int, 0664);
+MODULE_PARM_DESC(rq_size, "Configure RQ size");
+
+u64 iscsi_error_mask = 0x00;
+
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
+
+
+/**
+ * bnx2i_identify_device - identifies NetXtreme II device type
+ * @hba: 		Adapter structure pointer
+ *
+ * This function identifies the NX2 device type and sets the appropriate
+ *	queue mailbox register access method; 5709 requires the driver to
+ *	access MBOX regs using *bin* mode
+ */
+void bnx2i_identify_device(struct bnx2i_hba *hba)
+{
+	hba->cnic_dev_type = 0;
+	if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
+	    (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
+		set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
+	else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
+	    (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
+		set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
+	else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
+	    (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
+		set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+		hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+	} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57711)
+		set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+}
+
+
+/**
+ * get_adapter_list_head - returns head of adapter list
+ */
+struct bnx2i_hba *get_adapter_list_head(void)
+{
+	struct bnx2i_hba *hba = NULL;
+	struct bnx2i_hba *tmp_hba;
+
+	if (!adapter_count)
+		goto hba_not_found;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry(tmp_hba, &adapter_list, link) {
+		if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
+			hba = tmp_hba;
+			break;
+		}
+	}
+	read_unlock(&bnx2i_dev_lock);
+hba_not_found:
+	return hba;
+}
+
+
+/**
+ * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
+ * @cnic:	pointer to cnic device instance
+ *
+ */
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+	struct bnx2i_hba *hba, *temp;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry_safe(hba, temp, &adapter_list, link) {
+		if (hba->cnic == cnic) {
+			read_unlock(&bnx2i_dev_lock);
+			return hba;
+		}
+	}
+	read_unlock(&bnx2i_dev_lock);
+	return NULL;
+}
+
+
+/**
+ * bnx2i_start - cnic callback to initialize & start adapter instance
+ * @handle:	transparent handle pointing to adapter structure
+ *
+ * This function maps adapter structure to pcidev structure and initiates the
+ *	firmware handshake to enable/initialize on-chip iscsi components.
+ * 	This bnx2i - cnic interface api callback is issued after the following
+ *	2 conditions are met -
+ *	  a) underlying network interface is up (marked by the 'NETDEV_UP'
+ *		event from netdev)
+ *	  b) bnx2i adapter instance is registered
+ */
+void bnx2i_start(void *handle)
+{
+#define BNX2I_INIT_POLL_TIME	(1000 / HZ)
+	struct bnx2i_hba *hba = handle;
+	int i = HZ;
+
+	bnx2i_send_fw_iscsi_init_msg(hba);
+	while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+		msleep(BNX2I_INIT_POLL_TIME);
+}
+
+
+/**
+ * bnx2i_stop - cnic callback to shutdown adapter instance
+ * @handle:	transparent handle pointing to adapter structure
+ *
+ * The driver checks whether the adapter is already in shutdown mode and, if
+ *	not, starts the shutdown process
+ */
+void bnx2i_stop(void *handle)
+{
+	struct bnx2i_hba *hba = handle;
+
+	/* check if cleanup happened in GOING_DOWN context */
+	clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+	if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
+				&hba->adapter_state))
+		iscsi_host_for_each_session(hba->shost,
+					    bnx2i_drop_session);
+}
+
+/**
+ * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
+ * @hba:	Adapter instance to register
+ *
+ * registers bnx2i adapter instance with the cnic driver while holding the
+ *	adapter structure lock
+ */
+void bnx2i_register_device(struct bnx2i_hba *hba)
+{
+	spin_lock(&hba->lock);	/* called from ep_connect context */
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+	    test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		goto rel_lock;
+	}
+
+	hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
+	bnx2i_reg_device++;
+	set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+rel_lock:
+	spin_unlock(&hba->lock);
+}
+
+
+/**
+ * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
+ *
+ * registers all bnx2i adapter instances with the cnic driver while holding
+ *	the global resource lock
+ */
+void bnx2i_reg_dev_all(void)
+{
+	struct bnx2i_hba *hba, *temp;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry_safe(hba, temp, &adapter_list, link)
+		bnx2i_register_device(hba);
+	read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
+ * @hba:	Adapter instance to unregister
+ *
+ * unregisters the bnx2i adapter instance with the cnic driver while holding
+ *	the adapter structure lock
+ */
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
+{
+	spin_lock(&hba->lock); /* ep_connect/ep_disconnect() */
+	if (hba->ofld_conns_active ||
+	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
+	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) {
+		spin_unlock(&hba->lock);
+		return;
+	}
+
+	hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+	bnx2i_reg_device--;
+	/* ep_disconnect could come before NETDEV_DOWN, driver won't
+	 * see NETDEV_DOWN as it already unregistered itself.
+	 */
+	hba->adapter_state = 0;
+	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+	spin_unlock(&hba->lock);
+}
+
+/**
+ * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
+ *
+ * unregisters all bnx2i adapter instances with the cnic driver while holding
+ *	the global resource lock
+ */
+void bnx2i_unreg_dev_all(void)
+{
+	struct bnx2i_hba *hba, *temp;
+
+	read_lock(&bnx2i_dev_lock);
+	list_for_each_entry_safe(hba, temp, &adapter_list, link)
+		bnx2i_unreg_one_device(hba);
+	read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_init_one - initialize an adapter instance and add it to the adapter list
+ * @hba:	bnx2i adapter instance
+ * @cnic:	cnic device handle
+ *
+ * Global resource lock and host adapter lock are held during critical sections
+ *	below. This routine is called from cnic_register_driver() context and
+ *	from the worker thread which does the majority of device specific
+ *	initialization
+ */
+static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
+{
+	int rc;
+
+	read_lock(&bnx2i_dev_lock);
+	if (bnx2i_reg_device &&
+	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		spin_lock(&hba->lock);	/* hot plug */
+		rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
+		if (rc)		/* duplicate registration */
+			printk(KERN_ERR "bnx2i- dev reg failed\n");
+		bnx2i_reg_device++;
+		hba->age++;
+		set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+		spin_unlock(&hba->lock);
+	}
+	read_unlock(&bnx2i_dev_lock);
+
+	write_lock(&bnx2i_dev_lock);
+	list_add_tail(&hba->link, &adapter_list);
+	adapter_count++;
+	write_unlock(&bnx2i_dev_lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_ulp_init - initialize an adapter instance
+ * @dev:	cnic device handle
+ *
+ * Called from cnic_register_driver() context to initialize all enumerated
+ *	cnic devices. This routine allocates the adapter structure and other
+ *	device specific resources.
+ */
+void bnx2i_ulp_init(struct cnic_dev *dev)
+{
+	struct bnx2i_hba *hba;
+
+	/* Allocate a HBA structure for this device */
+	hba = bnx2i_alloc_hba(dev);
+	if (!hba) {
+		printk(KERN_ERR "bnx2i init: hba initialization failed\n");
+		return;
+	}
+
+	/* hba starts out unregistered with the cnic driver */
+	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+	if (bnx2i_init_one(hba, dev)) {
+		printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
+		bnx2i_free_hba(hba);
+	} else
+		hba->cnic = dev;
+}
+
+
+/**
+ * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
+ * @dev:	cnic device handle
+ *
+ */
+void bnx2i_ulp_exit(struct cnic_dev *dev)
+{
+	struct bnx2i_hba *hba;
+
+	hba = bnx2i_find_hba_for_cnic(dev);
+	if (!hba) {
+		printk(KERN_INFO "bnx2i_ulp_exit: hba not "
+				 "found, dev 0x%p\n", dev);
+		return;
+	}
+	write_lock(&bnx2i_dev_lock);
+	list_del_init(&hba->link);
+	adapter_count--;
+
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		spin_lock(&hba->lock);	/* hot remove */
+		hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+		clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+		bnx2i_reg_device--;
+		spin_unlock(&hba->lock);
+	}
+	write_unlock(&bnx2i_dev_lock);
+
+	bnx2i_free_hba(hba);
+}
+
+
+/**
+ * bnx2i_mod_init - module init entry point
+ *
+ * initializes driver wide global data structures such as the endpoint pool,
+ *	tcp port manager/queue and sysfs. Finally, the driver registers itself
+ *	with the cnic module
+ */
+static int __init bnx2i_mod_init(void)
+{
+	int err;
+
+	printk(KERN_INFO "%s", version);
+
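+	/* sq_size module parameter must be a power of two; round it up
+	 * silently if it is not.
+	 */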
+	if (!is_power_of_2(sq_size))
+		sq_size = roundup_pow_of_two(sq_size);
+
+	bnx2i_scsi_xport_template =
+			iscsi_register_transport(&bnx2i_iscsi_transport);
+	if (!bnx2i_scsi_xport_template) {
+		printk(KERN_ERR "Could not register bnx2i transport.\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
+	if (err) {
+		printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
+		goto unreg_xport;
+	}
+
+	return 0;
+
+unreg_xport:
+	iscsi_unregister_transport(&bnx2i_iscsi_transport);
+out:
+	return err;
+}
+
+
+/**
+ * bnx2i_mod_exit - module cleanup/exit entry point
+ *
+ * Global resource lock and host adapter lock are held during critical sections
+ *	in this function. The driver walks the adapter list, cleans up each
+ *	instance, unregisters the iscsi transport name and finally unregisters
+ *	itself with the cnic module
+ */
+static void __exit bnx2i_mod_exit(void)
+{
+	struct bnx2i_hba *hba;
+
+	write_lock(&bnx2i_dev_lock);
+	while (!list_empty(&adapter_list)) {
+		hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
+		list_del(&hba->link);
+		adapter_count--;
+
+		if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+			hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+			clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+			bnx2i_reg_device--;
+		}
+
+		write_unlock(&bnx2i_dev_lock);
+		bnx2i_free_hba(hba);
+		write_lock(&bnx2i_dev_lock);
+	}
+	write_unlock(&bnx2i_dev_lock);
+
+	iscsi_unregister_transport(&bnx2i_iscsi_transport);
+	cnic_unregister_driver(CNIC_ULP_ISCSI);
+}
+
+module_init(bnx2i_mod_init);
+module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644
index 0000000..2b86911
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -0,0 +1,2082 @@
+/*
+ * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+struct scsi_transport_template *bnx2i_scsi_xport_template;
+struct iscsi_transport bnx2i_iscsi_transport;
+static struct scsi_host_template bnx2i_host_template;
+
+/*
+ * Global endpoint resource info
+ */
+static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
+
+
+static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
+{
+	int retval = 0;
+
+	if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+	    test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+		retval = -EPERM;
+	return retval;
+}
+
+/**
+ * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
+ * @cmd:		iscsi cmd struct pointer
+ * @buf_off:		absolute buffer offset
+ * @start_bd_off:	u32 pointer to return the offset within the BD
+ *			indicated by 'start_bd_idx' on which 'buf_off' falls
+ * @start_bd_idx:	index of the BD on which 'buf_off' falls
+ *
+ * identifies & marks various bd info for scsi command's imm data,
+ * unsolicited data and the first solicited data seq.
+ */
+static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
+				       u32 *start_bd_off, u32 *start_bd_idx)
+{
+	struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
+	u32 cur_offset = 0;
+	u32 cur_bd_idx = 0;
+
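+	/* Walk the BD table, accumulating buffer lengths, until the BD that
+	 * contains 'buf_off' is reached; whatever remains becomes the offset
+	 * within that BD.
+	 */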
+	if (buf_off) {
+		while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
+			cur_offset += bd_tbl->buffer_length;
+			cur_bd_idx++;
+			bd_tbl++;
+		}
+	}
+
+	*start_bd_off = buf_off - cur_offset;
+	*start_bd_idx = cur_bd_idx;
+}
+
+/**
+ * bnx2i_setup_write_cmd_bd_info - sets up various BD information
+ * @task:	transport layer's cmd struct pointer
+ *
+ * identifies & marks various bd info for scsi command's immediate data,
+ * unsolicited data and first solicited data seq which includes BD start
+ * index & BD buf off. This function takes into account iscsi parameters such
+ * as whether immediate data and unsolicited data are supported on this
+ * connection.
+ */
+static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
+{
+	struct bnx2i_cmd *cmd = task->dd_data;
+	u32 start_bd_offset;
+	u32 start_bd_idx;
+	u32 buffer_offset = 0;
+	u32 cmd_len = cmd->req.total_data_transfer_length;
+
+	/* if ImmediateData is turned off & InitialR2T is turned on,
+	 * there will be no immediate or unsolicited data, just return.
+	 */
+	if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
+		return;
+
+	/* Immediate data */
+	buffer_offset += task->imm_count;
+	if (task->imm_count == cmd_len)
+		return;
+
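+	/* Unsolicited data immediately follows the immediate data in the I/O
+	 * buffer; record the BD index and offset where it begins.
+	 */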
+	if (iscsi_task_has_unsol_data(task)) {
+		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+					   &start_bd_offset, &start_bd_idx);
+		cmd->req.ud_buffer_offset = start_bd_offset;
+		cmd->req.ud_start_bd_index = start_bd_idx;
+		buffer_offset += task->unsol_r2t.data_length;
+	}
+
+	if (buffer_offset != cmd_len) {
+		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+					   &start_bd_offset, &start_bd_idx);
+		if ((start_bd_offset > task->conn->session->first_burst) ||
+		    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
+			int i = 0;
+
+			iscsi_conn_printk(KERN_ALERT, task->conn,
+					  "bnx2i- error, buf offset 0x%x "
+					  "bd_valid %d use_sg %d\n",
+					  buffer_offset, cmd->io_tbl.bd_valid,
+					  scsi_sg_count(cmd->scsi_cmd));
+			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
+				iscsi_conn_printk(KERN_ALERT, task->conn,
+						  "bnx2i err, bd[%d]: len %x\n",
+						  i, cmd->io_tbl.bd_tbl[i].
+						  buffer_length);
+		}
+		cmd->req.sd_buffer_offset = start_bd_offset;
+		cmd->req.sd_start_bd_index = start_bd_idx;
+	}
+}
+
+
+
+/**
+ * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
+ * @hba:	adapter instance
+ * @cmd:	iscsi cmd struct pointer
+ *
+ * map SG list
+ */
+static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+	struct scsi_cmnd *sc = cmd->scsi_cmd;
+	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+	struct scatterlist *sg;
+	int byte_count = 0;
+	int bd_count = 0;
+	int sg_count;
+	int sg_len;
+	u64 addr;
+	int i;
+
+	BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
+
+	sg_count = scsi_dma_map(sc);
+
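+	/* Each DMA-mapped segment becomes one BD entry; the 64-bit DMA address
+	 * is split into low/high halves and the first and last entries are
+	 * flagged to delimit the BD chain.
+	 */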
+	scsi_for_each_sg(sc, sg, sg_count, i) {
+		sg_len = sg_dma_len(sg);
+		addr = (u64) sg_dma_address(sg);
+		bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
+		bd[bd_count].buffer_addr_hi = addr >> 32;
+		bd[bd_count].buffer_length = sg_len;
+		bd[bd_count].flags = 0;
+		if (bd_count == 0)
+			bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+		byte_count += sg_len;
+		bd_count++;
+	}
+
+	if (bd_count)
+		bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
+
+	BUG_ON(byte_count != scsi_bufflen(sc));
+	return bd_count;
+}
+
+/**
+ * bnx2i_iscsi_map_sg_list - maps SG list
+ * @cmd:	iscsi cmd struct pointer
+ *
+ * creates BD list table for the command
+ */
+static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
+{
+	int bd_count;
+
+	bd_count  = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
+	if (!bd_count) {
+		struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+
+		bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
+		bd[0].buffer_length = bd[0].flags = 0;
+	}
+	cmd->io_tbl.bd_valid = bd_count;
+}
+
+
+/**
+ * bnx2i_iscsi_unmap_sg_list - unmaps SG list
+ * @cmd:	iscsi cmd struct pointer
+ *
+ * unmap IO buffers and invalidate the BD table
+ */
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
+{
+	struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+	if (cmd->io_tbl.bd_valid && sc) {
+		scsi_dma_unmap(sc);
+		cmd->io_tbl.bd_valid = 0;
+	}
+}
+
+static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
+{
+	memset(&cmd->req, 0x00, sizeof(cmd->req));
+	cmd->req.op_code = 0xFF;
+	cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
+	cmd->req.bd_list_addr_hi =
+		(u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
+
+}
+
+
+/**
+ * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
+ * @hba:	pointer to adapter instance
+ * @bnx2i_conn:	pointer to iscsi connection
+ * @iscsi_cid:	iscsi context ID, range 0 - (MAX_CONN - 1)
+ *
+ * update iscsi cid table entry with connection pointer. This enables
+ *	driver to quickly get hold of connection structure pointer in
+ *	completion/interrupt thread using iscsi context ID
+ */
+static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
+					struct bnx2i_conn *bnx2i_conn,
+					u32 iscsi_cid)
+{
+	if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
+		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+				 "conn bind - entry #%d not free\n", iscsi_cid);
+		return -EBUSY;
+	}
+
+	hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
+	return 0;
+}
+
+
+/**
+ * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
+ * @hba:	pointer to adapter instance
+ * @iscsi_cid:	iscsi context ID, range 0 - (MAX_CONN - 1)
+ */
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+					  u16 iscsi_cid)
+{
+	if (!hba->cid_que.conn_cid_tbl) {
+		printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
+		return NULL;
+
+	} else if (iscsi_cid >= hba->max_active_conns) {
+		printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
+		return NULL;
+	}
+	return hba->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+
+/**
+ * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
+ * @hba:	pointer to adapter instance
+ */
+static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
+{
+	int idx;
+
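+	/* Free CIDs are kept in a circular queue; consume from cid_q_cons_idx
+	 * and wrap around at cid_q_max_idx.
+	 */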
+	if (!hba->cid_que.cid_free_cnt)
+		return RESERVED_ITT;
+
+	idx = hba->cid_que.cid_q_cons_idx;
+	hba->cid_que.cid_q_cons_idx++;
+	if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
+		hba->cid_que.cid_q_cons_idx = 0;
+
+	hba->cid_que.cid_free_cnt--;
+	return hba->cid_que.cid_que[idx];
+}
+
+
+/**
+ * bnx2i_free_iscsi_cid - returns the iscsi_cid to the free pool
+ * @hba: 		pointer to adapter instance
+ * @iscsi_cid:		iscsi context ID to free
+ */
+static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
+{
+	int idx;
+
+	if (iscsi_cid == (u16)RESERVED_ITT)
+		return;
+
+	hba->cid_que.cid_free_cnt++;
+
+	idx = hba->cid_que.cid_q_prod_idx;
+	hba->cid_que.cid_que[idx] = iscsi_cid;
+	hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
+	hba->cid_que.cid_q_prod_idx++;
+	if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
+		hba->cid_que.cid_q_prod_idx = 0;
+}
+
+
+/**
+ * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
+ * @hba:	pointer to adapter instance
+ *
+ * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
+ * 	and initializes table attributes
+ */
+static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
+{
+	int mem_size;
+	int i;
+
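+	/* Both allocations below are rounded up to a whole page. */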
+	mem_size = hba->max_active_conns * sizeof(u32);
+	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
+	if (!hba->cid_que.cid_que_base)
+		return -ENOMEM;
+
+	mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
+	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
+	if (!hba->cid_que.conn_cid_tbl) {
+		kfree(hba->cid_que.cid_que_base);
+		hba->cid_que.cid_que_base = NULL;
+		return -ENOMEM;
+	}
+
+	hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
+	hba->cid_que.cid_q_prod_idx = 0;
+	hba->cid_que.cid_q_cons_idx = 0;
+	hba->cid_que.cid_q_max_idx = hba->max_active_conns;
+	hba->cid_que.cid_free_cnt = hba->max_active_conns;
+
+	for (i = 0; i < hba->max_active_conns; i++) {
+		hba->cid_que.cid_que[i] = i;
+		hba->cid_que.conn_cid_tbl[i] = NULL;
+	}
+	return 0;
+}
+
+
+/**
+ * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
+ * @hba:	pointer to adapter instance
+ */
+static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
+{
+	kfree(hba->cid_que.cid_que_base);
+	hba->cid_que.cid_que_base = NULL;
+
+	kfree(hba->cid_que.conn_cid_tbl);
+	hba->cid_que.conn_cid_tbl = NULL;
+}
+
+
+/**
+ * bnx2i_alloc_ep - allocates ep structure from global pool
+ * @hba:	pointer to adapter instance
+ *
+ * routine allocates a free endpoint structure from global pool and
+ *	a tcp port to be used for this connection.  Global resource lock,
+ *	'bnx2i_resc_lock' is held while accessing shared global data structures
+ */
+static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
+{
+	struct iscsi_endpoint *ep;
+	struct bnx2i_endpoint *bnx2i_ep;
+
+	ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
+	if (!ep) {
+		printk(KERN_ERR "bnx2i: Could not allocate ep\n");
+		return NULL;
+	}
+
+	bnx2i_ep = ep->dd_data;
+	INIT_LIST_HEAD(&bnx2i_ep->link);
+	bnx2i_ep->state = EP_STATE_IDLE;
+	bnx2i_ep->hba = hba;
+	bnx2i_ep->hba_age = hba->age;
+	hba->ofld_conns_active++;
+	init_waitqueue_head(&bnx2i_ep->ofld_wait);
+	return ep;
+}
+
+
+/**
+ * bnx2i_free_ep - free endpoint
+ * @ep:		pointer to iscsi endpoint structure
+ */
+static void bnx2i_free_ep(struct iscsi_endpoint *ep)
+{
+	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bnx2i_resc_lock, flags);
+	bnx2i_ep->state = EP_STATE_IDLE;
+	bnx2i_ep->hba->ofld_conns_active--;
+
+	bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
+	if (bnx2i_ep->conn) {
+		bnx2i_ep->conn->ep = NULL;
+		bnx2i_ep->conn = NULL;
+	}
+
+	bnx2i_ep->hba = NULL;
+	spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
+	iscsi_destroy_endpoint(ep);
+}
+
+
+/**
+ * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
+ * @hba:	adapter instance pointer
+ * @session:	iscsi session pointer
+ * @cmd:	iscsi command structure
+ */
+static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
+			   struct bnx2i_cmd *cmd)
+{
+	struct io_bdt *io = &cmd->io_tbl;
+	struct iscsi_bd *bd;
+
+	io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+					ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
+					&io->bd_tbl_dma, GFP_KERNEL);
+	if (!io->bd_tbl) {
+		iscsi_session_printk(KERN_ERR, session, "Could not "
+				     "allocate bdt.\n");
+		return -ENOMEM;
+	}
+	io->bd_valid = 0;
+	return 0;
+}
+
+/**
+ * bnx2i_destroy_cmd_pool - destroys iscsi command pool and releases BD tables
+ * @hba:	adapter instance pointer
+ * @session:	iscsi session pointer
+ */
+static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
+				   struct iscsi_session *session)
+{
+	int i;
+
+	for (i = 0; i < session->cmds_max; i++) {
+		struct iscsi_task *task = session->cmds[i];
+		struct bnx2i_cmd *cmd = task->dd_data;
+
+		if (cmd->io_tbl.bd_tbl)
+			dma_free_coherent(&hba->pcidev->dev,
+					  ISCSI_MAX_BDS_PER_CMD *
+					  sizeof(struct iscsi_bd),
+					  cmd->io_tbl.bd_tbl,
+					  cmd->io_tbl.bd_tbl_dma);
+	}
+
+}
+
+
+/**
+ * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
+ * @hba:	adapter instance pointer
+ * @session:	iscsi session pointer
+ */
+static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
+				struct iscsi_session *session)
+{
+	int i;
+
+	for (i = 0; i < session->cmds_max; i++) {
+		struct iscsi_task *task = session->cmds[i];
+		struct bnx2i_cmd *cmd = task->dd_data;
+
+		/* Anil */
+		task->hdr = &cmd->hdr;
+		task->hdr_max = sizeof(struct iscsi_hdr);
+
+		if (bnx2i_alloc_bdt(hba, session, cmd))
+			goto free_bdts;
+	}
+
+	return 0;
+
+free_bdts:
+	bnx2i_destroy_cmd_pool(hba, session);
+	return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_setup_mp_bdt - allocate BD table resources
+ * @hba:	pointer to adapter structure
+ *
+ * Allocate memory for dummy buffer and associated BD
+ * table to be used by middle path (MP) requests
+ */
+static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
+{
+	int rc = 0;
+	struct iscsi_bd *mp_bdt;
+	u64 addr;
+
+	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					    &hba->mp_bd_dma, GFP_KERNEL);
+	if (!hba->mp_bd_tbl) {
+		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
+		rc = -1;
+		goto out;
+	}
+
+	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					       &hba->dummy_buf_dma, GFP_KERNEL);
+	if (!hba->dummy_buffer) {
+		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->mp_bd_tbl, hba->mp_bd_dma);
+		hba->mp_bd_tbl = NULL;
+		rc = -1;
+		goto out;
+	}
+
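+	/* Single-entry BD chain: the lone descriptor covers the dummy page and
+	 * is flagged as both the first and last entry in the chain.
+	 */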
+	mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
+	addr = (unsigned long) hba->dummy_buf_dma;
+	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
+	mp_bdt->buffer_addr_hi = addr >> 32;
+	mp_bdt->buffer_length = PAGE_SIZE;
+	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+out:
+	return rc;
+}
+
+
+/**
+ * bnx2i_free_mp_bdt - releases the middle path dummy buffer and BD table
+ * @hba:	pointer to adapter instance
+ *
+ * free MP dummy buffer and associated BD table
+ */
+static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
+{
+	if (hba->mp_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->mp_bd_tbl, hba->mp_bd_dma);
+		hba->mp_bd_tbl = NULL;
+	}
+	if (hba->dummy_buffer) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->dummy_buffer, hba->dummy_buf_dma);
+		hba->dummy_buffer = NULL;
+	}
+}
+
+/**
+ * bnx2i_drop_session - notifies iscsid of connection error.
+ * @cls_session:	iscsi cls session pointer
+ *
+ * This notifies iscsid that there is an error, so it can initiate
+ * recovery.
+ *
+ * This relies on the caller using the iscsi class iterator so the object
+ * is refcounted and does not disappear from under us.
+ */
+void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
+{
+	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+/**
+ * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
+ * @hba:	pointer to adapter instance
+ * @ep:		pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_add_tail(&ep->link, &hba->ep_destroy_list);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+/**
+ * bnx2i_ep_destroy_list_del - remove an entry from the EP destroy list
+ *
+ * @hba: 		pointer to adapter instance
+ * @ep: 		pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_del_init(&ep->link);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+
+	return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
+ * @hba:	pointer to adapter instance
+ * @ep:		pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
+				  struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_add_tail(&ep->link, &hba->ep_ofld_list);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_del - remove an entry from the ep offload pending list
+ * @hba: 		pointer to adapter instance
+ * @ep: 		pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
+				  struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_del_init(&ep->link);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
+ *
+ * @hba: 		pointer to adapter instance
+ * @iscsi_cid:		iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2i_endpoint *ep;
+
+	read_lock_bh(&hba->ep_rdwr_lock);
+	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
+		ep = (struct bnx2i_endpoint *)list;
+
+		if (ep->ep_iscsi_cid == iscsi_cid)
+			break;
+		ep = NULL;
+	}
+	read_unlock_bh(&hba->ep_rdwr_lock);
+
+	if (!ep)
+		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+	return ep;
+}
+
+
+/**
+ * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
+ * @hba: 		pointer to adapter instance
+ * @iscsi_cid:		iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2i_endpoint *ep;
+
+	read_lock_bh(&hba->ep_rdwr_lock);
+	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
+		ep = (struct bnx2i_endpoint *)list;
+
+		if (ep->ep_iscsi_cid == iscsi_cid)
+			break;
+		ep = NULL;
+	}
+	read_unlock_bh(&hba->ep_rdwr_lock);
+
+	if (!ep)
+		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+
+	return ep;
+}
+
+/**
+ * bnx2i_setup_host_queue_size - assigns shost->can_queue param
+ * @hba:	pointer to adapter instance
+ * @shost:	scsi host pointer
+ *
+ * Initializes 'can_queue' parameter based on how many outstanding commands
+ * 	the device can handle. Each device 5708/5709/57710 has different
+ *	capabilities
+ */
+static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
+					struct Scsi_Host *shost)
+{
+	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
+	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
+	else
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+}
+
+
+/**
+ * bnx2i_alloc_hba - allocate and init adapter instance
+ * @cnic:	cnic device pointer
+ *
+ * allocate & initialize adapter structure and call other
+ *	support routines to do per adapter initialization
+ */
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+{
+	struct Scsi_Host *shost;
+	struct bnx2i_hba *hba;
+
+	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
+	if (!shost)
+		return NULL;
+	shost->dma_boundary = cnic->pcidev->dma_mask;
+	shost->transportt = bnx2i_scsi_xport_template;
+	shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+	shost->max_channel = 0;
+	shost->max_lun = 512;
+	shost->max_cmd_len = 16;
+
+	hba = iscsi_host_priv(shost);
+	hba->shost = shost;
+	hba->netdev = cnic->netdev;
+	/* Get PCI related information and update hba struct members */
+	hba->pcidev = cnic->pcidev;
+	pci_dev_get(hba->pcidev);
+	hba->pci_did = hba->pcidev->device;
+	hba->pci_vid = hba->pcidev->vendor;
+	hba->pci_sdid = hba->pcidev->subsystem_device;
+	hba->pci_svid = hba->pcidev->subsystem_vendor;
+	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
+	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
+	bnx2i_identify_device(hba);
+
+	bnx2i_setup_host_queue_size(hba, shost);
+
+	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+		hba->regview = ioremap_nocache(hba->netdev->base_addr,
+					       BNX2_MQ_CONFIG2);
+		if (!hba->regview)
+			goto ioreg_map_err;
+	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+		if (!hba->regview)
+			goto ioreg_map_err;
+	}
+
+	if (bnx2i_setup_mp_bdt(hba))
+		goto mp_bdt_mem_err;
+
+	INIT_LIST_HEAD(&hba->ep_ofld_list);
+	INIT_LIST_HEAD(&hba->ep_destroy_list);
+	rwlock_init(&hba->ep_rdwr_lock);
+
+	hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
+
+	/* different values for 5708/5709/57710 */
+	hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
+
+	if (bnx2i_setup_free_cid_que(hba))
+		goto cid_que_err;
+
+	/* SQ/RQ/CQ size can be changed via the sysfs interface */
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
+			hba->max_sqes = sq_size;
+		else
+			hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
+	} else {	/* 5706/5708/5709 */
+		if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
+			hba->max_sqes = sq_size;
+		else
+			hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
+	}
+
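+	/* The CQ is sized to hold a completion for every SQ and RQ entry,
+	 * then capped at the per-device maximum below.
+	 */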
+	hba->max_rqes = rq_size;
+	hba->max_cqes = hba->max_sqes + rq_size;
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
+			hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
+	} else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
+		hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
+
+	hba->num_ccell = hba->max_sqes / 2;
+
+	spin_lock_init(&hba->lock);
+	spin_lock_init(&hba->net_dev_lock);
+
+	if (iscsi_host_add(shost, &hba->pcidev->dev))
+		goto free_dump_mem;
+	return hba;
+
+free_dump_mem:
+	bnx2i_release_free_cid_que(hba);
+cid_que_err:
+	bnx2i_free_mp_bdt(hba);
+mp_bdt_mem_err:
+	if (hba->regview) {
+		iounmap(hba->regview);
+		hba->regview = NULL;
+	}
+ioreg_map_err:
+	pci_dev_put(hba->pcidev);
+	scsi_host_put(shost);
+	return NULL;
+}
+
+/**
+ * bnx2i_free_hba- releases hba structure and resources held by the adapter
+ * @hba:	pointer to adapter instance
+ *
+ * free adapter structure and call various cleanup routines.
+ */
+void bnx2i_free_hba(struct bnx2i_hba *hba)
+{
+	struct Scsi_Host *shost = hba->shost;
+
+	iscsi_host_remove(shost);
+	INIT_LIST_HEAD(&hba->ep_ofld_list);
+	INIT_LIST_HEAD(&hba->ep_destroy_list);
+	pci_dev_put(hba->pcidev);
+
+	if (hba->regview) {
+		iounmap(hba->regview);
+		hba->regview = NULL;
+	}
+	bnx2i_free_mp_bdt(hba);
+	bnx2i_release_free_cid_que(hba);
+	iscsi_host_free(shost);
+}
+
+/**
+ * bnx2i_conn_free_login_resources - free DMA resources used for login process
+ * @hba:		pointer to adapter instance
+ * @bnx2i_conn:		iscsi connection pointer
+ *
+ * Login related resources, mostly BDT & payload DMA memory, are freed
+ */
+static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
+					    struct bnx2i_conn *bnx2i_conn)
+{
+	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  bnx2i_conn->gen_pdu.resp_bd_tbl,
+				  bnx2i_conn->gen_pdu.resp_bd_dma);
+		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  bnx2i_conn->gen_pdu.req_bd_tbl,
+				  bnx2i_conn->gen_pdu.req_bd_dma);
+		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.resp_buf) {
+		dma_free_coherent(&hba->pcidev->dev,
+				  ISCSI_DEF_MAX_RECV_SEG_LEN,
+				  bnx2i_conn->gen_pdu.resp_buf,
+				  bnx2i_conn->gen_pdu.resp_dma_addr);
+		bnx2i_conn->gen_pdu.resp_buf = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.req_buf) {
+		dma_free_coherent(&hba->pcidev->dev,
+				  ISCSI_DEF_MAX_RECV_SEG_LEN,
+				  bnx2i_conn->gen_pdu.req_buf,
+				  bnx2i_conn->gen_pdu.req_dma_addr);
+		bnx2i_conn->gen_pdu.req_buf = NULL;
+	}
+}
+
+/**
+ * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
+ * @hba:		pointer to adapter instance
+ * @bnx2i_conn:		iscsi connection pointer
+ *
+ * Mgmt task DMA resources are allocated in this routine.
+ */
+static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
+					    struct bnx2i_conn *bnx2i_conn)
+{
+	/* Allocate memory for login request/response buffers */
+	bnx2i_conn->gen_pdu.req_buf =
+		dma_alloc_coherent(&hba->pcidev->dev,
+				   ISCSI_DEF_MAX_RECV_SEG_LEN,
+				   &bnx2i_conn->gen_pdu.req_dma_addr,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.req_buf == NULL)
+		goto login_req_buf_failure;
+
+	bnx2i_conn->gen_pdu.req_buf_size = 0;
+	bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
+
+	bnx2i_conn->gen_pdu.resp_buf =
+		dma_alloc_coherent(&hba->pcidev->dev,
+				   ISCSI_DEF_MAX_RECV_SEG_LEN,
+				   &bnx2i_conn->gen_pdu.resp_dma_addr,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.resp_buf == NULL)
+		goto login_resp_buf_failure;
+
+	bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
+
+	bnx2i_conn->gen_pdu.req_bd_tbl =
+		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
+		goto login_req_bd_tbl_failure;
+
+	bnx2i_conn->gen_pdu.resp_bd_tbl =
+		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				   &bnx2i_conn->gen_pdu.resp_bd_dma,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
+		goto login_resp_bd_tbl_failure;
+
+	return 0;
+
+login_resp_bd_tbl_failure:
+	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+			  bnx2i_conn->gen_pdu.req_bd_tbl,
+			  bnx2i_conn->gen_pdu.req_bd_dma);
+	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+			  bnx2i_conn->gen_pdu.resp_buf,
+			  bnx2i_conn->gen_pdu.resp_dma_addr);
+	bnx2i_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+			  bnx2i_conn->gen_pdu.req_buf,
+			  bnx2i_conn->gen_pdu.req_dma_addr);
+	bnx2i_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+	iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
+			  "login resource alloc failed!!\n");
+	return -ENOMEM;
+
+}
+
+
+/**
+ * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
+ * @bnx2i_conn:		iscsi connection pointer
+ *
+ * Prepares the buffer descriptor (BD) tables before shipping requests to cnic
+ *	for PDUs prepared by the 'iscsid' daemon
+ */
+static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
+{
+	struct iscsi_bd *bd_tbl;
+
+	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
+
+	bd_tbl->buffer_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
+	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
+	bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
+				bnx2i_conn->gen_pdu.req_buf;
+	bd_tbl->reserved0 = 0;
+	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+	bd_tbl = (struct iscsi_bd  *) bnx2i_conn->gen_pdu.resp_bd_tbl;
+	bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
+	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
+	bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
+	bd_tbl->reserved0 = 0;
+	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+}
+
+
+/**
+ * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
+ * @task:	transport layer task pointer
+ *
+ * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
+ *	Nop-out and Logout requests flow through this path.
+ */
+static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
+{
+	struct bnx2i_cmd *cmd = task->dd_data;
+	struct bnx2i_conn *bnx2i_conn = cmd->conn;
+	int rc = 0;
+	char *buf;
+	int data_len;
+
+	bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
+	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+	case ISCSI_OP_LOGIN:
+		bnx2i_send_iscsi_login(bnx2i_conn, task);
+		break;
+	case ISCSI_OP_NOOP_OUT:
+		data_len = bnx2i_conn->gen_pdu.req_buf_size;
+		buf = bnx2i_conn->gen_pdu.req_buf;
+		if (data_len)
+			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+						     RESERVED_ITT,
+						     buf, data_len, 1);
+		else
+			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+						     RESERVED_ITT,
+						     NULL, 0, 1);
+		break;
+	case ISCSI_OP_LOGOUT:
+		rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
+		break;
+	case ISCSI_OP_SCSI_TMFUNC:
+		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
+		break;
+	default:
+		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+				  "send_gen: unsupported op 0x%x\n",
+				  task->hdr->opcode);
+	}
+	return rc;
+}
+
+
+/**********************************************************************
+ *		SCSI-ML Interface
+ **********************************************************************/
+
+/**
+ * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
+ * @sc:		SCSI-ML command pointer
+ * @cmd:	iscsi cmd pointer
+ */
+static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
+{
+	u32 dword;
+	int lpcnt;
+	u8 *srcp;
+	u32 *dstp;
+	u32 scsi_lun[2];
+
+	int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
+	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
+	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
+
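+	/* Copy the CDB four bytes at a time, converting each word to
+	 * big-endian; a CDB length that is not a multiple of four is handled
+	 * after the loop.
+	 */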
+	lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
+	srcp = (u8 *) sc->cmnd;
+	dstp = (u32 *) cmd->req.cdb;
+	while (lpcnt--) {
+		memcpy(&dword, (const void *) srcp, 4);
+		*dstp = cpu_to_be32(dword);
+		srcp += 4;
+		dstp++;
+	}
+	if (sc->cmd_len & 0x3) {
+		dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
+		*dstp = cpu_to_be32(dword);
+	}
+}
+
+static void bnx2i_cleanup_task(struct iscsi_task *task)
+{
+	struct iscsi_conn *conn = task->conn;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_hba *hba = bnx2i_conn->hba;
+
+	/*
+	 * mgmt task or cmd was never sent to us to transmit.
+	 */
+	if (!task->sc || task->state == ISCSI_TASK_PENDING)
+		return;
+	/*
+	 * need to clean-up task context to claim dma buffers
+	 */
+	if (task->state == ISCSI_TASK_ABRT_TMF) {
+		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
+
+		spin_unlock_bh(&conn->session->lock);
+		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
+				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
+		spin_lock_bh(&conn->session->lock);
+	}
+	bnx2i_iscsi_unmap_sg_list(task->dd_data);
+}
+
+/**
+ * bnx2i_mtask_xmit - transmit mtask to chip for further processing
+ * @conn:	transport layer conn structure pointer
+ * @task:	transport layer command structure pointer
+ */
+static int
+bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_cmd *cmd = task->dd_data;
+
+	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+	bnx2i_setup_cmd_wqe_template(cmd);
+	bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+	if (task->data_count) {
+		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
+		       task->data_count);
+		bnx2i_conn->gen_pdu.req_wr_ptr =
+			bnx2i_conn->gen_pdu.req_buf + task->data_count;
+	}
+	cmd->conn = conn->dd_data;
+	cmd->scsi_cmd = NULL;
+	return bnx2i_iscsi_send_generic_request(task);
+}
+
+/**
+ * bnx2i_task_xmit - transmit iscsi command to chip for further processing
+ * @task:	transport layer command structure pointer
+ *
+ * maps SG buffers and send request to chip/firmware in the form of SQ WQE
+ */
+static int bnx2i_task_xmit(struct iscsi_task *task)
+{
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_session *session = conn->session;
+	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct scsi_cmnd *sc = task->sc;
+	struct bnx2i_cmd *cmd = task->dd_data;
+	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+
+	if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+		return -ENOTCONN;
+
+	if (!bnx2i_conn->is_bound)
+		return -ENOTCONN;
+
+	/*
+	 * If there is no scsi_cmnd this must be a mgmt task
+	 */
+	if (!sc)
+		return bnx2i_mtask_xmit(conn, task);
+
+	bnx2i_setup_cmd_wqe_template(cmd);
+	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
+	cmd->conn = bnx2i_conn;
+	cmd->scsi_cmd = sc;
+	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
+	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
+
+	bnx2i_iscsi_map_sg_list(cmd);
+	bnx2i_cpy_scsi_cdb(sc, cmd);
+
+	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
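+	/* The task type (read vs. write) is folded into the ITT via
+	 * ISCSI_CMD_REQUEST_TYPE_SHIFT and the matching op_attr flag is set
+	 * below.
+	 */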
+	if (sc->sc_data_direction == DMA_TO_DEVICE) {
+		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
+		cmd->req.itt = task->itt |
+			(ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+		bnx2i_setup_write_cmd_bd_info(task);
+	} else {
+		if (scsi_bufflen(sc))
+			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
+		cmd->req.itt = task->itt |
+			(ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+	}
+
+	cmd->req.num_bds = cmd->io_tbl.bd_valid;
+	if (!cmd->io_tbl.bd_valid) {
+		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
+		cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
+		cmd->req.num_bds = 1;
+	}
+
+	bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
+	return 0;
+}
+
+/**
+ * bnx2i_session_create - create a new iscsi session
+ * @ep:			pointer to iscsi endpoint on which the session is created
+ * @cmds_max:		max commands supported
+ * @qdepth:		scsi queue depth to support
+ * @initial_cmdsn:	initial iscsi CMDSN to be used for this session
+ *
+ * Creates a new iSCSI session instance on given device.
+ */
+static struct iscsi_cls_session *
+bnx2i_session_create(struct iscsi_endpoint *ep,
+		     uint16_t cmds_max, uint16_t qdepth,
+		     uint32_t initial_cmdsn)
+{
+	struct Scsi_Host *shost;
+	struct iscsi_cls_session *cls_session;
+	struct bnx2i_hba *hba;
+	struct bnx2i_endpoint *bnx2i_ep;
+
+	if (!ep) {
+		printk(KERN_ERR "bnx2i: missing ep.\n");
+		return NULL;
+	}
+
+	bnx2i_ep = ep->dd_data;
+	shost = bnx2i_ep->hba->shost;
+	hba = iscsi_host_priv(shost);
+	if (bnx2i_adapter_ready(hba))
+		return NULL;
+
+	/*
+	 * user can override hw limit as long as it is within
+	 * the min/max.
+	 */
+	if (cmds_max > hba->max_sqes)
+		cmds_max = hba->max_sqes;
+	else if (cmds_max < BNX2I_SQ_WQES_MIN)
+		cmds_max = BNX2I_SQ_WQES_MIN;
+
+	cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
+					  cmds_max, sizeof(struct bnx2i_cmd),
+					  initial_cmdsn, ISCSI_MAX_TARGET);
+	if (!cls_session)
+		return NULL;
+
+	if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
+		goto session_teardown;
+	return cls_session;
+
+session_teardown:
+	iscsi_session_teardown(cls_session);
+	return NULL;
+}
+
+
+/**
+ * bnx2i_session_destroy - destroys iscsi session
+ * @cls_session:	pointer to iscsi cls session
+ *
+ * Destroys previously created iSCSI session instance and releases
+ *	all resources held by it
+ */
+static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *session = cls_session->dd_data;
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+
+	bnx2i_destroy_cmd_pool(hba, session);
+	iscsi_session_teardown(cls_session);
+}
+
+
+/**
+ * bnx2i_conn_create - create iscsi connection instance
+ * @cls_session:	pointer to iscsi cls session
+ * @cid:		iscsi cid as per rfc (not NX2's CID terminology)
+ *
+ * Creates a new iSCSI connection instance for a given session
+ */
+static struct iscsi_cls_conn *
+bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_conn *bnx2i_conn;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_conn *conn;
+
+	cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
+				    cid);
+	if (!cls_conn)
+		return NULL;
+	conn = cls_conn->dd_data;
+
+	bnx2i_conn = conn->dd_data;
+	bnx2i_conn->cls_conn = cls_conn;
+	bnx2i_conn->hba = hba;
+	/* 'ep' ptr will be assigned in bind() call */
+	bnx2i_conn->ep = NULL;
+	bnx2i_conn->exp_statsn = BNX2I_STATSN_UPDATE_SIGNATURE;
+	init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
+
+	if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
+		iscsi_conn_printk(KERN_ALERT, conn,
+				  "conn_new: login resc alloc failed!!\n");
+		goto free_conn;
+	}
+
+	return cls_conn;
+
+free_conn:
+	iscsi_conn_teardown(cls_conn);
+	return NULL;
+}
+
+/**
+ * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
+ * @cls_session:	pointer to iscsi cls session
+ * @cls_conn:		pointer to iscsi cls conn
+ * @transport_fd:	64-bit EP handle
+ * @is_leading:		leading connection on this session?
+ *
+ * Binds together iSCSI session instance, iSCSI connection instance
+ *	and the TCP connection. This routine returns an error code if the
+ *	TCP connection does not belong to the device the iSCSI sess/conn
+ *	is bound to
+ */
+static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+			   struct iscsi_cls_conn *cls_conn,
+			   uint64_t transport_fd, int is_leading)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct iscsi_endpoint *ep;
+	int ret_code;
+
+	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
+
+	bnx2i_ep = ep->dd_data;
+	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+		/* Peer disconnected via FIN or RST */
+		return -EINVAL;
+
+	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+		return -EINVAL;
+
+	if (bnx2i_ep->hba != hba) {
+		/* Error - TCP connection does not belong to this device
+		 */
+		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+				  "conn bind, ep=0x%p (%s) does not",
+				  bnx2i_ep, bnx2i_ep->hba->netdev->name);
+		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+				  "belong to hba (%s)\n",
+				  hba->netdev->name);
+		return -EEXIST;
+	}
+
+	bnx2i_ep->conn = bnx2i_conn;
+	bnx2i_conn->ep = bnx2i_ep;
+	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
+	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
+	bnx2i_conn->is_bound = 1;
+
+	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
+						bnx2i_ep->ep_iscsi_cid);
+
+	/* 5706/5708/5709 FW treats the RQ as full when initialized, but for
+	 * 57710 the driver needs to explicitly replenish the RQ index during
+	 * setup.
+	 */
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+		bnx2i_put_rq_buf(bnx2i_conn, 0);
+
+	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+	return ret_code;
+}
+
+
+/**
+ * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
+ * @cls_conn:	pointer to iscsi cls conn
+ *
+ * Destroy an iSCSI connection instance and release memory resources held by
+ *	this connection
+ */
+static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct Scsi_Host *shost;
+	struct bnx2i_hba *hba;
+
+	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+	hba = iscsi_host_priv(shost);
+
+	bnx2i_conn_free_login_resources(hba, bnx2i_conn);
+	iscsi_conn_teardown(cls_conn);
+}
+
+
+/**
+ * bnx2i_conn_get_param - return iscsi connection parameter to caller
+ * @cls_conn:	pointer to iscsi cls conn
+ * @param:	parameter type identifier
+ * @buf: 	buffer pointer
+ *
+ * returns iSCSI connection parameters
+ */
+static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
+				enum iscsi_param param, char *buf)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	int len = 0;
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+		if (bnx2i_conn->ep)
+			len = sprintf(buf, "%hu\n",
+				      bnx2i_conn->ep->cm_sk->dst_port);
+		break;
+	case ISCSI_PARAM_CONN_ADDRESS:
+		if (bnx2i_conn->ep)
+			len = sprintf(buf, NIPQUAD_FMT "\n",
+				      NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
+		break;
+	default:
+		return iscsi_conn_get_param(cls_conn, param, buf);
+	}
+
+	return len;
+}
+
+/**
+ * bnx2i_host_get_param - returns host (adapter) related parameters
+ * @shost:	scsi host pointer
+ * @param:	parameter type identifier
+ * @buf:	buffer pointer
+ */
+static int bnx2i_host_get_param(struct Scsi_Host *shost,
+				enum iscsi_host_param param, char *buf)
+{
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	int len = 0;
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_HWADDRESS:
+		len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
+		break;
+	case ISCSI_HOST_PARAM_NETDEV_NAME:
+		len = sprintf(buf, "%s\n", hba->netdev->name);
+		break;
+	default:
+		return iscsi_host_get_param(shost, param, buf);
+	}
+	return len;
+}
+
+/**
+ * bnx2i_conn_start - completes iscsi connection migration to FFP
+ * @cls_conn:	pointer to iscsi cls conn
+ *
+ * last call in FFP migration to handover iscsi conn to the driver
+ */
+static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+
+	bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
+	bnx2i_update_iscsi_conn(conn);
+
+	/*
+	 * this should normally not sleep for a long time so it should
+	 * not disrupt the caller.
+	 */
+	bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
+	bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
+	add_timer(&bnx2i_conn->ep->ofld_timer);
+	/* update iSCSI context for this conn, wait for CNIC to complete */
+	wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
+			bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_conn->ep->ofld_timer);
+
+	iscsi_conn_start(cls_conn);
+	return 0;
+}
+
+
+/**
+ * bnx2i_conn_get_stats - returns iSCSI stats
+ * @cls_conn:	pointer to iscsi cls conn
+ * @stats:	pointer to iscsi statistic struct
+ */
+static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+				 struct iscsi_stats *stats)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+
+	stats->txdata_octets = conn->txdata_octets;
+	stats->rxdata_octets = conn->rxdata_octets;
+	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+	stats->dataout_pdus = conn->dataout_pdus_cnt;
+	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+	stats->datain_pdus = conn->datain_pdus_cnt;
+	stats->r2t_pdus = conn->r2t_pdus_cnt;
+	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+	stats->custom_length = 3;
+	strcpy(stats->custom[2].desc, "eh_abort_cnt");
+	stats->custom[2].value = conn->eh_abort_cnt;
+	stats->digest_err = 0;
+	stats->timeout_err = 0;
+	stats->custom_length = 0;
+}
+
+
+
+/**
+ * bnx2i_check_nx2_dev_busy - this routine unregisters devices.
+ *
+ * This should only unregister if there are no active conns.
+ */
+void bnx2i_check_nx2_dev_busy(void)
+{
+	bnx2i_unreg_dev_all();
+}
+
+
+/**
+ * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
+ * @dst_addr:	target IP address
+ *
+ * check if route resolves to BNX2 device
+ */
+static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
+{
+	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+	struct bnx2i_hba *hba;
+	struct cnic_dev *cnic = NULL;
+
+	bnx2i_reg_dev_all();
+
+	hba = get_adapter_list_head();
+	if (hba && hba->cnic)
+		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
+	if (!cnic) {
+		printk(KERN_ALERT "bnx2i: no route, "
+		       "can't connect using cnic\n");
+		goto no_nx2_route;
+	}
+	hba = bnx2i_find_hba_for_cnic(cnic);
+	if (!hba)
+		goto no_nx2_route;
+
+	if (bnx2i_adapter_ready(hba)) {
+		printk(KERN_ALERT "bnx2i: check route, hba not found\n");
+		goto no_nx2_route;
+	}
+	if (hba->netdev->mtu > hba->mtu_supported) {
+		printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
+				  hba->netdev->name, hba->netdev->mtu);
+		printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
+				  hba->mtu_supported);
+		goto no_nx2_route;
+	}
+	return hba;
+no_nx2_route:
+	return NULL;
+}
+
+
+/**
+ * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
+ * @hba:	pointer to adapter instance
+ * @ep:		endpoint (transport identifier) structure
+ *
+ * destroys cm_sock structure and on chip iscsi context
+ */
+static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
+				 struct bnx2i_endpoint *ep)
+{
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+		hba->cnic->cm_destroy(ep->cm_sk);
+
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+		ep->state = EP_STATE_DISCONN_COMPL;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
+	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
+		printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
+				  " NW/PCIe trace, driver msgs to developers"
+				  " for analysis\n");
+		return 1;
+	}
+
+	ep->state = EP_STATE_CLEANUP_START;
+	init_timer(&ep->ofld_timer);
+	ep->ofld_timer.expires = 10*HZ + jiffies;
+	ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	ep->ofld_timer.data = (unsigned long) ep;
+	add_timer(&ep->ofld_timer);
+
+	bnx2i_ep_destroy_list_add(hba, ep);
+
+	/* destroy iSCSI context, wait for it to complete */
+	bnx2i_send_conn_destroy(hba, ep);
+	wait_event_interruptible(ep->ofld_wait,
+				 (ep->state != EP_STATE_CLEANUP_START));
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&ep->ofld_timer);
+
+	bnx2i_ep_destroy_list_del(hba, ep);
+
+	if (ep->state != EP_STATE_CLEANUP_CMPL)
+		/* should never happen */
+		printk(KERN_ALERT "bnx2i - conn destroy failed\n");
+
+	return 0;
+}
+
+
+/**
+ * bnx2i_ep_connect - establish TCP connection to target portal
+ * @shost:		scsi host
+ * @dst_addr:		target IP address
+ * @non_blocking:	blocking or non-blocking call
+ *
+ * this routine initiates the TCP/IP connection by invoking Option-2 i/f
+ *	with l5_core and the CNIC. This is a multi-step process of resolving
+ *	the route to the target, creating an iscsi connection context,
+ *	handshaking with the CNIC module to create/initialize the socket
+ *	struct and finally sending down the option-2 request to complete the
+ *	TCP 3-way handshake
+ */
+static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
+					       struct sockaddr *dst_addr,
+					       int non_blocking)
+{
+	u32 iscsi_cid = BNX2I_CID_RESERVED;
+	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+	struct sockaddr_in6 *desti6;
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct bnx2i_hba *hba;
+	struct cnic_dev *cnic;
+	struct cnic_sockaddr saddr;
+	struct iscsi_endpoint *ep;
+	int rc = 0;
+
+	if (shost)
+		/* driver is given scsi host to work with */
+		hba = iscsi_host_priv(shost);
+	else
+		/*
+		 * check if the given destination can be reached through
+		 * an iscsi capable NetXtreme2 device
+		 */
+		hba = bnx2i_check_route(dst_addr);
+	if (!hba) {
+		rc = -ENOMEM;
+		goto check_busy;
+	}
+
+	cnic = hba->cnic;
+	ep = bnx2i_alloc_ep(hba);
+	if (!ep) {
+		rc = -ENOMEM;
+		goto check_busy;
+	}
+	bnx2i_ep = ep->dd_data;
+
+	spin_lock(&hba->net_dev_lock);
+	if (bnx2i_adapter_ready(hba)) {
+		rc = -EPERM;
+		goto net_if_down;
+	}
+
+	bnx2i_ep->state = EP_STATE_IDLE;
+	bnx2i_ep->ep_iscsi_cid = (u16)RESERVED_ITT;
+	bnx2i_ep->num_active_cmds = 0;
+	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
+	if (iscsi_cid == RESERVED_ITT) {
+		printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
+		rc = -ENOMEM;
+		goto iscsi_cid_err;
+	}
+	bnx2i_ep->hba_age = hba->age;
+
+	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
+	if (rc != 0) {
+		printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
+		rc = -ENOMEM;
+		goto qp_resc_err;
+	}
+
+	bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
+	bnx2i_ep->state = EP_STATE_OFLD_START;
+	bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
+
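+	/* Arm a ~2 second offload timer before sleeping; if the CNIC never
+	 * completes the offload request, bnx2i_ep_ofld_timer is expected to
+	 * wake ofld_wait so the wait below cannot hang indefinitely.
+	 */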
+	init_timer(&bnx2i_ep->ofld_timer);
+	bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
+	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+	add_timer(&bnx2i_ep->ofld_timer);
+
+	bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
+
+	/* Wait for CNIC hardware to setup conn context and return 'cid' */
+	wait_event_interruptible(bnx2i_ep->ofld_wait,
+				 bnx2i_ep->state != EP_STATE_OFLD_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_ep->ofld_timer);
+
+	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
+
+	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
+		rc = -ENOSPC;
+		goto conn_failed;
+	}
+
+	rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
+			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
+	if (rc) {
+		rc = -EINVAL;
+		goto conn_failed;
+	}
+
+	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
+	bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
+	clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
+
+	memset(&saddr, 0, sizeof(saddr));
+	if (dst_addr->sa_family == AF_INET) {
+		desti = (struct sockaddr_in *) dst_addr;
+		saddr.remote.v4 = *desti;
+		saddr.local.v4.sin_family = desti->sin_family;
+	} else if (dst_addr->sa_family == AF_INET6) {
+		desti6 = (struct sockaddr_in6 *) dst_addr;
+		saddr.remote.v6 = *desti6;
+		saddr.local.v6.sin6_family = desti6->sin6_family;
+	}
+
+	bnx2i_ep->timestamp = jiffies;
+	bnx2i_ep->state = EP_STATE_CONNECT_START;
+	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		rc = -EINVAL;
+		goto conn_failed;
+	} else
+		rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
+
+	if (rc)
+		goto release_ep;
+
+	if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
+		goto release_ep;
+	spin_unlock(&hba->net_dev_lock);
+	return ep;
+
+release_ep:
+	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+		spin_unlock(&hba->net_dev_lock);
+		return ERR_PTR(rc);
+	}
+conn_failed:
+net_if_down:
+iscsi_cid_err:
+	bnx2i_free_qp_resc(hba, bnx2i_ep);
+qp_resc_err:
+	bnx2i_free_ep(ep);
+	spin_unlock(&hba->net_dev_lock);
+check_busy:
+	bnx2i_check_nx2_dev_busy();
+	return ERR_PTR(rc);
+}
+
+
+/**
+ * bnx2i_ep_poll - polls for TCP connection establishment
+ * @ep:			TCP connection (endpoint) handle
+ * @timeout_ms:		timeout value in milliseconds
+ *
+ * polls for TCP connect request to complete
+ */
+static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+	struct bnx2i_endpoint *bnx2i_ep;
+	int rc = 0;
+
+	bnx2i_ep = ep->dd_data;
+	if ((bnx2i_ep->state == EP_STATE_IDLE) ||
+	    (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
+	    (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+		return -1;
+	if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
+		return 1;
+
+	rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
+					      ((bnx2i_ep->state ==
+						EP_STATE_OFLD_FAILED) ||
+					       (bnx2i_ep->state ==
+						EP_STATE_CONNECT_FAILED) ||
+					       (bnx2i_ep->state ==
+						EP_STATE_CONNECT_COMPL)),
+					      msecs_to_jiffies(timeout_ms));
+	if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+		rc = -1;
+
+	if (rc > 0)
+		return 1;
+	else if (!rc)
+		return 0;	/* timeout */
+	else
+		return rc;
+}
+
+
+/**
+ * bnx2i_ep_tcp_conn_active - check if the endpoint's TCP connection is active
+ * @bnx2i_ep:	endpoint pointer
+ *
+ * check if underlying TCP connection is active
+ */
+static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
+{
+	int ret;
+	int cnic_dev_10g = 0;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+		cnic_dev_10g = 1;
+
+	switch (bnx2i_ep->state) {
+	case EP_STATE_CONNECT_START:
+	case EP_STATE_CLEANUP_FAILED:
+	case EP_STATE_OFLD_FAILED:
+	case EP_STATE_DISCONN_TIMEDOUT:
+		ret = 0;
+		break;
+	case EP_STATE_CONNECT_COMPL:
+	case EP_STATE_ULP_UPDATE_START:
+	case EP_STATE_ULP_UPDATE_COMPL:
+	case EP_STATE_TCP_FIN_RCVD:
+	case EP_STATE_ULP_UPDATE_FAILED:
+		ret = 1;
+		break;
+	case EP_STATE_TCP_RST_RCVD:
+		ret = 0;
+		break;
+	case EP_STATE_CONNECT_FAILED:
+		if (cnic_dev_10g)
+			ret = 1;
+		else
+			ret = 0;
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+
+/**
+ * bnx2i_ep_disconnect - executes TCP connection teardown process
+ * @ep:		TCP connection (endpoint) handle
+ *
+ * executes TCP connection teardown process
+ */
+static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct bnx2i_conn *bnx2i_conn = NULL;
+	struct iscsi_session *session = NULL;
+	struct iscsi_conn *conn;
+	struct cnic_dev *cnic;
+	struct bnx2i_hba *hba;
+
+	bnx2i_ep = ep->dd_data;
+
+	/* driver should not attempt connection cleanup until TCP_CONNECT
+	 * completes either successfully or fails. The connect timeout is
+	 * about 9 secs, so wait up to 12 secs for it to complete
+	 */
+	while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
+		!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
+		msleep(250);
+
+	if (bnx2i_ep->conn) {
+		bnx2i_conn = bnx2i_ep->conn;
+		conn = bnx2i_conn->cls_conn->dd_data;
+		session = conn->session;
+
+		spin_lock_bh(&session->lock);
+		bnx2i_conn->is_bound = 0;
+		spin_unlock_bh(&session->lock);
+	}
+
+	hba = bnx2i_ep->hba;
+	if (bnx2i_ep->state == EP_STATE_IDLE)
+		goto return_bnx2i_ep;
+	cnic = hba->cnic;
+
+	spin_lock(&hba->net_dev_lock);
+
+	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+		goto free_resc;
+	if (bnx2i_ep->hba_age != hba->age)
+		goto free_resc;
+
+	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
+		goto destroy_conn;
+
+	bnx2i_ep->state = EP_STATE_DISCONN_START;
+
+	init_timer(&bnx2i_ep->ofld_timer);
+	bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
+	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+	add_timer(&bnx2i_ep->ofld_timer);
+
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		int close = 0;
+
+		if (session) {
+			spin_lock_bh(&session->lock);
+			if (session->state == ISCSI_STATE_LOGGING_OUT)
+				close = 1;
+			spin_unlock_bh(&session->lock);
+		}
+		if (close)
+			cnic->cm_close(bnx2i_ep->cm_sk);
+		else
+			cnic->cm_abort(bnx2i_ep->cm_sk);
+	} else
+		goto free_resc;
+
+	/* wait for option-2 conn teardown */
+	wait_event_interruptible(bnx2i_ep->ofld_wait,
+				 bnx2i_ep->state != EP_STATE_DISCONN_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_ep->ofld_timer);
+
+destroy_conn:
+	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+		spin_unlock(&hba->net_dev_lock);
+		return;
+	}
+free_resc:
+	spin_unlock(&hba->net_dev_lock);
+	bnx2i_free_qp_resc(hba, bnx2i_ep);
+return_bnx2i_ep:
+	if (bnx2i_conn)
+		bnx2i_conn->ep = NULL;
+
+	bnx2i_free_ep(ep);
+
+	if (!hba->ofld_conns_active)
+		bnx2i_check_nx2_dev_busy();
+}
+
+
+/**
+ * bnx2i_nl_set_path - handle iSCSI path update netlink message
+ * @shost:	scsi host pointer
+ * @params:	pointer to iscsi_path message
+ *
+ * Hands the ISCSI_UEVENT_PATH_UPDATE message to the cnic driver.
+ */
+static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
+{
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	char *buf = (char *) params;
+	u16 len = sizeof(*params);
+
+	/* handled by cnic driver */
+	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
+				     len);
+
+	return 0;
+}
+
+
+/*
+ * 'scsi_host_template' structure and 'iscsi_transport' structure template
+ * used while registering with the scsi host and iSCSI transport module.
+ */
+static struct scsi_host_template bnx2i_host_template = {
+	.module			= THIS_MODULE,
+	.name			= "Broadcom Offload iSCSI Initiator",
+	.proc_name		= "bnx2i",
+	.queuecommand		= iscsi_queuecommand,
+	.eh_abort_handler	= iscsi_eh_abort,
+	.eh_device_reset_handler = iscsi_eh_device_reset,
+	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.can_queue		= 1024,
+	.max_sectors		= 127,
+	.cmd_per_lun		= 32,
+	.this_id		= -1,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
+	.shost_attrs		= bnx2i_dev_attributes,
+};
+
+struct iscsi_transport bnx2i_iscsi_transport = {
+	.owner			= THIS_MODULE,
+	.name			= "bnx2i",
+	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
+				  CAP_MULTI_R2T | CAP_DATADGST |
+				  CAP_DATA_PATH_OFFLOAD,
+	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
+				  ISCSI_MAX_XMIT_DLENGTH |
+				  ISCSI_HDRDGST_EN |
+				  ISCSI_DATADGST_EN |
+				  ISCSI_INITIAL_R2T_EN |
+				  ISCSI_MAX_R2T |
+				  ISCSI_IMM_DATA_EN |
+				  ISCSI_FIRST_BURST |
+				  ISCSI_MAX_BURST |
+				  ISCSI_PDU_INORDER_EN |
+				  ISCSI_DATASEQ_INORDER_EN |
+				  ISCSI_ERL |
+				  ISCSI_CONN_PORT |
+				  ISCSI_CONN_ADDRESS |
+				  ISCSI_EXP_STATSN |
+				  ISCSI_PERSISTENT_PORT |
+				  ISCSI_PERSISTENT_ADDRESS |
+				  ISCSI_TARGET_NAME | ISCSI_TPGT |
+				  ISCSI_USERNAME | ISCSI_PASSWORD |
+				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+				  ISCSI_LU_RESET_TMO |
+				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
+				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
+	.create_session		= bnx2i_session_create,
+	.destroy_session	= bnx2i_session_destroy,
+	.create_conn		= bnx2i_conn_create,
+	.bind_conn		= bnx2i_conn_bind,
+	.destroy_conn		= bnx2i_conn_destroy,
+	.set_param		= iscsi_set_param,
+	.get_conn_param		= bnx2i_conn_get_param,
+	.get_session_param	= iscsi_session_get_param,
+	.get_host_param		= bnx2i_host_get_param,
+	.start_conn		= bnx2i_conn_start,
+	.stop_conn		= iscsi_conn_stop,
+	.send_pdu		= iscsi_conn_send_pdu,
+	.xmit_task		= bnx2i_task_xmit,
+	.get_stats		= bnx2i_conn_get_stats,
+	/* TCP connect - disconnect - option-2 interface calls */
+	.ep_connect		= bnx2i_ep_connect,
+	.ep_poll		= bnx2i_ep_poll,
+	.ep_disconnect		= bnx2i_ep_disconnect,
+	.set_path		= bnx2i_nl_set_path,
+	/* Error recovery timeout call */
+	.session_recovery_timedout = iscsi_session_recovery_timedout,
+	.cleanup_task		= bnx2i_cleanup_task,
+};
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644
index 0000000..96426b7
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -0,0 +1,142 @@
+/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2004 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+/**
+ * bnx2i_dev_to_hba - maps dev pointer to adapter struct
+ * @dev:	device pointer
+ *
+ * Map device to hba structure
+ */
+static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	return iscsi_host_priv(shost);
+}
+
+
+/**
+ * bnx2i_show_sq_info - returns currently configured send queue (SQ) size
+ * @dev:	device pointer
+ * @buf:	buffer to return current SQ size parameter
+ *
+ * Returns the current SQ size parameter; this parameter determines the
+ * number of outstanding iSCSI commands supported on a connection
+ */
+static ssize_t bnx2i_show_sq_info(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	return sprintf(buf, "0x%x\n", hba->max_sqes);
+}
+
+
+/**
+ * bnx2i_set_sq_info - update send queue (SQ) size parameter
+ * @dev:	device pointer
+ * @buf:	buffer containing the new SQ size value
+ * @count:	parameter buffer size
+ *
+ * Interface for the user to change the send queue size allocated for each
+ * connection. Must be within SQ limits and a power of 2; the latter is
+ * needed because of how libiscsi preallocates tasks.
+ */
+static ssize_t bnx2i_set_sq_info(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+	u32 val;
+	int max_sq_size;
+
+	if (hba->ofld_conns_active)
+		goto skip_config;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
+	else
+		max_sq_size = BNX2I_570X_SQ_WQES_MAX;
+
+	if (sscanf(buf, " 0x%x ", &val) > 0) {
+		if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
+		    (is_power_of_2(val)))
+			hba->max_sqes = val;
+	}
+
+	return count;
+
+skip_config:
+	printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
+	return -EBUSY;
+}
+
+
+/**
+ * bnx2i_show_ccell_info - returns command cell (HQ) size
+ * @dev:	device pointer
+ * @buf:	buffer to return current command cell (HQ) size parameter
+ *
+ * returns per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_show_ccell_info(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	return sprintf(buf, "0x%x\n", hba->num_ccell);
+}
+
+
+/**
+ * bnx2i_set_ccell_info - set command cell (HQ) size
+ * @dev:	device pointer
+ * @buf:	buffer containing the new command cell (HQ) size value
+ * @count:	parameter buffer size
+ *
+ * updates per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_set_ccell_info(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	u32 val;
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	if (hba->ofld_conns_active)
+		goto skip_config;
+
+	if (sscanf(buf, " 0x%x ", &val) > 0) {
+		if ((val >= BNX2I_CCELLS_MIN) &&
+		    (val <= BNX2I_CCELLS_MAX)) {
+			hba->num_ccell = val;
+		}
+	}
+
+	return count;
+
+skip_config:
+	printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
+	return -EBUSY;
+}
+
+
+static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
+		   bnx2i_show_sq_info, bnx2i_set_sq_info);
+static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
+		   bnx2i_show_ccell_info, bnx2i_set_ccell_info);
+
+struct device_attribute *bnx2i_dev_attributes[] = {
+	&dev_attr_sq_size,
+	&dev_attr_num_ccell,
+	NULL
+};
-- 
1.5.6.GIT
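
The sq_size and num_ccell attributes defined above are exposed per scsi_host.
A minimal user-space sketch of driving sq_size follows (the host number,
value and exact sysfs path are assumptions for illustration, not part of the
patch):

	#include <stdio.h>

	int main(void)
	{
		/* shost_attrs appear under the scsi_host class device */
		FILE *f = fopen("/sys/class/scsi_host/host2/sq_size", "w");

		if (!f) {
			perror("sq_size");
			return 1;
		}
		/* the store routine parses " 0x%x "; the value must be a
		 * power of 2 within the per-chip WQE limits
		 */
		fprintf(f, "0x80\n");
		fclose(f);
		return 0;
	}

The write fails while offloaded connections are active (the store routine
bails out to skip_config in that case).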



^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH 3/4] cnic: Add new Broadcom CNIC driver.
  2009-05-23 21:11   ` [PATCH 3/4] cnic: Add new Broadcom " Michael Chan
@ 2009-05-25 15:19     ` Rolf Eike Beer
  2009-05-26  5:35       ` Michael Chan
  0 siblings, 1 reply; 11+ messages in thread
From: Rolf Eike Beer @ 2009-05-25 15:19 UTC (permalink / raw)
  To: Michael Chan
  Cc: James.Bottomley, michaelc, davem, linux-scsi, open-iscsi, anilgv, benli

[-- Attachment #1: Type: text/plain, Size: 31497 bytes --]

Michael Chan wrote:
> The CNIC driver controls BNX2 hardware rings and resources used by
> iSCSI.  Most hardware resources for iSCSI are separate from those
> used for ethernet networking.
>
> iSCSI uses a separate MAC address and IP address.  The CNIC driver
> creates a UIO interface to handle the non-offloaded packets such as
> ARP, etc in userspace.
>

> +static int cnic_alloc_context(struct cnic_dev *dev)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +
> +	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
> +		int i, k, arr_size;
> +
> +		cp->ctx_blk_size = BCM_PAGE_SIZE;
> +		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
> +		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
> +			   sizeof(struct cnic_ctx);
> +		cp->ctx_arr = kmalloc(arr_size, GFP_KERNEL);
> +		if (cp->ctx_arr == NULL)
> +			return -ENOMEM;
> +
> +		memset(cp->ctx_arr, 0, arr_size);

cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);

> +		k = 0;
> +		for (i = 0; i < 2; i++) {
> +			u32 j, reg, off, lo, hi;
> +
> +			if (i == 0)
> +				off = BNX2_PG_CTX_MAP;
> +			else
> +				off = BNX2_ISCSI_CTX_MAP;
> +
> +			reg = cnic_reg_rd_ind(dev, off);
> +			lo = reg >> 16;
> +			hi = reg & 0xffff;
> +			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
> +				cp->ctx_arr[k].cid = j;
> +		}
> +
> +		cp->ctx_blks = k;
> +		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
> +			cp->ctx_blks = 0;
> +			return -ENOMEM;
> +		}
> +
> +		for (i = 0; i < cp->ctx_blks; i++) {
> +			cp->ctx_arr[i].ctx =
> +				pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
> +						     &cp->ctx_arr[i].mapping);
> +			if (cp->ctx_arr[i].ctx == NULL)
> +				return -ENOMEM;
> +		}
> +	}
> +	return 0;
> +}
> +
> +static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +	struct uio_info *uinfo;
> +	int ret;
> +
> +	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
> +	if (ret)
> +		goto error;
> +	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
> +
> +	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
> +	if (ret)
> +		goto error;
> +	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
> +
> +	ret = cnic_alloc_context(dev);
> +	if (ret)
> +		goto error;
> +
> +	cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
> +	cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
> +					   &cp->l2_ring_map);
> +	if (!cp->l2_ring)
> +		goto error;
> +
> +	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
> +	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
> +	cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
> +					   &cp->l2_buf_map);
> +	if (!cp->l2_buf)
> +		goto error;
> +
> +	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
> +	if (!uinfo)
> +		goto error;
> +
> +	uinfo->mem[0].addr = dev->netdev->base_addr;
> +	uinfo->mem[0].internal_addr = dev->regview;
> +	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
> +	uinfo->mem[0].memtype = UIO_MEM_PHYS;
> +
> +	uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
> +	if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
> +		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
> +	else
> +		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
> +	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
> +
> +	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
> +	uinfo->mem[2].size = cp->l2_ring_size;
> +	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
> +
> +	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
> +	uinfo->mem[3].size = cp->l2_buf_size;
> +	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
> +
> +	uinfo->name = "bnx2_cnic";
> +	uinfo->version = CNIC_MODULE_VERSION;
> +	uinfo->irq = UIO_IRQ_CUSTOM;
> +
> +	uinfo->open = cnic_uio_open;
> +	uinfo->release = cnic_uio_close;
> +
> +	uinfo->priv = dev;
> +
> +	ret = uio_register_device(&dev->pcidev->dev, uinfo);
> +	if (ret) {
> +		kfree(uinfo);
> +		goto error;
> +	}
> +
> +	cp->cnic_uinfo = uinfo;
> +
> +	return 0;
> +
> +error:
> +	cnic_free_resc(dev);
> +	return ret;
> +}
> +
> +static inline u32 cnic_kwq_avail(struct cnic_local *cp)
> +{
> +	return cp->max_kwq_idx -
> +		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
> +}
> +
> +static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
> +				  u32 num_wqes)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +	struct kwqe *prod_qe;
> +	u16 prod, sw_prod, i;
> +
> +	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
> +		return -EAGAIN;		/* bnx2 is down */
> +
> +	spin_lock_bh(&cp->cnic_ulp_lock);
> +	if (num_wqes > cnic_kwq_avail(cp) &&
> +	    !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
> +		spin_unlock_bh(&cp->cnic_ulp_lock);
> +		return -EAGAIN;
> +	}
> +
> +	cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
> +
> +	prod = cp->kwq_prod_idx;
> +	sw_prod = prod & MAX_KWQ_IDX;
> +	for (i = 0; i < num_wqes; i++) {
> +		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
> +		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
> +		prod++;
> +		sw_prod = prod & MAX_KWQ_IDX;
> +	}
> +	cp->kwq_prod_idx = prod;
> +
> +	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
> +
> +	spin_unlock_bh(&cp->cnic_ulp_lock);
> +	return 0;
> +}
> +
> +static void service_kcqes(struct cnic_dev *dev, int num_cqes)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +	int i, j;
> +
> +	i = 0;
> +	j = 1;
> +	while (num_cqes) {
> +		struct cnic_ulp_ops *ulp_ops;
> +		int ulp_type;
> +		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
> +		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
> +
> +		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
> +			cnic_kwq_completion(dev, 1);
> +
> +		while (j < num_cqes) {
> +			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
> +
> +			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
> +				break;
> +
> +			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
> +				cnic_kwq_completion(dev, 1);
> +			j++;
> +		}
> +
> +		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
> +			ulp_type = CNIC_ULP_RDMA;
> +		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
> +			ulp_type = CNIC_ULP_ISCSI;
> +		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
> +			ulp_type = CNIC_ULP_L4;
> +		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
> +			goto end;
> +		else {
> +			printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
> +			       dev->netdev->name, kcqe_op_flag);
> +			goto end;
> +		}
> +
> +		rcu_read_lock();
> +		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
> +		if (likely(ulp_ops)) {
> +			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
> +						  cp->completed_kcq + i, j);
> +		}
> +		rcu_read_unlock();
> +end:
> +		num_cqes -= j;
> +		i += j;
> +		j = 1;
> +	}
> +	return;
> +}
> +
> +static u16 cnic_bnx2_next_idx(u16 idx)
> +{
> +	return idx + 1;
> +}
> +
> +static u16 cnic_bnx2_hw_idx(u16 idx)
> +{
> +	return idx;
> +}
> +
> +static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +	u16 i, ri, last;
> +	struct kcqe *kcqe;
> +	int kcqe_cnt = 0, last_cnt = 0;
> +
> +	i = ri = last = *sw_prod;
> +	ri &= MAX_KCQ_IDX;
> +
> +	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
> +		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
> +		cp->completed_kcq[kcqe_cnt++] = kcqe;
> +		i = cp->next_idx(i);
> +		ri = i & MAX_KCQ_IDX;
> +		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
> +			last_cnt = kcqe_cnt;
> +			last = i;
> +		}
> +	}
> +
> +	*sw_prod = last;
> +	return last_cnt;
> +}
> +
> +static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
> +{
> +	u16 rx_cons = *cp->rx_cons_ptr;
> +	u16 tx_cons = *cp->tx_cons_ptr;
> +
> +	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
> +		cp->tx_cons = tx_cons;
> +		cp->rx_cons = rx_cons;
> +		uio_event_notify(cp->cnic_uinfo);
> +	}
> +}
> +
> +static int cnic_service_bnx2(void *data, void *status_blk)
> +{
> +	struct cnic_dev *dev = data;
> +	struct status_block *sblk = status_blk;
> +	struct cnic_local *cp = dev->cnic_priv;
> +	u32 status_idx = sblk->status_idx;
> +	u16 hw_prod, sw_prod;
> +	int kcqe_cnt;
> +
> +	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
> +		return status_idx;
> +
> +	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
> +
> +	hw_prod = sblk->status_completion_producer_index;
> +	sw_prod = cp->kcq_prod_idx;
> +	while (sw_prod != hw_prod) {
> +		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
> +		if (kcqe_cnt == 0)
> +			goto done;
> +
> +		service_kcqes(dev, kcqe_cnt);
> +
> +		/* Tell compiler that status_blk fields can change. */
> +		barrier();
> +		if (status_idx != sblk->status_idx) {
> +			status_idx = sblk->status_idx;
> +			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
> +			hw_prod = sblk->status_completion_producer_index;
> +		} else
> +			break;
> +	}
> +
> +done:
> +	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
> +
> +	cp->kcq_prod_idx = sw_prod;
> +
> +	cnic_chk_bnx2_pkt_rings(cp);
> +	return status_idx;
> +}
> +
> +static void cnic_service_bnx2_msix(unsigned long data)
> +{
> +	struct cnic_dev *dev = (struct cnic_dev *) data;
> +	struct cnic_local *cp = dev->cnic_priv;
> +	struct status_block_msix *status_blk = cp->bnx2_status_blk;
> +	u32 status_idx = status_blk->status_idx;
> +	u16 hw_prod, sw_prod;
> +	int kcqe_cnt;
> +
> +	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
> +
> +	hw_prod = status_blk->status_completion_producer_index;
> +	sw_prod = cp->kcq_prod_idx;
> +	while (sw_prod != hw_prod) {
> +		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
> +		if (kcqe_cnt == 0)
> +			goto done;
> +
> +		service_kcqes(dev, kcqe_cnt);
> +
> +		/* Tell compiler that status_blk fields can change. */
> +		barrier();
> +		if (status_idx != status_blk->status_idx) {
> +			status_idx = status_blk->status_idx;
> +			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
> +			hw_prod = status_blk->status_completion_producer_index;
> +		} else
> +			break;
> +	}
> +
> +done:
> +	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
> +	cp->kcq_prod_idx = sw_prod;
> +
> +	cnic_chk_bnx2_pkt_rings(cp);
> +
> +	cp->last_status_idx = status_idx;
> +	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
> +		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
> +}
> +
> +static irqreturn_t cnic_irq(int irq, void *dev_instance)
> +{
> +	struct cnic_dev *dev = dev_instance;
> +	struct cnic_local *cp = dev->cnic_priv;
> +	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
> +
> +	if (cp->ack_int)
> +		cp->ack_int(dev);
> +
> +	prefetch(cp->status_blk);
> +	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
> +
> +	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
> +		tasklet_schedule(&cp->cnic_irq_task);
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static void cnic_ulp_stop(struct cnic_dev *dev)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +	int if_type;
> +
> +	rcu_read_lock();
> +	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
> +		struct cnic_ulp_ops *ulp_ops;
> +
> +		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
> +		if (!ulp_ops)
> +			continue;
> +
> +		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
> +			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
> +	}
> +	rcu_read_unlock();
> +}
> +
> +static void cnic_ulp_start(struct cnic_dev *dev)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +	int if_type;
> +
> +	rcu_read_lock();
> +	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
> +		struct cnic_ulp_ops *ulp_ops;
> +
> +		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
> +		if (!ulp_ops || !ulp_ops->cnic_start)
> +			continue;
> +
> +		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
> +			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
> +	}
> +	rcu_read_unlock();
> +}
> +
> +static int cnic_ctl(void *data, struct cnic_ctl_info *info)
> +{
> +	struct cnic_dev *dev = data;
> +
> +	switch (info->cmd) {
> +	case CNIC_CTL_STOP_CMD:
> +		cnic_hold(dev);
> +		mutex_lock(&cnic_lock);
> +
> +		cnic_ulp_stop(dev);
> +		cnic_stop_hw(dev);
> +
> +		mutex_unlock(&cnic_lock);
> +		cnic_put(dev);
> +		break;
> +	case CNIC_CTL_START_CMD:
> +		cnic_hold(dev);
> +		mutex_lock(&cnic_lock);
> +
> +		if (!cnic_start_hw(dev))
> +			cnic_ulp_start(dev);
> +
> +		mutex_unlock(&cnic_lock);
> +		cnic_put(dev);
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +	return 0;
> +}
> +
> +static void cnic_ulp_init(struct cnic_dev *dev)
> +{
> +	int i;
> +	struct cnic_local *cp = dev->cnic_priv;
> +
> +	rcu_read_lock();
> +	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
> +		struct cnic_ulp_ops *ulp_ops;
> +
> +		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
> +		if (!ulp_ops || !ulp_ops->cnic_init)
> +			continue;
> +
> +		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
> +			ulp_ops->cnic_init(dev);
> +
> +	}
> +	rcu_read_unlock();
> +}
> +
> +static void cnic_ulp_exit(struct cnic_dev *dev)
> +{
> +	int i;
> +	struct cnic_local *cp = dev->cnic_priv;
> +
> +	rcu_read_lock();
> +	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
> +		struct cnic_ulp_ops *ulp_ops;
> +
> +		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
> +		if (!ulp_ops || !ulp_ops->cnic_exit)
> +			continue;
> +
> +		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
> +			ulp_ops->cnic_exit(dev);
> +
> +	}
> +	rcu_read_unlock();
> +}
> +
> +static int cnic_cm_offload_pg(struct cnic_sock *csk)
> +{
> +	struct cnic_dev *dev = csk->dev;
> +	struct l4_kwq_offload_pg *l4kwqe;
> +	struct kwqe *wqes[1];
> +
> +	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
> +	memset(l4kwqe, 0, sizeof(*l4kwqe));
> +	wqes[0] = (struct kwqe *) l4kwqe;
> +
> +	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
> +	l4kwqe->flags =
> +		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
> +	l4kwqe->l2hdr_nbytes = ETH_HLEN;
> +
> +	l4kwqe->da0 = csk->ha[0];
> +	l4kwqe->da1 = csk->ha[1];
> +	l4kwqe->da2 = csk->ha[2];
> +	l4kwqe->da3 = csk->ha[3];
> +	l4kwqe->da4 = csk->ha[4];
> +	l4kwqe->da5 = csk->ha[5];
> +
> +	l4kwqe->sa0 = dev->mac_addr[0];
> +	l4kwqe->sa1 = dev->mac_addr[1];
> +	l4kwqe->sa2 = dev->mac_addr[2];
> +	l4kwqe->sa3 = dev->mac_addr[3];
> +	l4kwqe->sa4 = dev->mac_addr[4];
> +	l4kwqe->sa5 = dev->mac_addr[5];
> +
> +	l4kwqe->etype = ETH_P_IP;
> +	l4kwqe->ipid_count = DEF_IPID_COUNT;
> +	l4kwqe->host_opaque = csk->l5_cid;
> +
> +	if (csk->vlan_id) {
> +		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
> +		l4kwqe->vlan_tag = csk->vlan_id;
> +		l4kwqe->l2hdr_nbytes += 4;
> +	}
> +
> +	return dev->submit_kwqes(dev, wqes, 1);
> +}
> +
> +static int cnic_cm_update_pg(struct cnic_sock *csk)
> +{
> +	struct cnic_dev *dev = csk->dev;
> +	struct l4_kwq_update_pg *l4kwqe;
> +	struct kwqe *wqes[1];
> +
> +	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
> +	memset(l4kwqe, 0, sizeof(*l4kwqe));
> +	wqes[0] = (struct kwqe *) l4kwqe;
> +
> +	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
> +	l4kwqe->flags =
> +		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
> +	l4kwqe->pg_cid = csk->pg_cid;
> +
> +	l4kwqe->da0 = csk->ha[0];
> +	l4kwqe->da1 = csk->ha[1];
> +	l4kwqe->da2 = csk->ha[2];
> +	l4kwqe->da3 = csk->ha[3];
> +	l4kwqe->da4 = csk->ha[4];
> +	l4kwqe->da5 = csk->ha[5];
> +
> +	l4kwqe->pg_host_opaque = csk->l5_cid;
> +	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
> +
> +	return dev->submit_kwqes(dev, wqes, 1);
> +}
> +
> +static int cnic_cm_upload_pg(struct cnic_sock *csk)
> +{
> +	struct cnic_dev *dev = csk->dev;
> +	struct l4_kwq_upload *l4kwqe;
> +	struct kwqe *wqes[1];
> +
> +	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
> +	memset(l4kwqe, 0, sizeof(*l4kwqe));
> +	wqes[0] = (struct kwqe *) l4kwqe;
> +
> +	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
> +	l4kwqe->flags =
> +		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
> +	l4kwqe->cid = csk->pg_cid;
> +
> +	return dev->submit_kwqes(dev, wqes, 1);
> +}
> +
> +static int cnic_cm_conn_req(struct cnic_sock *csk)
> +{
> +	struct cnic_dev *dev = csk->dev;
> +	struct l4_kwq_connect_req1 *l4kwqe1;
> +	struct l4_kwq_connect_req2 *l4kwqe2;
> +	struct l4_kwq_connect_req3 *l4kwqe3;
> +	struct kwqe *wqes[3];
> +	u8 tcp_flags = 0;
> +	int num_wqes = 2;
> +
> +	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
> +	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
> +	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
> +	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
> +	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
> +	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
> +
> +	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
> +	l4kwqe3->flags =
> +		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
> +	l4kwqe3->ka_timeout = csk->ka_timeout;
> +	l4kwqe3->ka_interval = csk->ka_interval;
> +	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
> +	l4kwqe3->tos = csk->tos;
> +	l4kwqe3->ttl = csk->ttl;
> +	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
> +	l4kwqe3->pmtu = csk->mtu;
> +	l4kwqe3->rcv_buf = csk->rcv_buf;
> +	l4kwqe3->snd_buf = csk->snd_buf;
> +	l4kwqe3->seed = csk->seed;
> +
> +	wqes[0] = (struct kwqe *) l4kwqe1;
> +	if (test_bit(SK_F_IPV6, &csk->flags)) {
> +		wqes[1] = (struct kwqe *) l4kwqe2;
> +		wqes[2] = (struct kwqe *) l4kwqe3;
> +		num_wqes = 3;
> +
> +		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
> +		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
> +		l4kwqe2->flags =
> +			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
> +			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
> +		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
> +		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
> +		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
> +		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
> +		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
> +		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
> +		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
> +			       sizeof(struct tcphdr);
> +	} else {
> +		wqes[1] = (struct kwqe *) l4kwqe3;
> +		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
> +			       sizeof(struct tcphdr);
> +	}
> +
> +	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
> +	l4kwqe1->flags =
> +		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
> +		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
> +	l4kwqe1->cid = csk->cid;
> +	l4kwqe1->pg_cid = csk->pg_cid;
> +	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
> +	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
> +	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
> +	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
> +	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
> +		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
> +	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
> +		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
> +	if (csk->tcp_flags & SK_TCP_NAGLE)
> +		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
> +	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
> +		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
> +	if (csk->tcp_flags & SK_TCP_SACK)
> +		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
> +	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
> +		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
> +
> +	l4kwqe1->tcp_flags = tcp_flags;
> +
> +	return dev->submit_kwqes(dev, wqes, num_wqes);
> +}
> +
> +static int cnic_cm_close_req(struct cnic_sock *csk)
> +{
> +	struct cnic_dev *dev = csk->dev;
> +	struct l4_kwq_close_req *l4kwqe;
> +	struct kwqe *wqes[1];
> +
> +	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
> +	memset(l4kwqe, 0, sizeof(*l4kwqe));
> +	wqes[0] = (struct kwqe *) l4kwqe;
> +
> +	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
> +	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
> +	l4kwqe->cid = csk->cid;
> +
> +	return dev->submit_kwqes(dev, wqes, 1);
> +}
> +
> +static int cnic_cm_abort_req(struct cnic_sock *csk)
> +{
> +	struct cnic_dev *dev = csk->dev;
> +	struct l4_kwq_reset_req *l4kwqe;
> +	struct kwqe *wqes[1];
> +
> +	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
> +	memset(l4kwqe, 0, sizeof(*l4kwqe));
> +	wqes[0] = (struct kwqe *) l4kwqe;
> +
> +	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
> +	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
> +	l4kwqe->cid = csk->cid;
> +
> +	return dev->submit_kwqes(dev, wqes, 1);
> +}
> +
> +static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
> +			  u32 l5_cid, struct cnic_sock **csk, void *context)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +	struct cnic_sock *csk1;
> +
> +	if (l5_cid >= MAX_CM_SK_TBL_SZ)
> +		return -EINVAL;
> +
> +	csk1 = &cp->csk_tbl[l5_cid];
> +	if (atomic_read(&csk1->ref_count))
> +		return -EAGAIN;
> +
> +	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
> +		return -EBUSY;
> +
> +	csk1->dev = dev;
> +	csk1->cid = cid;
> +	csk1->l5_cid = l5_cid;
> +	csk1->ulp_type = ulp_type;
> +	csk1->context = context;
> +
> +	csk1->ka_timeout = DEF_KA_TIMEOUT;
> +	csk1->ka_interval = DEF_KA_INTERVAL;
> +	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
> +	csk1->tos = DEF_TOS;
> +	csk1->ttl = DEF_TTL;
> +	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
> +	csk1->rcv_buf = DEF_RCV_BUF;
> +	csk1->snd_buf = DEF_SND_BUF;
> +	csk1->seed = DEF_SEED;
> +
> +	*csk = csk1;
> +	return 0;
> +}
> +
> +static void cnic_cm_cleanup(struct cnic_sock *csk)
> +{
> +	if (csk->src_port) {
> +		struct cnic_dev *dev = csk->dev;
> +		struct cnic_local *cp = dev->cnic_priv;
> +
> +		cnic_free_id(&cp->csk_port_tbl, csk->src_port);
> +		csk->src_port = 0;
> +	}
> +}
> +
> +static void cnic_close_conn(struct cnic_sock *csk)
> +{
> +	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
> +		cnic_cm_upload_pg(csk);
> +		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
> +	}
> +	cnic_cm_cleanup(csk);
> +}
> +
> +static int cnic_cm_destroy(struct cnic_sock *csk)
> +{
> +	if (!cnic_in_use(csk))
> +		return -EINVAL;
> +
> +	csk_hold(csk);
> +	clear_bit(SK_F_INUSE, &csk->flags);
> +	smp_mb__after_clear_bit();
> +	while (atomic_read(&csk->ref_count) != 1)
> +		msleep(1);
> +	cnic_cm_cleanup(csk);
> +
> +	csk->flags = 0;
> +	csk_put(csk);
> +	return 0;
> +}
> +
> +static inline u16 cnic_get_vlan(struct net_device *dev,
> +				struct net_device **vlan_dev)
> +{
> +	if (dev->priv_flags & IFF_802_1Q_VLAN) {
> +		*vlan_dev = vlan_dev_real_dev(dev);
> +		return vlan_dev_vlan_id(dev);
> +	}
> +	*vlan_dev = dev;
> +	return 0;
> +}
> +
> +static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
> +			     struct dst_entry **dst)
> +{
> +	struct flowi fl;
> +	int err;
> +	struct rtable *rt;
> +
> +	memset(&fl, 0, sizeof(fl));
> +	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
> +
> +	err = ip_route_output_key(&init_net, &rt, &fl);
> +	if (!err)
> +		*dst = &rt->u.dst;
> +	return err;
> +}
> +
> +static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
> +			     struct dst_entry **dst)
> +{
> +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
> +	struct flowi fl;
> +
> +	memset(&fl, 0, sizeof(fl));
> +	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
> +	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
> +		fl.oif = dst_addr->sin6_scope_id;
> +
> +	*dst = ip6_route_output(&init_net, NULL, &fl);
> +	if (*dst)
> +		return 0;
> +#endif
> +
> +	return -ENETUNREACH;
> +}
> +
> +static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
> +					   int ulp_type)
> +{
> +	struct cnic_dev *dev = NULL;
> +	struct dst_entry *dst;
> +	struct net_device *netdev = NULL;
> +	int err = -ENETUNREACH;
> +
> +	if (dst_addr->sin_family == AF_INET)
> +		err = cnic_get_v4_route(dst_addr, &dst);
> +	else if (dst_addr->sin_family == AF_INET6) {
> +		struct sockaddr_in6 *dst_addr6 =
> +			(struct sockaddr_in6 *) dst_addr;
> +
> +		err = cnic_get_v6_route(dst_addr6, &dst);
> +	} else
> +		return NULL;
> +
> +	if (err)
> +		return NULL;
> +
> +	if (!dst->dev)
> +		goto done;
> +
> +	cnic_get_vlan(dst->dev, &netdev);
> +
> +	dev = cnic_from_netdev(netdev);
> +
> +done:
> +	dst_release(dst);
> +	if (dev)
> +		cnic_put(dev);
> +	return dev;
> +}
> +
> +static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
> +{
> +	struct cnic_dev *dev = csk->dev;
> +	struct cnic_local *cp = dev->cnic_priv;
> +
> +	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
> +}
> +
> +static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
> +{
> +	struct cnic_dev *dev = csk->dev;
> +	struct cnic_local *cp = dev->cnic_priv;
> +	int is_v6, err, rc = -ENETUNREACH;
> +	struct dst_entry *dst;
> +	struct net_device *realdev;
> +	u32 local_port;
> +
> +	if (saddr->local.v6.sin6_family == AF_INET6 &&
> +	    saddr->remote.v6.sin6_family == AF_INET6)
> +		is_v6 = 1;
> +	else if (saddr->local.v4.sin_family == AF_INET &&
> +		 saddr->remote.v4.sin_family == AF_INET)
> +		is_v6 = 0;
> +	else
> +		return -EINVAL;
> +
> +	clear_bit(SK_F_IPV6, &csk->flags);
> +
> +	if (is_v6) {
> +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
> +		set_bit(SK_F_IPV6, &csk->flags);
> +		err = cnic_get_v6_route(&saddr->remote.v6, &dst);
> +		if (err)
> +			return err;
> +
> +		if (!dst || dst->error || !dst->dev)
> +			goto err_out;
> +
> +		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
> +		       sizeof(struct in6_addr));
> +		csk->dst_port = saddr->remote.v6.sin6_port;
> +		local_port = saddr->local.v6.sin6_port;
> +#else
> +		return rc;
> +#endif
> +
> +	} else {
> +		err = cnic_get_v4_route(&saddr->remote.v4, &dst);
> +		if (err)
> +			return err;
> +
> +		if (!dst || dst->error || !dst->dev)
> +			goto err_out;
> +
> +		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
> +		csk->dst_port = saddr->remote.v4.sin_port;
> +		local_port = saddr->local.v4.sin_port;
> +	}
> +
> +	csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
> +	if (realdev != dev->netdev)
> +		goto err_out;
> +
> +	if (local_port >= CNIC_LOCAL_PORT_MIN &&
> +	    local_port < CNIC_LOCAL_PORT_MAX) {
> +		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
> +			local_port = 0;
> +	} else
> +		local_port = 0;
> +
> +	if (!local_port) {
> +		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
> +		if (local_port == -1) {
> +			rc = -ENOMEM;
> +			goto err_out;
> +		}
> +	}
> +	csk->src_port = local_port;
> +
> +	csk->mtu = dst_mtu(dst);
> +	rc = 0;
> +
> +err_out:
> +	dst_release(dst);
> +	return rc;
> +}
> +
> +static void cnic_init_csk_state(struct cnic_sock *csk)
> +{
> +	csk->state = 0;
> +	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
> +	clear_bit(SK_F_CLOSING, &csk->flags);
> +}
> +
> +static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
> +{
> +	int err = 0;
> +
> +	if (!cnic_in_use(csk))
> +		return -EINVAL;
> +
> +	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
> +		return -EINVAL;
> +
> +	cnic_init_csk_state(csk);
> +
> +	err = cnic_get_route(csk, saddr);
> +	if (err)
> +		goto err_out;
> +
> +	err = cnic_resolve_addr(csk, saddr);
> +	if (!err)
> +		return 0;
> +
> +err_out:
> +	clear_bit(SK_F_CONNECT_START, &csk->flags);
> +	return err;
> +}
> +
> +static int cnic_cm_abort(struct cnic_sock *csk)
> +{
> +	struct cnic_local *cp = csk->dev->cnic_priv;
> +	u32 opcode;
> +
> +	if (!cnic_in_use(csk))
> +		return -EINVAL;
> +
> +	if (cnic_abort_prep(csk))
> +		return cnic_cm_abort_req(csk);
> +
> +	/* Getting here means that we haven't started connect, or
> +	 * connect was not successful.
> +	 */
> +
> +	csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
> +	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
> +		opcode = csk->state;
> +	else
> +		opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
> +	cp->close_conn(csk, opcode);
> +
> +	return 0;
> +}
> +
> +static int cnic_cm_close(struct cnic_sock *csk)
> +{
> +	if (!cnic_in_use(csk))
> +		return -EINVAL;
> +
> +	if (cnic_close_prep(csk)) {
> +		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
> +		return cnic_cm_close_req(csk);
> +	}
> +	return 0;
> +}
> +
> +static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
> +			   u8 opcode)
> +{
> +	struct cnic_ulp_ops *ulp_ops;
> +	int ulp_type = csk->ulp_type;
> +
> +	rcu_read_lock();
> +	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
> +	if (ulp_ops) {
> +		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
> +			ulp_ops->cm_connect_complete(csk);
> +		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
> +			ulp_ops->cm_close_complete(csk);
> +		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
> +			ulp_ops->cm_remote_abort(csk);
> +		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
> +			ulp_ops->cm_abort_complete(csk);
> +		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
> +			ulp_ops->cm_remote_close(csk);
> +	}
> +	rcu_read_unlock();
> +}
> +
> +static int cnic_cm_set_pg(struct cnic_sock *csk)
> +{
> +	if (cnic_offld_prep(csk)) {
> +		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
> +			cnic_cm_update_pg(csk);
> +		else
> +			cnic_cm_offload_pg(csk);
> +	}
> +	return 0;
> +}
> +
> +static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +	u32 l5_cid = kcqe->pg_host_opaque;
> +	u8 opcode = kcqe->op_code;
> +	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
> +
> +	csk_hold(csk);
> +	if (!cnic_in_use(csk))
> +		goto done;
> +
> +	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
> +		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
> +		goto done;
> +	}
> +	csk->pg_cid = kcqe->pg_cid;
> +	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
> +	cnic_cm_conn_req(csk);
> +
> +done:
> +	csk_put(csk);
> +}
> +
> +static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
> +	u8 opcode = l4kcqe->op_code;
> +	u32 l5_cid;
> +	struct cnic_sock *csk;
> +
> +	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
> +	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
> +		cnic_cm_process_offld_pg(dev, l4kcqe);
> +		return;
> +	}
> +
> +	l5_cid = l4kcqe->conn_id;
> +	if (opcode & 0x80)
> +		l5_cid = l4kcqe->cid;
> +	if (l5_cid >= MAX_CM_SK_TBL_SZ)
> +		return;
> +
> +	csk = &cp->csk_tbl[l5_cid];
> +	csk_hold(csk);
> +
> +	if (!cnic_in_use(csk)) {
> +		csk_put(csk);
> +		return;
> +	}
> +
> +	switch (opcode) {
> +	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
> +		if (l4kcqe->status == 0)
> +			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
> +
> +		smp_mb__before_clear_bit();
> +		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
> +		cnic_cm_upcall(cp, csk, opcode);
> +		break;
> +
> +	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
> +		if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
> +			csk->state = opcode;
> +		/* fall through */
> +	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
> +	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
> +		cp->close_conn(csk, opcode);
> +		break;
> +
> +	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
> +		cnic_cm_upcall(cp, csk, opcode);
> +		break;
> +	}
> +	csk_put(csk);
> +}
> +
> +static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
> +{
> +	struct cnic_dev *dev = data;
> +	int i;
> +
> +	for (i = 0; i < num; i++)
> +		cnic_cm_process_kcqe(dev, kcqe[i]);
> +}
> +
> +static struct cnic_ulp_ops cm_ulp_ops = {
> +	.indicate_kcqes		= cnic_cm_indicate_kcqe,
> +};
> +
> +static void cnic_cm_free_mem(struct cnic_dev *dev)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +
> +	kfree(cp->csk_tbl);
> +	cp->csk_tbl = NULL;
> +	cnic_free_id_tbl(&cp->csk_port_tbl);
> +}
> +
> +static int cnic_cm_alloc_mem(struct cnic_dev *dev)
> +{
> +	struct cnic_local *cp = dev->cnic_priv;
> +
> +	cp->csk_tbl = kmalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
> +			      GFP_KERNEL);
> +	if (!cp->csk_tbl)
> +		return -ENOMEM;
> +	memset(cp->csk_tbl, 0, sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ);

cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);


There are some more of these.

Greetings,

Eike

[-- Attachment #2: This is a digitally signed message part. --]
[-- Type: application/pgp-signature, Size: 198 bytes --]

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 3/4] cnic: Add new Broadcom CNIC driver.
  2009-05-25 15:19     ` Rolf Eike Beer
@ 2009-05-26  5:35       ` Michael Chan
  0 siblings, 0 replies; 11+ messages in thread
From: Michael Chan @ 2009-05-26  5:35 UTC (permalink / raw)
  To: Rolf Eike Beer
  Cc: James.Bottomley, michaelc, davem, linux-scsi, open-iscsi,
	Anil Veerabhadrappa, Benjamin Li

On Mon, 2009-05-25 at 08:19 -0700, Rolf Eike Beer wrote:
> cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
>                               GFP_KERNEL);
> 
> 
> There are some more of these.

Thanks.  I've changed all of these to kzalloc().
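
For reference, each conversion is of the form below (taken from the
cnic_cm_alloc_mem() hunk quoted above; the other call sites follow the same
pattern):

	/* before: kmalloc() followed by an explicit memset() */
	cp->csk_tbl = kmalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;
	memset(cp->csk_tbl, 0, sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ);

	/* after: kzalloc() returns zeroed memory directly */
	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;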

From e7cbc3df66fb0b9c7036478be292f2db7be78153 Mon Sep 17 00:00:00 2001
From: Michael Chan <mchan@broadcom.com>
Date: Mon, 25 May 2009 22:28:35 -0700
Subject: [PATCH 3/4] cnic: Add new Broadcom CNIC driver.

The CNIC driver controls BNX2 hardware rings and resources used by
iSCSI.  Most hardware resources for iSCSI are separate from those
used for ethernet networking.

iSCSI uses a separate MAC address and IP address.  The CNIC driver
creates a UIO interface to handle the non-offloaded packets such as
ARP, etc in userspace.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Acked-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/Kconfig     |   11 +
 drivers/net/Makefile    |    1 +
 drivers/net/cnic.c      | 2711 +++++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/cnic.h      |  299 ++++++
 drivers/net/cnic_defs.h |  580 ++++++++++
 drivers/net/cnic_if.h   |  299 ++++++
 6 files changed, 3901 insertions(+), 0 deletions(-)
 create mode 100644 drivers/net/cnic.c
 create mode 100644 drivers/net/cnic.h
 create mode 100644 drivers/net/cnic_defs.h
 create mode 100644 drivers/net/cnic_if.h
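
For reviewers following the new ULP interface: a minimal sketch of how an
upper-layer driver (such as bnx2i) is expected to hook into cnic.  The
callback names and signatures follow the cnic_ulp_ops usage in this patch;
the my_ulp_* functions are placeholders, not bnx2i code:

	#include <linux/module.h>
	#include <linux/init.h>
	#include "cnic_if.h"

	/* called once for each cnic device when the ULP registers */
	static void my_ulp_init(struct cnic_dev *dev)
	{
	}

	/* completion events for this ULP's submitted work queue entries */
	static void my_ulp_indicate_kcqes(void *ulp_ctx, struct kcqe *cqes[],
					  u32 num_cqes)
	{
	}

	static struct cnic_ulp_ops my_ulp_ops = {
		.cnic_init	= my_ulp_init,
		.indicate_kcqes	= my_ulp_indicate_kcqes,
	};

	static int __init my_ulp_module_init(void)
	{
		/* global registration of the ULP ops with the cnic core */
		return cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
	}
	module_init(my_ulp_module_init);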

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 214a92d..f3c4a3b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2264,6 +2264,17 @@ config BNX2
 	  To compile this driver as a module, choose M here: the module
 	  will be called bnx2.  This is recommended.
 
+config CNIC
+	tristate "Broadcom CNIC support"
+	depends on BNX2
+	depends on UIO
+	help
+	  This driver supports offload features of Broadcom NetXtreme II
+	  gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cnic.  This is recommended.
+
 config SPIDER_NET
 	tristate "Spider Gigabit Ethernet driver"
 	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1fc4602..e6f1f8c 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
 obj-$(CONFIG_BNX2X) += bnx2x.o
 bnx2x-objs := bnx2x_main.o bnx2x_link.o
 spidernet-y += spider_net.o spider_net_ethtool.o
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
new file mode 100644
index 0000000..8d74037
--- /dev/null
+++ b/drivers/net/cnic.c
@@ -0,0 +1,2711 @@
+/* cnic.c: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
+ * Modified and maintained by: Michael Chan <mchan@broadcom.com>
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/uio_driver.h>
+#include <linux/in.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define BCM_VLAN 1
+#endif
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <scsi/iscsi_if.h>
+
+#include "cnic_if.h"
+#include "bnx2.h"
+#include "cnic.h"
+#include "cnic_defs.h"
+
+#define DRV_MODULE_NAME		"cnic"
+#define PFX DRV_MODULE_NAME	": "
+
+static char version[] __devinitdata =
+	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
+	      "Chen (zongxi@broadcom.com)");
+MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(CNIC_MODULE_VERSION);
+
+static LIST_HEAD(cnic_dev_list);
+static DEFINE_RWLOCK(cnic_dev_lock);
+static DEFINE_MUTEX(cnic_lock);
+
+static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+static int cnic_service_bnx2(void *, void *);
+static int cnic_ctl(void *, struct cnic_ctl_info *);
+
+static struct cnic_ops cnic_bnx2_ops = {
+	.cnic_owner	= THIS_MODULE,
+	.cnic_handler	= cnic_service_bnx2,
+	.cnic_ctl	= cnic_ctl,
+};
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
+static int cnic_cm_set_pg(struct cnic_sock *);
+
+static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+	struct cnic_dev *dev = uinfo->priv;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (cp->uio_dev != -1)
+		return -EBUSY;
+
+	cp->uio_dev = iminor(inode);
+
+	cnic_shutdown_bnx2_rx_ring(dev);
+
+	cnic_init_bnx2_tx_ring(dev);
+	cnic_init_bnx2_rx_ring(dev);
+
+	return 0;
+}
+
+static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+	struct cnic_dev *dev = uinfo->priv;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cp->uio_dev = -1;
+	return 0;
+}
+
+static inline void cnic_hold(struct cnic_dev *dev)
+{
+	atomic_inc(&dev->ref_count);
+}
+
+static inline void cnic_put(struct cnic_dev *dev)
+{
+	atomic_dec(&dev->ref_count);
+}
+
+static inline void csk_hold(struct cnic_sock *csk)
+{
+	atomic_inc(&csk->ref_count);
+}
+
+static inline void csk_put(struct cnic_sock *csk)
+{
+	atomic_dec(&csk->ref_count);
+}
+
+static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
+{
+	struct cnic_dev *cdev;
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(cdev, &cnic_dev_list, list) {
+		if (netdev == cdev->netdev) {
+			cnic_hold(cdev);
+			read_unlock(&cnic_dev_lock);
+			return cdev;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+	return NULL;
+}
+
+static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_CTX_WR_CMD;
+	io->cid_addr = cid_addr;
+	io->offset = off;
+	io->data = val;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_IO_WR_CMD;
+	io->offset = off;
+	io->data = val;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_IO_RD_CMD;
+	io->offset = off;
+	ethdev->drv_ctl(dev->netdev, &info);
+	return io->data;
+}
+
+static int cnic_in_use(struct cnic_sock *csk)
+{
+	return test_bit(SK_F_INUSE, &csk->flags);
+}
+
+static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+
+	info.cmd = DRV_CTL_COMPLETION_CMD;
+	info.data.comp.comp_count = count;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
+			   struct cnic_sock *csk)
+{
+	struct iscsi_path path_req;
+	char *buf = NULL;
+	u16 len = 0;
+	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+	struct cnic_ulp_ops *ulp_ops;
+
+	if (cp->uio_dev == -1)
+		return -ENODEV;
+
+	if (csk) {
+		len = sizeof(path_req);
+		buf = (char *) &path_req;
+		memset(&path_req, 0, len);
+
+		msg_type = ISCSI_KEVENT_PATH_REQ;
+		path_req.handle = (u64) csk->l5_cid;
+		if (test_bit(SK_F_IPV6, &csk->flags)) {
+			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
+			       sizeof(struct in6_addr));
+			path_req.ip_addr_len = 16;
+		} else {
+			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
+			       sizeof(struct in_addr));
+			path_req.ip_addr_len = 4;
+		}
+		path_req.vlan_id = csk->vlan_id;
+		path_req.pmtu = csk->mtu;
+	}
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
+	if (ulp_ops)
+		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
+	rcu_read_unlock();
+	return 0;
+}
+
+static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
+				  char *buf, u16 len)
+{
+	int rc = -EINVAL;
+
+	switch (msg_type) {
+	case ISCSI_UEVENT_PATH_UPDATE: {
+		struct cnic_local *cp;
+		u32 l5_cid;
+		struct cnic_sock *csk;
+		struct iscsi_path *path_resp;
+
+		if (len < sizeof(*path_resp))
+			break;
+
+		path_resp = (struct iscsi_path *) buf;
+		cp = dev->cnic_priv;
+		l5_cid = (u32) path_resp->handle;
+		if (l5_cid >= MAX_CM_SK_TBL_SZ)
+			break;
+
+		csk = &cp->csk_tbl[l5_cid];
+		csk_hold(csk);
+		if (cnic_in_use(csk)) {
+			memcpy(csk->ha, path_resp->mac_addr, 6);
+			if (test_bit(SK_F_IPV6, &csk->flags))
+				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
+				       sizeof(struct in6_addr));
+			else
+				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
+				       sizeof(struct in_addr));
+			if (is_valid_ether_addr(csk->ha))
+				cnic_cm_set_pg(csk);
+		}
+		csk_put(csk);
+		rc = 0;
+	}
+	}
+
+	return rc;
+}
+
+static int cnic_offld_prep(struct cnic_sock *csk)
+{
+	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+		return 0;
+
+	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		return 0;
+	}
+
+	return 1;
+}
+
+static int cnic_close_prep(struct cnic_sock *csk)
+{
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	smp_mb__after_clear_bit();
+
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+			msleep(1);
+
+		return 1;
+	}
+	return 0;
+}
+
+static int cnic_abort_prep(struct cnic_sock *csk)
+{
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	smp_mb__after_clear_bit();
+
+	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+		msleep(1);
+
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+		return 1;
+	}
+
+	return 0;
+}
+
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
+{
+	struct cnic_dev *dev;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (cnic_ulp_tbl[ulp_type]) {
+		printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
+				    "been registered\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EBUSY;
+	}
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
+	}
+	read_unlock(&cnic_dev_lock);
+
+	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
+	mutex_unlock(&cnic_lock);
+
+	/* Prevent race conditions with netdev_event */
+	rtnl_lock();
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+			ulp_ops->cnic_init(dev);
+	}
+	read_unlock(&cnic_dev_lock);
+	rtnl_unlock();
+
+	return 0;
+}
+
+int cnic_unregister_driver(int ulp_type)
+{
+	struct cnic_dev *dev;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (!cnic_ulp_tbl[ulp_type]) {
+		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
+				    "been registered\n", ulp_type);
+		goto out_unlock;
+	}
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+			printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
+			       "still has devices registered\n", ulp_type);
+			read_unlock(&cnic_dev_lock);
+			goto out_unlock;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+
+	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+
+	mutex_unlock(&cnic_lock);
+	synchronize_rcu();
+	return 0;
+
+out_unlock:
+	mutex_unlock(&cnic_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(cnic_unregister_driver);
+
+static int cnic_start_hw(struct cnic_dev *);
+static void cnic_stop_hw(struct cnic_dev *);
+
+static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
+				void *ulp_ctx)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_ulp_ops *ulp_ops;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (cnic_ulp_tbl[ulp_type] == NULL) {
+		printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
+				    "has not been registered\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EAGAIN;
+	}
+	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+		printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
+		       "been registered to this device\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EBUSY;
+	}
+
+	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
+	cp->ulp_handle[ulp_type] = ulp_ctx;
+	ulp_ops = cnic_ulp_tbl[ulp_type];
+	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
+	cnic_hold(dev);
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
+			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
+
+	mutex_unlock(&cnic_lock);
+
+	return 0;
+}
+
+static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
+		       ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+		cnic_put(dev);
+	} else {
+		printk(KERN_ERR PFX "cnic_unregister_device: device not "
+		       "registered to this ulp type %d\n", ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&cnic_lock);
+
+	synchronize_rcu();
+
+	return 0;
+}
+
+static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
+{
+	id_tbl->start = start_id;
+	id_tbl->max = size;
+	id_tbl->next = 0;
+	spin_lock_init(&id_tbl->lock);
+	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+	if (!id_tbl->table)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
+{
+	kfree(id_tbl->table);
+	id_tbl->table = NULL;
+}
+
+static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+	int ret = -1;
+
+	id -= id_tbl->start;
+	if (id >= id_tbl->max)
+		return ret;
+
+	spin_lock(&id_tbl->lock);
+	if (!test_bit(id, id_tbl->table)) {
+		set_bit(id, id_tbl->table);
+		ret = 0;
+	}
+	spin_unlock(&id_tbl->lock);
+	return ret;
+}
+
+/* Returns -1 if not successful */
+static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
+{
+	u32 id;
+
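+	/* Search from the last allocated slot, wrapping around to the
+	 * beginning of the table if necessary.
+	 */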
+	spin_lock(&id_tbl->lock);
+	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+	if (id >= id_tbl->max) {
+		id = -1;
+		if (id_tbl->next != 0) {
+			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+			if (id >= id_tbl->next)
+				id = -1;
+		}
+	}
+
+	if (id < id_tbl->max) {
+		set_bit(id, id_tbl->table);
+		id_tbl->next = (id + 1) & (id_tbl->max - 1);
+		id += id_tbl->start;
+	}
+
+	spin_unlock(&id_tbl->lock);
+
+	return id;
+}
+
+static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+	if (id == -1)
+		return;
+
+	id -= id_tbl->start;
+	if (id >= id_tbl->max)
+		return;
+
+	clear_bit(id, id_tbl->table);
+}
+
+static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+
+	if (!dma->pg_arr)
+		return;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		if (dma->pg_arr[i]) {
+			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
+					    dma->pg_arr[i], dma->pg_map_arr[i]);
+			dma->pg_arr[i] = NULL;
+		}
+	}
+	if (dma->pgtbl) {
+		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
+				    dma->pgtbl, dma->pgtbl_map);
+		dma->pgtbl = NULL;
+	}
+	kfree(dma->pg_arr);
+	dma->pg_arr = NULL;
+	dma->num_pages = 0;
+}
+
+static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+	u32 *page_table = dma->pgtbl;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		/* Each entry needs to be in big endian format. */
+		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		page_table++;
+		*page_table = (u32) dma->pg_map_arr[i];
+		page_table++;
+	}
+}
+
+static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
+			  int pages, int use_pg_tbl)
+{
+	int i, size;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
+	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
+	if (dma->pg_arr == NULL)
+		return -ENOMEM;
+
+	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
+	dma->num_pages = pages;
+
+	for (i = 0; i < pages; i++) {
+		dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
+						      BCM_PAGE_SIZE,
+						      &dma->pg_map_arr[i]);
+		if (dma->pg_arr[i] == NULL)
+			goto error;
+	}
+	if (!use_pg_tbl)
+		return 0;
+
+	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
+			  ~(BCM_PAGE_SIZE - 1);
+	dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
+					  &dma->pgtbl_map);
+	if (dma->pgtbl == NULL)
+		goto error;
+
+	cp->setup_pgtbl(dev, dma);
+
+	return 0;
+
+error:
+	cnic_free_dma(dev, dma);
+	return -ENOMEM;
+}
+
+static void cnic_free_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i = 0;
+
+	if (cp->cnic_uinfo) {
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
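+		/* Give the userspace uio client up to ~1.5 seconds to close
+		 * the device after the IF_DOWN notification.
+		 */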
+		while (cp->uio_dev != -1 && i < 15) {
+			msleep(100);
+			i++;
+		}
+		uio_unregister_device(cp->cnic_uinfo);
+		kfree(cp->cnic_uinfo);
+		cp->cnic_uinfo = NULL;
+	}
+
+	if (cp->l2_buf) {
+		pci_free_consistent(dev->pcidev, cp->l2_buf_size,
+				    cp->l2_buf, cp->l2_buf_map);
+		cp->l2_buf = NULL;
+	}
+
+	if (cp->l2_ring) {
+		pci_free_consistent(dev->pcidev, cp->l2_ring_size,
+				    cp->l2_ring, cp->l2_ring_map);
+		cp->l2_ring = NULL;
+	}
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		if (cp->ctx_arr[i].ctx) {
+			pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
+					    cp->ctx_arr[i].ctx,
+					    cp->ctx_arr[i].mapping);
+			cp->ctx_arr[i].ctx = NULL;
+		}
+	}
+	kfree(cp->ctx_arr);
+	cp->ctx_arr = NULL;
+	cp->ctx_blks = 0;
+
+	cnic_free_dma(dev, &cp->gbl_buf_info);
+	cnic_free_dma(dev, &cp->conn_buf_info);
+	cnic_free_dma(dev, &cp->kwq_info);
+	cnic_free_dma(dev, &cp->kcq_info);
+	kfree(cp->iscsi_tbl);
+	cp->iscsi_tbl = NULL;
+	kfree(cp->ctx_tbl);
+	cp->ctx_tbl = NULL;
+
+	cnic_free_id_tbl(&cp->cid_tbl);
+}
+
+static int cnic_alloc_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+		int i, k, arr_size;
+
+		cp->ctx_blk_size = BCM_PAGE_SIZE;
+		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
+		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
+			   sizeof(struct cnic_ctx);
+		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
+		if (cp->ctx_arr == NULL)
+			return -ENOMEM;
+
+		k = 0;
+		for (i = 0; i < 2; i++) {
+			u32 j, reg, off, lo, hi;
+
+			if (i == 0)
+				off = BNX2_PG_CTX_MAP;
+			else
+				off = BNX2_ISCSI_CTX_MAP;
+
+			reg = cnic_reg_rd_ind(dev, off);
+			lo = reg >> 16;
+			hi = reg & 0xffff;
+			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
+				cp->ctx_arr[k].cid = j;
+		}
+
+		cp->ctx_blks = k;
+		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
+			cp->ctx_blks = 0;
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < cp->ctx_blks; i++) {
+			cp->ctx_arr[i].ctx =
+				pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
+						     &cp->ctx_arr[i].mapping);
+			if (cp->ctx_arr[i].ctx == NULL)
+				return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct uio_info *uinfo;
+	int ret;
+
+	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
+	if (ret)
+		goto error;
+	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
+
+	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
+	if (ret)
+		goto error;
+	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
+
+	ret = cnic_alloc_context(dev);
+	if (ret)
+		goto error;
+
+	cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
+	cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
+					   &cp->l2_ring_map);
+	if (!cp->l2_ring)
+		goto error;
+
+	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
+	cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
+					   &cp->l2_buf_map);
+	if (!cp->l2_buf)
+		goto error;
+
+	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
+	if (!uinfo)
+		goto error;
+
+	uinfo->mem[0].addr = dev->netdev->base_addr;
+	uinfo->mem[0].internal_addr = dev->regview;
+	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
+	uinfo->mem[0].memtype = UIO_MEM_PHYS;
+
+	uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
+	if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+	else
+		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
+	uinfo->mem[2].size = cp->l2_ring_size;
+	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
+	uinfo->mem[3].size = cp->l2_buf_size;
+	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->name = "bnx2_cnic";
+	uinfo->version = CNIC_MODULE_VERSION;
+	uinfo->irq = UIO_IRQ_CUSTOM;
+
+	uinfo->open = cnic_uio_open;
+	uinfo->release = cnic_uio_close;
+
+	uinfo->priv = dev;
+
+	ret = uio_register_device(&dev->pcidev->dev, uinfo);
+	if (ret) {
+		kfree(uinfo);
+		goto error;
+	}
+
+	cp->cnic_uinfo = uinfo;
+
+	return 0;
+
+error:
+	cnic_free_resc(dev);
+	return ret;
+}
+
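+/* Number of free entries in the kernel work queue ring. */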
+static inline u32 cnic_kwq_avail(struct cnic_local *cp)
+{
+	return cp->max_kwq_idx -
+		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
+}
+
+static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				  u32 num_wqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct kwqe *prod_qe;
+	u16 prod, sw_prod, i;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2 is down */
+
+	spin_lock_bh(&cp->cnic_ulp_lock);
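+	/* The KWQ_INIT flag lets the first submission after queue
+	 * initialization bypass the ring-space check.
+	 */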
+	if (num_wqes > cnic_kwq_avail(cp) &&
+	    !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
+		spin_unlock_bh(&cp->cnic_ulp_lock);
+		return -EAGAIN;
+	}
+
+	cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
+
+	prod = cp->kwq_prod_idx;
+	sw_prod = prod & MAX_KWQ_IDX;
+	for (i = 0; i < num_wqes; i++) {
+		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
+		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
+		prod++;
+		sw_prod = prod & MAX_KWQ_IDX;
+	}
+	cp->kwq_prod_idx = prod;
+
+	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
+
+	spin_unlock_bh(&cp->cnic_ulp_lock);
+	return 0;
+}
+
+static void service_kcqes(struct cnic_dev *dev, int num_cqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i, j;
+
+	i = 0;
+	j = 1;
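+	/* Group consecutive KCQEs belonging to the same protocol layer and
+	 * deliver them to the matching ULP in a single call.
+	 */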
+	while (num_cqes) {
+		struct cnic_ulp_ops *ulp_ops;
+		int ulp_type;
+		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
+		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
+
+		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
+			cnic_kwq_completion(dev, 1);
+
+		while (j < num_cqes) {
+			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
+
+			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
+				break;
+
+			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
+				cnic_kwq_completion(dev, 1);
+			j++;
+		}
+
+		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
+			ulp_type = CNIC_ULP_RDMA;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
+			ulp_type = CNIC_ULP_ISCSI;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
+			ulp_type = CNIC_ULP_L4;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
+			goto end;
+		else {
+			printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
+			       dev->netdev->name, kcqe_op_flag);
+			goto end;
+		}
+
+		rcu_read_lock();
+		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+		if (likely(ulp_ops)) {
+			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+						  cp->completed_kcq + i, j);
+		}
+		rcu_read_unlock();
+end:
+		num_cqes -= j;
+		i += j;
+		j = 1;
+	}
+	return;
+}
+
+static u16 cnic_bnx2_next_idx(u16 idx)
+{
+	return idx + 1;
+}
+
+static u16 cnic_bnx2_hw_idx(u16 idx)
+{
+	return idx;
+}
+
+static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 i, ri, last;
+	struct kcqe *kcqe;
+	int kcqe_cnt = 0, last_cnt = 0;
+
+	i = ri = last = *sw_prod;
+	ri &= MAX_KCQ_IDX;
+
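+	/* Collect KCQEs up to the hardware producer index.  Only entries up
+	 * to the last KCQE without the NEXT flag (the end of a chained
+	 * group) are counted.
+	 */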
+	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
+		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
+		cp->completed_kcq[kcqe_cnt++] = kcqe;
+		i = cp->next_idx(i);
+		ri = i & MAX_KCQ_IDX;
+		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
+			last_cnt = kcqe_cnt;
+			last = i;
+		}
+	}
+
+	*sw_prod = last;
+	return last_cnt;
+}
+
+static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
+{
+	u16 rx_cons = *cp->rx_cons_ptr;
+	u16 tx_cons = *cp->tx_cons_ptr;
+
+	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+		cp->tx_cons = tx_cons;
+		cp->rx_cons = rx_cons;
+		uio_event_notify(cp->cnic_uinfo);
+	}
+}
+
+static int cnic_service_bnx2(void *data, void *status_blk)
+{
+	struct cnic_dev *dev = data;
+	struct status_block *sblk = status_blk;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 status_idx = sblk->status_idx;
+	u16 hw_prod, sw_prod;
+	int kcqe_cnt;
+
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		return status_idx;
+
+	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+
+	hw_prod = sblk->status_completion_producer_index;
+	sw_prod = cp->kcq_prod_idx;
+	while (sw_prod != hw_prod) {
+		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+		if (kcqe_cnt == 0)
+			goto done;
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that status_blk fields can change. */
+		barrier();
+		if (status_idx != sblk->status_idx) {
+			status_idx = sblk->status_idx;
+			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+			hw_prod = sblk->status_completion_producer_index;
+		} else
+			break;
+	}
+
+done:
+	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
+
+	cp->kcq_prod_idx = sw_prod;
+
+	cnic_chk_bnx2_pkt_rings(cp);
+	return status_idx;
+}
+
+static void cnic_service_bnx2_msix(unsigned long data)
+{
+	struct cnic_dev *dev = (struct cnic_dev *) data;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct status_block_msix *status_blk = cp->bnx2_status_blk;
+	u32 status_idx = status_blk->status_idx;
+	u16 hw_prod, sw_prod;
+	int kcqe_cnt;
+
+	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+
+	hw_prod = status_blk->status_completion_producer_index;
+	sw_prod = cp->kcq_prod_idx;
+	while (sw_prod != hw_prod) {
+		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+		if (kcqe_cnt == 0)
+			goto done;
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that status_blk fields can change. */
+		barrier();
+		if (status_idx != status_blk->status_idx) {
+			status_idx = status_blk->status_idx;
+			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+			hw_prod = status_blk->status_completion_producer_index;
+		} else
+			break;
+	}
+
+done:
+	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
+	cp->kcq_prod_idx = sw_prod;
+
+	cnic_chk_bnx2_pkt_rings(cp);
+
+	cp->last_status_idx = status_idx;
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static irqreturn_t cnic_irq(int irq, void *dev_instance)
+{
+	struct cnic_dev *dev = dev_instance;
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
+
+	if (cp->ack_int)
+		cp->ack_int(dev);
+
+	prefetch(cp->status_blk);
+	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		tasklet_schedule(&cp->cnic_irq_task);
+
+	return IRQ_HANDLED;
+}
+
+static void cnic_ulp_stop(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int if_type;
+
+	rcu_read_lock();
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+		if (!ulp_ops)
+			continue;
+
+		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+	}
+	rcu_read_unlock();
+}
+
+static void cnic_ulp_start(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int if_type;
+
+	rcu_read_lock();
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+		if (!ulp_ops || !ulp_ops->cnic_start)
+			continue;
+
+		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_ctl(void *data, struct cnic_ctl_info *info)
+{
+	struct cnic_dev *dev = data;
+
+	switch (info->cmd) {
+	case CNIC_CTL_STOP_CMD:
+		cnic_hold(dev);
+		mutex_lock(&cnic_lock);
+
+		cnic_ulp_stop(dev);
+		cnic_stop_hw(dev);
+
+		mutex_unlock(&cnic_lock);
+		cnic_put(dev);
+		break;
+	case CNIC_CTL_START_CMD:
+		cnic_hold(dev);
+		mutex_lock(&cnic_lock);
+
+		if (!cnic_start_hw(dev))
+			cnic_ulp_start(dev);
+
+		mutex_unlock(&cnic_lock);
+		cnic_put(dev);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void cnic_ulp_init(struct cnic_dev *dev)
+{
+	int i;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	rcu_read_lock();
+	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
+		if (!ulp_ops || !ulp_ops->cnic_init)
+			continue;
+
+		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+			ulp_ops->cnic_init(dev);
+
+	}
+	rcu_read_unlock();
+}
+
+static void cnic_ulp_exit(struct cnic_dev *dev)
+{
+	int i;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	rcu_read_lock();
+	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
+		if (!ulp_ops || !ulp_ops->cnic_exit)
+			continue;
+
+		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+			ulp_ops->cnic_exit(dev);
+
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_cm_offload_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_offload_pg *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
+	l4kwqe->l2hdr_nbytes = ETH_HLEN;
+
+	l4kwqe->da0 = csk->ha[0];
+	l4kwqe->da1 = csk->ha[1];
+	l4kwqe->da2 = csk->ha[2];
+	l4kwqe->da3 = csk->ha[3];
+	l4kwqe->da4 = csk->ha[4];
+	l4kwqe->da5 = csk->ha[5];
+
+	l4kwqe->sa0 = dev->mac_addr[0];
+	l4kwqe->sa1 = dev->mac_addr[1];
+	l4kwqe->sa2 = dev->mac_addr[2];
+	l4kwqe->sa3 = dev->mac_addr[3];
+	l4kwqe->sa4 = dev->mac_addr[4];
+	l4kwqe->sa5 = dev->mac_addr[5];
+
+	l4kwqe->etype = ETH_P_IP;
+	l4kwqe->ipid_count = DEF_IPID_COUNT;
+	l4kwqe->host_opaque = csk->l5_cid;
+
+	if (csk->vlan_id) {
+		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
+		l4kwqe->vlan_tag = csk->vlan_id;
+		l4kwqe->l2hdr_nbytes += 4;
+	}
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_update_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_update_pg *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
+	l4kwqe->pg_cid = csk->pg_cid;
+
+	l4kwqe->da0 = csk->ha[0];
+	l4kwqe->da1 = csk->ha[1];
+	l4kwqe->da2 = csk->ha[2];
+	l4kwqe->da3 = csk->ha[3];
+	l4kwqe->da4 = csk->ha[4];
+	l4kwqe->da5 = csk->ha[5];
+
+	l4kwqe->pg_host_opaque = csk->l5_cid;
+	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_upload_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_upload *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->pg_cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_conn_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_connect_req1 *l4kwqe1;
+	struct l4_kwq_connect_req2 *l4kwqe2;
+	struct l4_kwq_connect_req3 *l4kwqe3;
+	struct kwqe *wqes[3];
+	u8 tcp_flags = 0;
+	int num_wqes = 2;
+
+	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
+	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
+	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
+	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
+	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
+	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
+
+	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
+	l4kwqe3->flags =
+		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
+	l4kwqe3->ka_timeout = csk->ka_timeout;
+	l4kwqe3->ka_interval = csk->ka_interval;
+	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
+	l4kwqe3->tos = csk->tos;
+	l4kwqe3->ttl = csk->ttl;
+	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
+	l4kwqe3->pmtu = csk->mtu;
+	l4kwqe3->rcv_buf = csk->rcv_buf;
+	l4kwqe3->snd_buf = csk->snd_buf;
+	l4kwqe3->seed = csk->seed;
+
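+	/* CONNECT1 and CONNECT3 are always sent; IPv6 adds CONNECT2 to carry
+	 * the remaining 96 bits of the source and destination addresses.
+	 */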
+	wqes[0] = (struct kwqe *) l4kwqe1;
+	if (test_bit(SK_F_IPV6, &csk->flags)) {
+		wqes[1] = (struct kwqe *) l4kwqe2;
+		wqes[2] = (struct kwqe *) l4kwqe3;
+		num_wqes = 3;
+
+		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
+		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
+		l4kwqe2->flags =
+			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
+			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
+		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
+		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
+		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
+		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
+		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
+		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
+		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
+			       sizeof(struct tcphdr);
+	} else {
+		wqes[1] = (struct kwqe *) l4kwqe3;
+		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
+			       sizeof(struct tcphdr);
+	}
+
+	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
+	l4kwqe1->flags =
+		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
+		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
+	l4kwqe1->cid = csk->cid;
+	l4kwqe1->pg_cid = csk->pg_cid;
+	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
+	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
+	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
+	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
+	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
+	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
+	if (csk->tcp_flags & SK_TCP_NAGLE)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
+	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
+	if (csk->tcp_flags & SK_TCP_SACK)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
+	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
+
+	l4kwqe1->tcp_flags = tcp_flags;
+
+	return dev->submit_kwqes(dev, wqes, num_wqes);
+}
+
+static int cnic_cm_close_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_close_req *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
+	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_abort_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_reset_req *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
+	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
+			  u32 l5_cid, struct cnic_sock **csk, void *context)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_sock *csk1;
+
+	if (l5_cid >= MAX_CM_SK_TBL_SZ)
+		return -EINVAL;
+
+	csk1 = &cp->csk_tbl[l5_cid];
+	if (atomic_read(&csk1->ref_count))
+		return -EAGAIN;
+
+	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
+		return -EBUSY;
+
+	csk1->dev = dev;
+	csk1->cid = cid;
+	csk1->l5_cid = l5_cid;
+	csk1->ulp_type = ulp_type;
+	csk1->context = context;
+
+	csk1->ka_timeout = DEF_KA_TIMEOUT;
+	csk1->ka_interval = DEF_KA_INTERVAL;
+	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
+	csk1->tos = DEF_TOS;
+	csk1->ttl = DEF_TTL;
+	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
+	csk1->rcv_buf = DEF_RCV_BUF;
+	csk1->snd_buf = DEF_SND_BUF;
+	csk1->seed = DEF_SEED;
+
+	*csk = csk1;
+	return 0;
+}
+
+static void cnic_cm_cleanup(struct cnic_sock *csk)
+{
+	if (csk->src_port) {
+		struct cnic_dev *dev = csk->dev;
+		struct cnic_local *cp = dev->cnic_priv;
+
+		cnic_free_id(&cp->csk_port_tbl, csk->src_port);
+		csk->src_port = 0;
+	}
+}
+
+static void cnic_close_conn(struct cnic_sock *csk)
+{
+	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
+		cnic_cm_upload_pg(csk);
+		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+	}
+	cnic_cm_cleanup(csk);
+}
+
+static int cnic_cm_destroy(struct cnic_sock *csk)
+{
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	csk_hold(csk);
+	clear_bit(SK_F_INUSE, &csk->flags);
+	smp_mb__after_clear_bit();
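+	/* Wait for all other holders of the socket to drop their
+	 * references.
+	 */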
+	while (atomic_read(&csk->ref_count) != 1)
+		msleep(1);
+	cnic_cm_cleanup(csk);
+
+	csk->flags = 0;
+	csk_put(csk);
+	return 0;
+}
+
+static inline u16 cnic_get_vlan(struct net_device *dev,
+				struct net_device **vlan_dev)
+{
+	if (dev->priv_flags & IFF_802_1Q_VLAN) {
+		*vlan_dev = vlan_dev_real_dev(dev);
+		return vlan_dev_vlan_id(dev);
+	}
+	*vlan_dev = dev;
+	return 0;
+}
+
+static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
+			     struct dst_entry **dst)
+{
+	struct flowi fl;
+	int err;
+	struct rtable *rt;
+
+	memset(&fl, 0, sizeof(fl));
+	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
+
+	err = ip_route_output_key(&init_net, &rt, &fl);
+	if (!err)
+		*dst = &rt->u.dst;
+	return err;
+}
+
+static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
+			     struct dst_entry **dst)
+{
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	struct flowi fl;
+
+	memset(&fl, 0, sizeof(fl));
+	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
+	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
+		fl.oif = dst_addr->sin6_scope_id;
+
+	*dst = ip6_route_output(&init_net, NULL, &fl);
+	if (*dst)
+		return 0;
+#endif
+
+	return -ENETUNREACH;
+}
+
+static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
+					   int ulp_type)
+{
+	struct cnic_dev *dev = NULL;
+	struct dst_entry *dst;
+	struct net_device *netdev = NULL;
+	int err = -ENETUNREACH;
+
+	if (dst_addr->sin_family == AF_INET)
+		err = cnic_get_v4_route(dst_addr, &dst);
+	else if (dst_addr->sin_family == AF_INET6) {
+		struct sockaddr_in6 *dst_addr6 =
+			(struct sockaddr_in6 *) dst_addr;
+
+		err = cnic_get_v6_route(dst_addr6, &dst);
+	} else
+		return NULL;
+
+	if (err)
+		return NULL;
+
+	if (!dst->dev)
+		goto done;
+
+	cnic_get_vlan(dst->dev, &netdev);
+
+	dev = cnic_from_netdev(netdev);
+
+done:
+	dst_release(dst);
+	if (dev)
+		cnic_put(dev);
+	return dev;
+}
+
+static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
+}
+
+static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+	int is_v6, err, rc = -ENETUNREACH;
+	struct dst_entry *dst;
+	struct net_device *realdev;
+	u32 local_port;
+
+	if (saddr->local.v6.sin6_family == AF_INET6 &&
+	    saddr->remote.v6.sin6_family == AF_INET6)
+		is_v6 = 1;
+	else if (saddr->local.v4.sin_family == AF_INET &&
+		 saddr->remote.v4.sin_family == AF_INET)
+		is_v6 = 0;
+	else
+		return -EINVAL;
+
+	clear_bit(SK_F_IPV6, &csk->flags);
+
+	if (is_v6) {
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+		set_bit(SK_F_IPV6, &csk->flags);
+		err = cnic_get_v6_route(&saddr->remote.v6, &dst);
+		if (err)
+			return err;
+
+		if (!dst || dst->error || !dst->dev)
+			goto err_out;
+
+		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
+		       sizeof(struct in6_addr));
+		csk->dst_port = saddr->remote.v6.sin6_port;
+		local_port = saddr->local.v6.sin6_port;
+#else
+		return rc;
+#endif
+
+	} else {
+		err = cnic_get_v4_route(&saddr->remote.v4, &dst);
+		if (err)
+			return err;
+
+		if (!dst || dst->error || !dst->dev)
+			goto err_out;
+
+		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
+		csk->dst_port = saddr->remote.v4.sin_port;
+		local_port = saddr->local.v4.sin_port;
+	}
+
+	csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
+	if (realdev != dev->netdev)
+		goto err_out;
+
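+	/* Use the requested local port if it is within the CNIC range and
+	 * free; otherwise allocate a new one from the port table.
+	 */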
+	if (local_port >= CNIC_LOCAL_PORT_MIN &&
+	    local_port < CNIC_LOCAL_PORT_MAX) {
+		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
+			local_port = 0;
+	} else
+		local_port = 0;
+
+	if (!local_port) {
+		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
+		if (local_port == -1) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+	}
+	csk->src_port = local_port;
+
+	csk->mtu = dst_mtu(dst);
+	rc = 0;
+
+err_out:
+	dst_release(dst);
+	return rc;
+}
+
+static void cnic_init_csk_state(struct cnic_sock *csk)
+{
+	csk->state = 0;
+	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+	clear_bit(SK_F_CLOSING, &csk->flags);
+}
+
+static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	int err = 0;
+
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
+		return -EINVAL;
+
+	cnic_init_csk_state(csk);
+
+	err = cnic_get_route(csk, saddr);
+	if (err)
+		goto err_out;
+
+	err = cnic_resolve_addr(csk, saddr);
+	if (!err)
+		return 0;
+
+err_out:
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	return err;
+}
+
+static int cnic_cm_abort(struct cnic_sock *csk)
+{
+	struct cnic_local *cp = csk->dev->cnic_priv;
+	u32 opcode;
+
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (cnic_abort_prep(csk))
+		return cnic_cm_abort_req(csk);
+
+	/* Getting here means that we haven't started connect, or
+	 * connect was not successful.
+	 */
+
+	csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+		opcode = csk->state;
+	else
+		opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
+	cp->close_conn(csk, opcode);
+
+	return 0;
+}
+
+static int cnic_cm_close(struct cnic_sock *csk)
+{
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (cnic_close_prep(csk)) {
+		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+		return cnic_cm_close_req(csk);
+	}
+	return 0;
+}
+
+static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
+			   u8 opcode)
+{
+	struct cnic_ulp_ops *ulp_ops;
+	int ulp_type = csk->ulp_type;
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+	if (ulp_ops) {
+		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
+			ulp_ops->cm_connect_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+			ulp_ops->cm_close_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
+			ulp_ops->cm_remote_abort(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
+			ulp_ops->cm_abort_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
+			ulp_ops->cm_remote_close(csk);
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_cm_set_pg(struct cnic_sock *csk)
+{
+	if (cnic_offld_prep(csk)) {
+		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+			cnic_cm_update_pg(csk);
+		else
+			cnic_cm_offload_pg(csk);
+	}
+	return 0;
+}
+
+static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 l5_cid = kcqe->pg_host_opaque;
+	u8 opcode = kcqe->op_code;
+	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+
+	csk_hold(csk);
+	if (!cnic_in_use(csk))
+		goto done;
+
+	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		goto done;
+	}
+	csk->pg_cid = kcqe->pg_cid;
+	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+	cnic_cm_conn_req(csk);
+
+done:
+	csk_put(csk);
+}
+
+static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
+	u8 opcode = l4kcqe->op_code;
+	u32 l5_cid;
+	struct cnic_sock *csk;
+
+	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
+	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+		cnic_cm_process_offld_pg(dev, l4kcqe);
+		return;
+	}
+
+	l5_cid = l4kcqe->conn_id;
+	if (opcode & 0x80)
+		l5_cid = l4kcqe->cid;
+	if (l5_cid >= MAX_CM_SK_TBL_SZ)
+		return;
+
+	csk = &cp->csk_tbl[l5_cid];
+	csk_hold(csk);
+
+	if (!cnic_in_use(csk)) {
+		csk_put(csk);
+		return;
+	}
+
+	switch (opcode) {
+	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
+		if (l4kcqe->status == 0)
+			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
+
+		smp_mb__before_clear_bit();
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		cnic_cm_upcall(cp, csk, opcode);
+		break;
+
+	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+		if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
+			csk->state = opcode;
+		/* fall through */
+	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+		cp->close_conn(csk, opcode);
+		break;
+
+	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
+		cnic_cm_upcall(cp, csk, opcode);
+		break;
+	}
+	csk_put(csk);
+}
+
+static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
+{
+	struct cnic_dev *dev = data;
+	int i;
+
+	for (i = 0; i < num; i++)
+		cnic_cm_process_kcqe(dev, kcqe[i]);
+}
+
+static struct cnic_ulp_ops cm_ulp_ops = {
+	.indicate_kcqes		= cnic_cm_indicate_kcqe,
+};
+
+static void cnic_cm_free_mem(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	kfree(cp->csk_tbl);
+	cp->csk_tbl = NULL;
+	cnic_free_id_tbl(&cp->csk_port_tbl);
+}
+
+static int cnic_cm_alloc_mem(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
+			      GFP_KERNEL);
+	if (!cp->csk_tbl)
+		return -ENOMEM;
+
+	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
+			     CNIC_LOCAL_PORT_MIN)) {
+		cnic_cm_free_mem(dev);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
+{
+	if ((opcode == csk->state) ||
+	    (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
+	     csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
+		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
+			return 1;
+	}
+	return 0;
+}
+
+static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	if (cnic_ready_to_close(csk, opcode)) {
+		cnic_close_conn(csk);
+		cnic_cm_upcall(cp, csk, opcode);
+	}
+}
+
+static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
+{
+}
+
+static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
+{
+	u32 seed;
+
+	get_random_bytes(&seed, 4);
+	cnic_ctx_wr(dev, 45, 0, seed);
+	return 0;
+}
+
+static int cnic_cm_open(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int err;
+
+	err = cnic_cm_alloc_mem(dev);
+	if (err)
+		return err;
+
+	err = cp->start_cm(dev);
+
+	if (err)
+		goto err_out;
+
+	dev->cm_create = cnic_cm_create;
+	dev->cm_destroy = cnic_cm_destroy;
+	dev->cm_connect = cnic_cm_connect;
+	dev->cm_abort = cnic_cm_abort;
+	dev->cm_close = cnic_cm_close;
+	dev->cm_select_dev = cnic_cm_select_dev;
+
+	cp->ulp_handle[CNIC_ULP_L4] = dev;
+	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
+	return 0;
+
+err_out:
+	cnic_cm_free_mem(dev);
+	return err;
+}
+
+static int cnic_cm_shutdown(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i;
+
+	cp->stop_cm(dev);
+
+	if (!cp->csk_tbl)
+		return 0;
+
+	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
+		struct cnic_sock *csk = &cp->csk_tbl[i];
+
+		clear_bit(SK_F_INUSE, &csk->flags);
+		cnic_cm_cleanup(csk);
+	}
+	cnic_cm_free_mem(dev);
+
+	return 0;
+}
+
+static void cnic_init_context(struct cnic_dev *dev, u32 cid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 cid_addr;
+	int i;
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5709)
+		return;
+
+	cid_addr = GET_CID_ADDR(cid);
+
+	for (i = 0; i < CTX_SIZE; i += 4)
+		cnic_ctx_wr(dev, cid_addr, i, 0);
+}
+
+static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret = 0, i;
+	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
+
+	if (CHIP_NUM(cp) != CHIP_NUM_5709)
+		return 0;
+
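+	/* Program each context page into the chip's host page table and
+	 * poll for the write request to complete.
+	 */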
+	for (i = 0; i < cp->ctx_blks; i++) {
+		int j;
+		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
+		u32 val;
+
+		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
+
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+			(u64) cp->ctx_arr[i].mapping >> 32);
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
+			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+		for (j = 0; j < 10; j++) {
+
+			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+				break;
+			udelay(5);
+		}
+		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+	return ret;
+}
+
+static void cnic_free_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		cp->disable_int_sync(dev);
+		tasklet_disable(&cp->cnic_irq_task);
+		free_irq(ethdev->irq_arr[0].vector, dev);
+	}
+}
+
+static int cnic_init_bnx2_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		int err, i = 0;
+		int sblk_num = cp->status_blk_num;
+		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+			   BNX2_HC_SB_CONFIG_1;
+
+		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
+		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
+		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
+
+		cp->bnx2_status_blk = cp->status_blk;
+		cp->last_status_idx = cp->bnx2_status_blk->status_idx;
+		tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
+			     (unsigned long) dev);
+		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
+				  "cnic", dev);
+		if (err) {
+			tasklet_disable(&cp->cnic_irq_task);
+			return err;
+		}
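+		/* Force coalescing until the KCQ completion producer index
+		 * resets to 0.
+		 */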
+		while (cp->bnx2_status_blk->status_completion_producer_index &&
+		       i < 10) {
+			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
+				1 << (11 + sblk_num));
+			udelay(10);
+			i++;
+			barrier();
+		}
+		if (cp->bnx2_status_blk->status_completion_producer_index) {
+			cnic_free_irq(dev);
+			goto failed;
+		}
+
+	} else {
+		struct status_block *sblk = cp->status_blk;
+		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
+		int i = 0;
+
+		while (sblk->status_completion_producer_index && i < 10) {
+			CNIC_WR(dev, BNX2_HC_COMMAND,
+				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+			udelay(10);
+			i++;
+			barrier();
+		}
+		if (sblk->status_completion_producer_index)
+			goto failed;
+
+	}
+	return 0;
+
+failed:
+	printk(KERN_ERR PFX "%s: KCQ index not resetting to 0.\n",
+	       dev->netdev->name);
+	return -EBUSY;
+}
+
+static void cnic_enable_bnx2_int(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		return;
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		return;
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
+	synchronize_irq(ethdev->irq_arr[0].vector);
+}
+
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 cid_addr, tx_cid, sb_id;
+	u32 val, offset0, offset1, offset2, offset3;
+	int i;
+	struct tx_bd *txbd;
+	dma_addr_t buf_map;
+	struct status_block *s_blk = cp->status_blk;
+
+	sb_id = cp->status_blk_num;
+	tx_cid = 20;
+	cnic_init_context(dev, tx_cid);
+	cnic_init_context(dev, tx_cid + 1);
+	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *sblk = cp->status_blk;
+
+		tx_cid = TX_TSS_CID + sb_id - 1;
+		cnic_init_context(dev, tx_cid);
+		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
+			(TX_TSS_CID << 7));
+		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
+	}
+	cp->tx_cons = *cp->tx_cons_ptr;
+
+	cid_addr = GET_CID_ADDR(tx_cid);
+	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
+
+		for (i = 0; i < PHY_CTX_SIZE; i += 4)
+			cnic_ctx_wr(dev, cid_addr2, i, 0);
+
+		offset0 = BNX2_L2CTX_TYPE_XI;
+		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+	} else {
+		offset0 = BNX2_L2CTX_TYPE;
+		offset1 = BNX2_L2CTX_CMD_TYPE;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+	}
+	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+	cnic_ctx_wr(dev, cid_addr, offset0, val);
+
+	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+	cnic_ctx_wr(dev, cid_addr, offset1, val);
+
+	txbd = (struct tx_bd *) cp->l2_ring;
+
+	buf_map = cp->l2_buf_map;
+	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
+		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
+		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+	}
+	val = (u64) cp->l2_ring_map >> 32;
+	cnic_ctx_wr(dev, cid_addr, offset2, val);
+	txbd->tx_bd_haddr_hi = val;
+
+	val = (u64) cp->l2_ring_map & 0xffffffff;
+	cnic_ctx_wr(dev, cid_addr, offset3, val);
+	txbd->tx_bd_haddr_lo = val;
+}
+
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 cid_addr, sb_id, val, coal_reg, coal_val;
+	int i;
+	struct rx_bd *rxbd;
+	struct status_block *s_blk = cp->status_blk;
+
+	sb_id = cp->status_blk_num;
+	cnic_init_context(dev, 2);
+	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
+	coal_reg = BNX2_HC_COMMAND;
+	coal_val = CNIC_RD(dev, coal_reg);
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *sblk = cp->status_blk;
+
+		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
+		coal_reg = BNX2_HC_COALESCE_NOW;
+		coal_val = 1 << (11 + sb_id);
+	}
+	i = 0;
+	while (*cp->rx_cons_ptr == 0 && i < 10) {
+		CNIC_WR(dev, coal_reg, coal_val);
+		udelay(10);
+		i++;
+		barrier();
+	}
+	cp->rx_cons = *cp->rx_cons_ptr;
+
+	cid_addr = GET_CID_ADDR(2);
+	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
+	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+
+	if (sb_id == 0)
+		val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
+	else
+		val = BNX2_L2CTX_STATUSB_NUM(sb_id);
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
+
+	rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
+	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
+		dma_addr_t buf_map;
+		int n = (i % cp->l2_rx_ring_size) + 1;
+
+		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
+		rxbd->rx_bd_len = cp->l2_single_buf_size;
+		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
+		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+	}
+	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+	rxbd->rx_bd_haddr_hi = val;
+
+	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+	rxbd->rx_bd_haddr_lo = val;
+
+	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
+	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
+}
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
+{
+	struct kwqe *wqes[1], l2kwqe;
+
+	memset(&l2kwqe, 0, sizeof(l2kwqe));
+	wqes[0] = &l2kwqe;
+	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
+			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
+			       KWQE_OPCODE_SHIFT) | 2;
+	dev->submit_kwqes(dev, wqes, 1);
+}
+
+static void cnic_set_bnx2_mac(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 val;
+
+	val = cp->func << 2;
+
+	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
+
+	val = cnic_reg_rd_ind(dev, cp->shmem_base +
+			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
+	dev->mac_addr[0] = (u8) (val >> 8);
+	dev->mac_addr[1] = (u8) val;
+
+	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
+
+	val = cnic_reg_rd_ind(dev, cp->shmem_base +
+			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
+	dev->mac_addr[2] = (u8) (val >> 24);
+	dev->mac_addr[3] = (u8) (val >> 16);
+	dev->mac_addr[4] = (u8) (val >> 8);
+	dev->mac_addr[5] = (u8) val;
+
+	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
+
+	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
+	if (CHIP_NUM(cp) != CHIP_NUM_5709)
+		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
+
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
+}
+
+static int cnic_start_bnx2_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct status_block *sblk = cp->status_blk;
+	u32 val;
+	int err;
+
+	cnic_set_bnx2_mac(dev);
+
+	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
+	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+	if (BCM_PAGE_BITS > 12)
+		val |= (12 - 8)  << 4;
+	else
+		val |= (BCM_PAGE_BITS - 8)  << 4;
+
+	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
+
+	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
+	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
+	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
+
+	err = cnic_setup_5709_context(dev, 1);
+	if (err)
+		return err;
+
+	cnic_init_context(dev, KWQ_CID);
+	cnic_init_context(dev, KCQ_CID);
+
+	cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+	cp->max_kwq_idx = MAX_KWQ_IDX;
+	cp->kwq_prod_idx = 0;
+	cp->kwq_con_idx = 0;
+	cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
+		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
+	else
+		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
+
+	/* Initialize the kernel work queue context. */
+	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
+
+	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+	val = (u32) cp->kwq_info.pgtbl_map;
+	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+	cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+	cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+	cp->kcq_prod_idx = 0;
+
+	/* Initialize the kernel complete queue context. */
+	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
+
+	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+	val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+	val = (u32) cp->kcq_info.pgtbl_map;
+	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+	cp->int_num = 0;
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		u32 sb_id = cp->status_blk_num;
+		u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
+
+		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
+		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+		cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+	}
+
+	/* Enable Command Scheduler notification when we write to the
+	 * host producer index of the kernel contexts. */
+	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
+
+	/* Enable Command Scheduler notification when we write to either
+	 * the Send Queue or Receive Queue producer indexes of the kernel
+	 * bypass contexts. */
+	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
+	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
+
+	/* Notify COM when the driver posts an application buffer. */
+	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
+
+	/* Set the CP and COM doorbells.  These two processors poll the
+	 * doorbell for a non-zero value before running.  This must be done
+	 * after setting up the kernel queue contexts. */
+	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
+	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
+
+	cnic_init_bnx2_tx_ring(dev);
+	cnic_init_bnx2_rx_ring(dev);
+
+	err = cnic_init_bnx2_irq(dev);
+	if (err) {
+		printk(KERN_ERR PFX "%s: cnic_init_bnx2_irq failed\n",
+		       dev->netdev->name);
+		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+		return err;
+	}
+
+	return 0;
+}
+
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EALREADY;
+
+	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+	if (err) {
+		printk(KERN_ERR PFX "%s: register_cnic failed\n",
+		       dev->netdev->name);
+		goto err2;
+	}
+
+	dev->regview = ethdev->io_base;
+	cp->chip_id = ethdev->chip_id;
+	pci_dev_get(dev->pcidev);
+	cp->func = PCI_FUNC(dev->pcidev->devfn);
+	cp->status_blk = ethdev->irq_arr[0].status_blk;
+	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
+
+	err = cp->alloc_resc(dev);
+	if (err) {
+		printk(KERN_ERR PFX "%s: allocate resource failure\n",
+		       dev->netdev->name);
+		goto err1;
+	}
+
+	err = cp->start_hw(dev);
+	if (err)
+		goto err1;
+
+	err = cnic_cm_open(dev);
+	if (err)
+		goto err1;
+
+	set_bit(CNIC_F_CNIC_UP, &dev->flags);
+
+	cp->enable_int(dev);
+
+	return 0;
+
+err1:
+	ethdev->drv_unregister_cnic(dev->netdev);
+	cp->free_resc(dev);
+	pci_dev_put(dev->pcidev);
+err2:
+	return err;
+}
+
+static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	cnic_disable_bnx2_int_sync(dev);
+
+	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+
+	cnic_init_context(dev, KWQ_CID);
+	cnic_init_context(dev, KCQ_CID);
+
+	cnic_setup_5709_context(dev, 0);
+	cnic_free_irq(dev);
+
+	ethdev->drv_unregister_cnic(dev->netdev);
+
+	cnic_free_resc(dev);
+}
+
+static void cnic_stop_hw(struct cnic_dev *dev)
+{
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
+		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+		synchronize_rcu();
+		cnic_cm_shutdown(dev);
+		cp->stop_hw(dev);
+		pci_dev_put(dev->pcidev);
+	}
+}
+
+static void cnic_free_dev(struct cnic_dev *dev)
+{
+	int i = 0;
+
+	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
+		msleep(100);
+		i++;
+	}
+	if (atomic_read(&dev->ref_count) != 0)
+		printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
+				    " to zero.\n", dev->netdev->name);
+
+	printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
+	dev_put(dev->netdev);
+	kfree(dev);
+}
+
+static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
+				       struct pci_dev *pdev)
+{
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	int alloc_size;
+
+	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
+
+	cdev = kzalloc(alloc_size, GFP_KERNEL);
+	if (cdev == NULL) {
+		printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
+		       dev->name);
+		return NULL;
+	}
+
+	cdev->netdev = dev;
+	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
+	cdev->register_device = cnic_register_device;
+	cdev->unregister_device = cnic_unregister_device;
+	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+
+	cp = cdev->cnic_priv;
+	cp->dev = cdev;
+	cp->uio_dev = -1;
+	cp->l2_single_buf_size = 0x400;
+	cp->l2_rx_ring_size = 3;
+
+	spin_lock_init(&cp->cnic_ulp_lock);
+
+	printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
+
+	return cdev;
+}
+
+static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
+{
+	struct pci_dev *pdev;
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	struct cnic_eth_dev *ethdev = NULL;
+	struct cnic_eth_dev *(*probe)(void *) = NULL;
+
+	probe = __symbol_get("bnx2_cnic_probe");
+	if (probe) {
+		ethdev = (*probe)(dev);
+		symbol_put_addr(probe);
+	}
+	if (!ethdev)
+		return NULL;
+
+	pdev = ethdev->pdev;
+	if (!pdev)
+		return NULL;
+
+	dev_hold(dev);
+	pci_dev_get(pdev);
+	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
+		u8 rev;
+
+		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+		if (rev < 0x10) {
+			pci_dev_put(pdev);
+			goto cnic_err;
+		}
+	}
+	pci_dev_put(pdev);
+
+	cdev = cnic_alloc_dev(dev, pdev);
+	if (cdev == NULL)
+		goto cnic_err;
+
+	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
+	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
+
+	cp = cdev->cnic_priv;
+	cp->ethdev = ethdev;
+	cdev->pcidev = pdev;
+
+	cp->cnic_ops = &cnic_bnx2_ops;
+	cp->start_hw = cnic_start_bnx2_hw;
+	cp->stop_hw = cnic_stop_bnx2_hw;
+	cp->setup_pgtbl = cnic_setup_page_tbl;
+	cp->alloc_resc = cnic_alloc_bnx2_resc;
+	cp->free_resc = cnic_free_resc;
+	cp->start_cm = cnic_cm_init_bnx2_hw;
+	cp->stop_cm = cnic_cm_stop_bnx2_hw;
+	cp->enable_int = cnic_enable_bnx2_int;
+	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
+	cp->close_conn = cnic_close_bnx2_conn;
+	cp->next_idx = cnic_bnx2_next_idx;
+	cp->hw_idx = cnic_bnx2_hw_idx;
+	return cdev;
+
+cnic_err:
+	dev_put(dev);
+	return NULL;
+}
+
+static struct cnic_dev *is_cnic_dev(struct net_device *dev)
+{
+	struct ethtool_drvinfo drvinfo;
+	struct cnic_dev *cdev = NULL;
+
+	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
+		memset(&drvinfo, 0, sizeof(drvinfo));
+		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+
+		if (!strcmp(drvinfo.driver, "bnx2"))
+			cdev = init_bnx2_cnic(dev);
+		if (cdev) {
+			write_lock(&cnic_dev_lock);
+			list_add(&cdev->list, &cnic_dev_list);
+			write_unlock(&cnic_dev_lock);
+		}
+	}
+	return cdev;
+}
+
+/**
+ * netdev event handler
+ */
+static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
+							 void *ptr)
+{
+	struct net_device *netdev = ptr;
+	struct cnic_dev *dev;
+	int if_type;
+	int new_dev = 0;
+
+	dev = cnic_from_netdev(netdev);
+
+	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
+		/* Check for the hot-plug device */
+		dev = is_cnic_dev(netdev);
+		if (dev) {
+			new_dev = 1;
+			cnic_hold(dev);
+		}
+	}
+	if (dev) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (new_dev)
+			cnic_ulp_init(dev);
+		else if (event == NETDEV_UNREGISTER)
+			cnic_ulp_exit(dev);
+		else if (event == NETDEV_UP) {
+			mutex_lock(&cnic_lock);
+			if (!cnic_start_hw(dev))
+				cnic_ulp_start(dev);
+			mutex_unlock(&cnic_lock);
+		}
+
+		rcu_read_lock();
+		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+			struct cnic_ulp_ops *ulp_ops;
+			void *ctx;
+
+			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+			if (!ulp_ops || !ulp_ops->indicate_netevent)
+				continue;
+
+			ctx = cp->ulp_handle[if_type];
+
+			ulp_ops->indicate_netevent(ctx, event);
+		}
+		rcu_read_unlock();
+
+		if (event == NETDEV_GOING_DOWN) {
+			mutex_lock(&cnic_lock);
+			cnic_ulp_stop(dev);
+			cnic_stop_hw(dev);
+			mutex_unlock(&cnic_lock);
+		} else if (event == NETDEV_UNREGISTER) {
+			write_lock(&cnic_dev_lock);
+			list_del_init(&dev->list);
+			write_unlock(&cnic_dev_lock);
+
+			cnic_put(dev);
+			cnic_free_dev(dev);
+			goto done;
+		}
+		cnic_put(dev);
+	}
+done:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block cnic_netdev_notifier = {
+	.notifier_call = cnic_netdev_event
+};
+
+static void cnic_release(void)
+{
+	struct cnic_dev *dev;
+
+	while (!list_empty(&cnic_dev_list)) {
+		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
+		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+			cnic_ulp_stop(dev);
+			cnic_stop_hw(dev);
+		}
+
+		cnic_ulp_exit(dev);
+		list_del_init(&dev->list);
+		cnic_free_dev(dev);
+	}
+}
+
+static int __init cnic_init(void)
+{
+	int rc = 0;
+
+	printk(KERN_INFO "%s", version);
+
+	rc = register_netdevice_notifier(&cnic_netdev_notifier);
+	if (rc) {
+		cnic_release();
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __exit cnic_exit(void)
+{
+	unregister_netdevice_notifier(&cnic_netdev_notifier);
+	cnic_release();
+	return;
+}
+
+module_init(cnic_init);
+module_exit(cnic_exit);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
new file mode 100644
index 0000000..5192d4a
--- /dev/null
+++ b/drivers/net/cnic.h
@@ -0,0 +1,299 @@
+/* cnic.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_H
+#define CNIC_H
+
+#define KWQ_PAGE_CNT	4
+#define KCQ_PAGE_CNT	16
+
+#define KWQ_CID 		24
+#define KCQ_CID 		25
+
+/*
+ *	krnlq_context definition
+ */
+#define L5_KRNLQ_FLAGS	0x00000000
+#define L5_KRNLQ_SIZE	0x00000000
+#define L5_KRNLQ_TYPE	0x00000000
+#define KRNLQ_FLAGS_PG_SZ					(0xf<<0)
+#define KRNLQ_FLAGS_PG_SZ_256					(0<<0)
+#define KRNLQ_FLAGS_PG_SZ_512					(1<<0)
+#define KRNLQ_FLAGS_PG_SZ_1K					(2<<0)
+#define KRNLQ_FLAGS_PG_SZ_2K					(3<<0)
+#define KRNLQ_FLAGS_PG_SZ_4K					(4<<0)
+#define KRNLQ_FLAGS_PG_SZ_8K					(5<<0)
+#define KRNLQ_FLAGS_PG_SZ_16K					(6<<0)
+#define KRNLQ_FLAGS_PG_SZ_32K					(7<<0)
+#define KRNLQ_FLAGS_PG_SZ_64K					(8<<0)
+#define KRNLQ_FLAGS_PG_SZ_128K					(9<<0)
+#define KRNLQ_FLAGS_PG_SZ_256K					(10<<0)
+#define KRNLQ_FLAGS_PG_SZ_512K					(11<<0)
+#define KRNLQ_FLAGS_PG_SZ_1M					(12<<0)
+#define KRNLQ_FLAGS_PG_SZ_2M					(13<<0)
+#define KRNLQ_FLAGS_QE_SELF_SEQ					(1<<15)
+#define KRNLQ_SIZE_TYPE_SIZE	((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
+#define KRNLQ_TYPE_TYPE						(0xf<<28)
+#define KRNLQ_TYPE_TYPE_EMPTY					(0<<28)
+#define KRNLQ_TYPE_TYPE_KRNLQ					(6<<28)
+
+#define L5_KRNLQ_HOST_QIDX		0x00000004
+#define L5_KRNLQ_HOST_FW_QIDX		0x00000008
+#define L5_KRNLQ_NX_QE_SELF_SEQ 	0x0000000c
+#define L5_KRNLQ_QE_SELF_SEQ_MAX	0x0000000c
+#define L5_KRNLQ_NX_QE_HADDR_HI 	0x00000010
+#define L5_KRNLQ_NX_QE_HADDR_LO 	0x00000014
+#define L5_KRNLQ_PGTBL_PGIDX		0x00000018
+#define L5_KRNLQ_NX_PG_QIDX 		0x00000018
+#define L5_KRNLQ_PGTBL_NPAGES		0x0000001c
+#define L5_KRNLQ_QIDX_INCR		0x0000001c
+#define L5_KRNLQ_PGTBL_HADDR_HI 	0x00000020
+#define L5_KRNLQ_PGTBL_HADDR_LO 	0x00000024
+
+#define BNX2_PG_CTX_MAP			0x1a0034
+#define BNX2_ISCSI_CTX_MAP		0x1a0074
+
+struct cnic_redirect_entry {
+	struct dst_entry *old_dst;
+	struct dst_entry *new_dst;
+};
+
+#define MAX_COMPLETED_KCQE	64
+
+#define MAX_CNIC_L5_CONTEXT	256
+
+#define MAX_CM_SK_TBL_SZ	MAX_CNIC_L5_CONTEXT
+
+#define MAX_ISCSI_TBL_SZ	256
+
+#define CNIC_LOCAL_PORT_MIN	60000
+#define CNIC_LOCAL_PORT_MAX	61000
+#define CNIC_LOCAL_PORT_RANGE	(CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
+
+#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
+#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
+#define MAX_KWQE_CNT (KWQE_CNT - 1)
+#define MAX_KCQE_CNT (KCQE_CNT - 1)
+
+#define MAX_KWQ_IDX	((KWQ_PAGE_CNT * KWQE_CNT) - 1)
+#define MAX_KCQ_IDX	((KCQ_PAGE_CNT * KCQE_CNT) - 1)
+
+#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
+
+#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
+
+#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) ==		\
+		(MAX_KCQE_CNT - 1)) ?					\
+		(x) + 2 : (x) + 1
+
+#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA(cp, x)						\
+	&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
+
+#define DEF_IPID_COUNT		0xc001
+
+#define DEF_KA_TIMEOUT		10000
+#define DEF_KA_INTERVAL		300000
+#define DEF_KA_MAX_PROBE_COUNT	3
+#define DEF_TOS			0
+#define DEF_TTL			0xfe
+#define DEF_SND_SEQ_SCALE	0
+#define DEF_RCV_BUF		0xffff
+#define DEF_SND_BUF		0xffff
+#define DEF_SEED		0
+#define DEF_MAX_RT_TIME		500
+#define DEF_MAX_DA_COUNT	2
+#define DEF_SWS_TIMER		1000
+#define DEF_MAX_CWND		0xffff
+
+struct cnic_ctx {
+	u32		cid;
+	void		*ctx;
+	dma_addr_t	mapping;
+};
+
+#define BNX2_MAX_CID		0x2000
+
+struct cnic_dma {
+	int		num_pages;
+	void		**pg_arr;
+	dma_addr_t	*pg_map_arr;
+	int		pgtbl_size;
+	u32		*pgtbl;
+	dma_addr_t	pgtbl_map;
+};
+
+struct cnic_id_tbl {
+	spinlock_t	lock;
+	u32		start;
+	u32		max;
+	u32		next;
+	unsigned long	*table;
+};
+
+#define CNIC_KWQ16_DATA_SIZE	128
+
+struct kwqe_16_data {
+	u8	data[CNIC_KWQ16_DATA_SIZE];
+};
+
+struct cnic_iscsi {
+	struct cnic_dma		task_array_info;
+	struct cnic_dma		r2tq_info;
+	struct cnic_dma		hq_info;
+};
+
+struct cnic_context {
+	u32			cid;
+	struct kwqe_16_data	*kwqe_data;
+	dma_addr_t		kwqe_data_mapping;
+	wait_queue_head_t	waitq;
+	int			wait_cond;
+	unsigned long		timestamp;
+	u32			ctx_flags;
+#define	CTX_FL_OFFLD_START	0x00000001
+	u8			ulp_proto_id;
+	union {
+		struct cnic_iscsi	*iscsi;
+	} proto;
+};
+
+struct cnic_local {
+
+	spinlock_t cnic_ulp_lock;
+	void *ulp_handle[MAX_CNIC_ULP_TYPE];
+	unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
+#define ULP_F_INIT	0
+#define ULP_F_START	1
+	struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+
+	/* protected by ulp_lock */
+	u32 cnic_local_flags;
+#define	CNIC_LCL_FL_KWQ_INIT	0x00000001
+
+	struct cnic_dev *dev;
+
+	struct cnic_eth_dev *ethdev;
+
+	void		*l2_ring;
+	dma_addr_t	l2_ring_map;
+	int		l2_ring_size;
+	int		l2_rx_ring_size;
+
+	void		*l2_buf;
+	dma_addr_t	l2_buf_map;
+	int		l2_buf_size;
+	int		l2_single_buf_size;
+
+	u16		*rx_cons_ptr;
+	u16		*tx_cons_ptr;
+	u16		rx_cons;
+	u16		tx_cons;
+
+	u32 kwq_cid_addr;
+	u32 kcq_cid_addr;
+
+	struct cnic_dma		kwq_info;
+	struct kwqe		**kwq;
+
+	struct cnic_dma		kwq_16_data_info;
+
+	u16		max_kwq_idx;
+
+	u16		kwq_prod_idx;
+	u32		kwq_io_addr;
+
+	u16		*kwq_con_idx_ptr;
+	u16		kwq_con_idx;
+
+	struct cnic_dma	kcq_info;
+	struct kcqe	**kcq;
+
+	u16		kcq_prod_idx;
+	u32		kcq_io_addr;
+
+	void				*status_blk;
+	struct status_block_msix	*bnx2_status_blk;
+	struct host_status_block	*bnx2x_status_blk;
+
+	u32				status_blk_num;
+	u32				int_num;
+	u32				last_status_idx;
+	struct tasklet_struct		cnic_irq_task;
+
+	struct kcqe		*completed_kcq[MAX_COMPLETED_KCQE];
+
+	struct cnic_sock	*csk_tbl;
+	struct cnic_id_tbl	csk_port_tbl;
+
+	struct cnic_dma		conn_buf_info;
+	struct cnic_dma		gbl_buf_info;
+
+	struct cnic_iscsi	*iscsi_tbl;
+	struct cnic_context	*ctx_tbl;
+	struct cnic_id_tbl	cid_tbl;
+	int			max_iscsi_conn;
+	atomic_t		iscsi_conn;
+
+	/* per connection parameters */
+	int			num_iscsi_tasks;
+	int			num_ccells;
+	int			task_array_size;
+	int			r2tq_size;
+	int			hq_size;
+	int			num_cqs;
+
+	struct cnic_ctx		*ctx_arr;
+	int			ctx_blks;
+	int			ctx_blk_size;
+	int			cids_per_blk;
+
+	u32			chip_id;
+	int			func;
+	u32			shmem_base;
+
+	u32			uio_dev;
+	struct uio_info		*cnic_uinfo;
+
+	struct cnic_ops		*cnic_ops;
+	int			(*start_hw)(struct cnic_dev *);
+	void			(*stop_hw)(struct cnic_dev *);
+	void			(*setup_pgtbl)(struct cnic_dev *,
+					       struct cnic_dma *);
+	int			(*alloc_resc)(struct cnic_dev *);
+	void			(*free_resc)(struct cnic_dev *);
+	int			(*start_cm)(struct cnic_dev *);
+	void			(*stop_cm)(struct cnic_dev *);
+	void			(*enable_int)(struct cnic_dev *);
+	void			(*disable_int_sync)(struct cnic_dev *);
+	void			(*ack_int)(struct cnic_dev *);
+	void			(*close_conn)(struct cnic_sock *, u32 opcode);
+	u16			(*next_idx)(u16);
+	u16			(*hw_idx)(u16);
+};
+
+struct bnx2x_bd_chain_next {
+	u32	addr_lo;
+	u32	addr_hi;
+	u8	reserved[8];
+};
+
+#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN		(ISCSI_KCQE_OPCODE_UPDATE_CONN)
+#define ISCSI_RAMROD_CMD_ID_INIT		(ISCSI_KCQE_OPCODE_INIT)
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+#endif
+
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 0000000..cee80f6
--- /dev/null
+++ b/drivers/net/cnic_defs.h
@@ -0,0 +1,580 @@
+
+/* cnic.c: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef CNIC_DEFS_H
+#define CNIC_DEFS_H
+
+/* KWQ (kernel work queue) request op codes */
+#define L2_KWQE_OPCODE_VALUE_FLUSH                  (4)
+
+#define L4_KWQE_OPCODE_VALUE_CONNECT1               (50)
+#define L4_KWQE_OPCODE_VALUE_CONNECT2               (51)
+#define L4_KWQE_OPCODE_VALUE_CONNECT3               (52)
+#define L4_KWQE_OPCODE_VALUE_RESET                  (53)
+#define L4_KWQE_OPCODE_VALUE_CLOSE                  (54)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET          (60)
+#define L4_KWQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+#define L5CM_RAMROD_CMD_ID_BASE			(0x80)
+#define L5CM_RAMROD_CMD_ID_TCP_CONNECT		(L5CM_RAMROD_CMD_ID_BASE + 3)
+#define L5CM_RAMROD_CMD_ID_CLOSE		(L5CM_RAMROD_CMD_ID_BASE + 12)
+#define L5CM_RAMROD_CMD_ID_ABORT		(L5CM_RAMROD_CMD_ID_BASE + 13)
+#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE	(L5CM_RAMROD_CMD_ID_BASE + 14)
+#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD	(L5CM_RAMROD_CMD_ID_BASE + 15)
+
+/* KCQ (kernel completion queue) response op codes */
+#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP             (53)
+#define L4_KCQE_OPCODE_VALUE_RESET_COMP             (54)
+#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE          (55)
+#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE       (56)
+#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED         (57)
+#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED         (58)
+#define L4_KCQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KCQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+/* KCQ (kernel completion queue) completion status */
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS		    (0)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT        (0x93)
+
+#define L4_LAYER_CODE (4)
+#define L2_LAYER_CODE (2)
+
+/*
+ * L4 KCQ CQE
+ */
+struct l4_kcq {
+	u32 cid;
+	u32 pg_cid;
+	u32 conn_id;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u16 status;
+	u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved1;
+	u16 status;
+#endif
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0xF<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * L4 KCQ CQE PG upload
+ */
+struct l4_kcq_upload_pg {
+	u32 pg_cid;
+#if defined(__BIG_ENDIAN)
+	u16 pg_status;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u16 pg_status;
+#endif
+	u32 reserved1[5];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * Gracefully close the connection request
+ */
+struct l4_kwq_close_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * The first request to be passed in order to establish connection in option2
+ */
+struct l4_kwq_connect_req1 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 pg_cid;
+	u32 src_ip;
+	u32 dst_ip;
+#if defined(__BIG_ENDIAN)
+	u16 dst_port;
+	u16 src_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 src_port;
+	u16 dst_port;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 rsrv1[3];
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+	u8 rsrv1[3];
+#endif
+	u32 rsrv2;
+};
+
+
+/*
+ * The second ( optional )request to be passed in order to establish
+ * connection in option2 - for IPv6 only
+ */
+struct l4_kwq_connect_req2 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 rsrv;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rsrv;
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 reserved2;
+	u32 src_ip_v6_2;
+	u32 src_ip_v6_3;
+	u32 src_ip_v6_4;
+	u32 dst_ip_v6_2;
+	u32 dst_ip_v6_3;
+	u32 dst_ip_v6_4;
+};
+
+
+/*
+ * The third ( and last )request to be passed in order to establish
+ * connection in option2
+ */
+struct l4_kwq_connect_req3 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 ka_timeout;
+	u32 ka_interval ;
+#if defined(__BIG_ENDIAN)
+	u8 snd_seq_scale;
+	u8 ttl;
+	u8 tos;
+	u8 ka_max_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_max_probe_count;
+	u8 tos;
+	u8 ttl;
+	u8 snd_seq_scale;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 pmtu;
+	u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mss;
+	u16 pmtu;
+#endif
+	u32 rcv_buf;
+	u32 snd_buf;
+	u32 seed;
+};
+
+
+/*
+ * a KWQE request to offload a PG connection
+ */
+struct l4_kwq_offload_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 l2hdr_nbytes;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 l2hdr_nbytes;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa0;
+	u8 sa1;
+	u8 sa2;
+	u8 sa3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sa3;
+	u8 sa2;
+	u8 sa1;
+	u8 sa0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa4;
+	u8 sa5;
+	u16 etype;
+#elif defined(__LITTLE_ENDIAN)
+	u16 etype;
+	u8 sa5;
+	u8 sa4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 vlan_tag;
+	u16 ipid_start;
+#elif defined(__LITTLE_ENDIAN)
+	u16 ipid_start;
+	u16 vlan_tag;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ipid_count;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 ipid_count;
+#endif
+	u32 host_opaque;
+};
+
+
+/*
+ * Abortively close the connection request
+ */
+struct l4_kwq_reset_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * a KWQE request to update a PG connection
+ */
+struct l4_kwq_update_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 pg_cid;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+	u8 pg_unused_a;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u8 pg_unused_a;
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserverd3;
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u16 reserverd3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+	u32 reserved4;
+	u32 reserved5;
+};
+
+
+/*
+ * a KWQE request to upload a PG or L4 context
+ */
+struct l4_kwq_upload {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+#endif /* CNIC_DEFS_H */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
new file mode 100644
index 0000000..0638096
--- /dev/null
+++ b/drivers/net/cnic_if.h
@@ -0,0 +1,299 @@
+/* cnic_if.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_IF_H
+#define CNIC_IF_H
+
+#define CNIC_MODULE_VERSION	"2.0.0"
+#define CNIC_MODULE_RELDATE	"May 21, 2009"
+
+#define CNIC_ULP_RDMA		0
+#define CNIC_ULP_ISCSI		1
+#define CNIC_ULP_L4		2
+#define MAX_CNIC_ULP_TYPE_EXT	2
+#define MAX_CNIC_ULP_TYPE	3
+
+struct kwqe {
+	u32 kwqe_op_flag;
+
+#define KWQE_OPCODE_MASK	0x00ff0000
+#define KWQE_OPCODE_SHIFT	16
+#define KWQE_FLAGS_LAYER_SHIFT	28
+#define KWQE_OPCODE(x)		((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
+
+	u32 kwqe_info0;
+	u32 kwqe_info1;
+	u32 kwqe_info2;
+	u32 kwqe_info3;
+	u32 kwqe_info4;
+	u32 kwqe_info5;
+	u32 kwqe_info6;
+};
+
+struct kwqe_16 {
+	u32 kwqe_info0;
+	u32 kwqe_info1;
+	u32 kwqe_info2;
+	u32 kwqe_info3;
+};
+
+struct kcqe {
+	u32 kcqe_info0;
+	u32 kcqe_info1;
+	u32 kcqe_info2;
+	u32 kcqe_info3;
+	u32 kcqe_info4;
+	u32 kcqe_info5;
+	u32 kcqe_info6;
+	u32 kcqe_op_flag;
+		#define KCQE_RAMROD_COMPLETION		(0x1<<27) /* Everest */
+		#define KCQE_FLAGS_LAYER_MASK		(0x7<<28)
+		#define KCQE_FLAGS_LAYER_MASK_MISC	(0<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L2	(2<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L3	(3<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L4	(4<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
+		#define KCQE_FLAGS_NEXT 		(1<<31)
+		#define KCQE_FLAGS_OPCODE_MASK		(0xff<<16)
+		#define KCQE_FLAGS_OPCODE_SHIFT		(16)
+		#define KCQE_OPCODE(op)			\
+		(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
+};
+
+#define MAX_CNIC_CTL_DATA	64
+#define MAX_DRV_CTL_DATA	64
+
+#define CNIC_CTL_STOP_CMD		1
+#define CNIC_CTL_START_CMD		2
+#define CNIC_CTL_COMPLETION_CMD		3
+
+#define DRV_CTL_IO_WR_CMD		0x101
+#define DRV_CTL_IO_RD_CMD		0x102
+#define DRV_CTL_CTX_WR_CMD		0x103
+#define DRV_CTL_CTXTBL_WR_CMD		0x104
+#define DRV_CTL_COMPLETION_CMD		0x105
+
+struct cnic_ctl_completion {
+	u32	cid;
+};
+
+struct drv_ctl_completion {
+	u32	comp_count;
+};
+
+struct cnic_ctl_info {
+	int	cmd;
+	union {
+		struct cnic_ctl_completion comp;
+		char bytes[MAX_CNIC_CTL_DATA];
+	} data;
+};
+
+struct drv_ctl_io {
+	u32		cid_addr;
+	u32		offset;
+	u32		data;
+	dma_addr_t	dma_addr;
+};
+
+struct drv_ctl_info {
+	int	cmd;
+	union {
+		struct drv_ctl_completion comp;
+		struct drv_ctl_io io;
+		char bytes[MAX_DRV_CTL_DATA];
+	} data;
+};
+
+struct cnic_ops {
+	struct module	*cnic_owner;
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+	int		(*cnic_handler)(void *, void *);
+	int		(*cnic_ctl)(void *, struct cnic_ctl_info *);
+};
+
+#define MAX_CNIC_VEC	8
+
+struct cnic_irq {
+	unsigned int	vector;
+	void		*status_blk;
+	u32		status_blk_num;
+	u32		irq_flags;
+#define CNIC_IRQ_FL_MSIX		0x00000001
+};
+
+struct cnic_eth_dev {
+	struct module	*drv_owner;
+	u32		drv_state;
+#define CNIC_DRV_STATE_REGD		0x00000001
+#define CNIC_DRV_STATE_USING_MSIX	0x00000002
+	u32		chip_id;
+	u32		max_kwqe_pending;
+	struct pci_dev	*pdev;
+	void __iomem	*io_base;
+
+	u32		ctx_tbl_offset;
+	u32		ctx_tbl_len;
+	int		ctx_blk_size;
+	u32		starting_cid;
+	u32		max_iscsi_conn;
+	u32		max_fcoe_conn;
+	u32		max_rdma_conn;
+	u32		reserved0[2];
+
+	int		num_irq;
+	struct cnic_irq	irq_arr[MAX_CNIC_VEC];
+	int		(*drv_register_cnic)(struct net_device *,
+					     struct cnic_ops *, void *);
+	int		(*drv_unregister_cnic)(struct net_device *);
+	int		(*drv_submit_kwqes_32)(struct net_device *,
+					       struct kwqe *[], u32);
+	int		(*drv_submit_kwqes_16)(struct net_device *,
+					       struct kwqe_16 *[], u32);
+	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+	unsigned long	reserved1[2];
+};
+
+struct cnic_sockaddr {
+	union {
+		struct sockaddr_in	v4;
+		struct sockaddr_in6	v6;
+	} local;
+	union {
+		struct sockaddr_in	v4;
+		struct sockaddr_in6	v6;
+	} remote;
+};
+
+struct cnic_sock {
+	struct cnic_dev *dev;
+	void	*context;
+	u32	src_ip[4];
+	u32	dst_ip[4];
+	u16	src_port;
+	u16	dst_port;
+	u16	vlan_id;
+	unsigned char old_ha[6];
+	unsigned char ha[6];
+	u32	mtu;
+	u32	cid;
+	u32	l5_cid;
+	u32	pg_cid;
+	int	ulp_type;
+
+	u32	ka_timeout;
+	u32	ka_interval;
+	u8	ka_max_probe_count;
+	u8	tos;
+	u8	ttl;
+	u8	snd_seq_scale;
+	u32	rcv_buf;
+	u32	snd_buf;
+	u32	seed;
+
+	unsigned long	tcp_flags;
+#define SK_TCP_NO_DELAY_ACK	0x1
+#define SK_TCP_KEEP_ALIVE	0x2
+#define SK_TCP_NAGLE		0x4
+#define SK_TCP_TIMESTAMP	0x8
+#define SK_TCP_SACK		0x10
+#define SK_TCP_SEG_SCALING	0x20
+	unsigned long	flags;
+#define SK_F_INUSE		0
+#define SK_F_OFFLD_COMPLETE	1
+#define SK_F_OFFLD_SCHED	2
+#define SK_F_PG_OFFLD_COMPLETE	3
+#define SK_F_CONNECT_START	4
+#define SK_F_IPV6		5
+#define SK_F_CLOSING		7
+
+	atomic_t ref_count;
+	u32 state;
+	struct kwqe kwqe1;
+	struct kwqe kwqe2;
+	struct kwqe kwqe3;
+};
+
+struct cnic_dev {
+	struct net_device	*netdev;
+	struct pci_dev		*pcidev;
+	void __iomem		*regview;
+	struct list_head	list;
+
+	int (*register_device)(struct cnic_dev *dev, int ulp_type,
+			       void *ulp_ctx);
+	int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
+	int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
+				u32 num_wqes);
+	int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
+				u32 num_wqes);
+
+	int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
+			 void *);
+	int (*cm_destroy)(struct cnic_sock *);
+	int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
+	int (*cm_abort)(struct cnic_sock *);
+	int (*cm_close)(struct cnic_sock *);
+	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
+	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
+				 char *data, u16 data_size);
+	unsigned long	flags;
+#define CNIC_F_CNIC_UP		1
+#define CNIC_F_BNX2_CLASS	3
+#define CNIC_F_BNX2X_CLASS	4
+	atomic_t	ref_count;
+	u8		mac_addr[6];
+
+	int		max_iscsi_conn;
+	int		max_fcoe_conn;
+	int		max_rdma_conn;
+
+	void		*cnic_priv;
+};
+
+#define CNIC_WR(dev, off, val)		writel(val, dev->regview + off)
+#define CNIC_WR16(dev, off, val)	writew(val, dev->regview + off)
+#define CNIC_WR8(dev, off, val)		writeb(val, dev->regview + off)
+#define CNIC_RD(dev, off)		readl(dev->regview + off)
+#define CNIC_RD16(dev, off)		readw(dev->regview + off)
+
+struct cnic_ulp_ops {
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+
+	void (*cnic_init)(struct cnic_dev *dev);
+	void (*cnic_exit)(struct cnic_dev *dev);
+	void (*cnic_start)(void *ulp_ctx);
+	void (*cnic_stop)(void *ulp_ctx);
+	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
+				u32 num_cqes);
+	void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
+	void (*cm_connect_complete)(struct cnic_sock *);
+	void (*cm_close_complete)(struct cnic_sock *);
+	void (*cm_abort_complete)(struct cnic_sock *);
+	void (*cm_remote_close)(struct cnic_sock *);
+	void (*cm_remote_abort)(struct cnic_sock *);
+	void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
+				  char *data, u16 data_size);
+	struct module *owner;
+};
+
+extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+
+extern int cnic_unregister_driver(int ulp_type);
+
+#endif
-- 
1.5.6.GIT





^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/4] bnx2i: Add bnx2i iSCSI driver.
  2009-05-23 21:11 ` [PATCH 4/4] bnx2i: Add bnx2i iSCSI driver Michael Chan
@ 2009-05-26 16:37   ` Grant Grundler
  2009-05-26 16:49     ` Michael Chan
  0 siblings, 1 reply; 11+ messages in thread
From: Grant Grundler @ 2009-05-26 16:37 UTC (permalink / raw)
  To: Michael Chan
  Cc: James.Bottomley, michaelc, davem, linux-scsi, open-iscsi, anilgv, benli

On Sat, May 23, 2009 at 2:11 PM, Michael Chan <mchan@broadcom.com> wrote:
> New iSCSI driver for Broadcom BNX2 devices.
...
> +/*
> + * iSCSI Async CQE
> + */
> +struct bnx2i_async_msg {
...
> +#if defined(__BIG_ENDIAN)
> +       u8 async_event;
> +       u8 async_vcode;
> +       u16 param1;
> +#elif defined(__LITTLE_ENDIAN)
> +       u16 param1;
> +       u8 async_vcode;
> +       u8 async_event;
> +#endif
...

Michael,
I'm feeling a bit dense and am not seeing why byte data
would have to worry about the 32-bit word endianness of the CPU.
Can you give an example of why defined(__BIG_ENDIAN) is needed?

Normally the _*ENDIAN defines are used for bit fields, not byte fields.

Byte data addressable by the CPU (e.g. host memory) is at the same offset
regardless of endianness of the CPU. I feel like I'm missing
something that should be obvious.

thanks,
grant

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/4] bnx2i: Add bnx2i iSCSI driver.
  2009-05-26 16:37   ` Grant Grundler
@ 2009-05-26 16:49     ` Michael Chan
  2009-05-26 17:03       ` Grant Grundler
  0 siblings, 1 reply; 11+ messages in thread
From: Michael Chan @ 2009-05-26 16:49 UTC (permalink / raw)
  To: Grant Grundler
  Cc: James.Bottomley, michaelc, davem, linux-scsi, open-iscsi,
	Anil Veerabhadrappa, Benjamin Li


On Tue, 2009-05-26 at 09:37 -0700, Grant Grundler wrote:
> On Sat, May 23, 2009 at 2:11 PM, Michael Chan <mchan@broadcom.com> wrote:
> > New iSCSI driver for Broadcom BNX2 devices.
> ...
> > +/*
> > + * iSCSI Async CQE
> > + */
> > +struct bnx2i_async_msg {
> ...
> > +#if defined(__BIG_ENDIAN)
> > +       u8 async_event;
> > +       u8 async_vcode;
> > +       u16 param1;
> > +#elif defined(__LITTLE_ENDIAN)
> > +       u16 param1;
> > +       u8 async_vcode;
> > +       u8 async_event;
> > +#endif
> ...
> 
> Michael,
> I'm feeling a bit dense and am not seeing why byte data
> would have to worry about the 32-bit word endianness of the CPU.
> Can you give an example of why defined(__BIG_ENDIAN) is needed?
> 
> Normally the _*ENDIAN defines are used for bit fields, not byte fields.
> 
> Byte data addressable by the CPU (e.g. host memory) is at the same offset
> regardless of endianness of the CPU. I feel like I'm missing
> something that should be obvious.

Hi Grant, these are what we call "DMA control structures" as opposed to
DMA packet data.  Our chips are configured to do an additional 32-bit
endian swap on all DMA control structures.  This way, all 32-bit control
fields (such as length, status, etc.) will come out right when the driver
reads these fields.

If everything were defined as u32 in all these control structures, we
wouldn't have to add the #ifdef.  u8 and u16 fields have to be defined
this way or else big-endian CPUs would read them at the wrong offsets.

If you look at some of the control structures in tg3.h, you'll see the
same thing.
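
To make the layout rule concrete, here is a minimal sketch.  The struct,
field names, and bit positions are illustrative only, not taken from the
driver; it assumes the kernel's u8/u16/u32 types and the
__BIG_ENDIAN/__LITTLE_ENDIAN defines from <asm/byteorder.h>:

#include <linux/types.h>	/* u8, u16, u32 */
#include <asm/byteorder.h>	/* __BIG_ENDIAN or __LITTLE_ENDIAN */

/*
 * Hypothetical control word.  The firmware view is:
 *   bits 31..24 = event, bits 23..16 = vcode, bits 15..0 = param
 * Because the chip byte-swaps each 32-bit word of a control structure
 * during DMA, reading the word as a u32 gives the same value on LE and
 * BE hosts.  To overlay u8/u16 sub-fields on that word, the declaration
 * order must flip with host endianness so both map to the same bits.
 */
struct example_ctrl_word {
#if defined(__BIG_ENDIAN)
	u8	event;			/* byte 0 = bits 31..24 on BE */
	u8	vcode;
	u16	param;
#elif defined(__LITTLE_ENDIAN)
	u16	param;
	u8	vcode;
	u8	event;			/* byte 3 = bits 31..24 on LE */
#endif
};

/* The same fields extracted from the word as a plain u32 (no #ifdef): */
static inline u8  example_event(u32 w) { return w >> 24; }
static inline u8  example_vcode(u32 w) { return (w >> 16) & 0xff; }
static inline u16 example_param(u32 w) { return w & 0xffff; }

Either declaration overlays the same bits of the already-swapped 32-bit
word, which is why only the u8/u16 sub-fields need the #ifdef while u32
fields do not.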

Thanks.



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/4] bnx2i: Add bnx2i iSCSI driver.
  2009-05-26 16:49     ` Michael Chan
@ 2009-05-26 17:03       ` Grant Grundler
  0 siblings, 0 replies; 11+ messages in thread
From: Grant Grundler @ 2009-05-26 17:03 UTC (permalink / raw)
  To: Michael Chan
  Cc: James.Bottomley, michaelc, davem, linux-scsi, open-iscsi,
	Anil Veerabhadrappa, Benjamin Li

On Tue, May 26, 2009 at 9:49 AM, Michael Chan <mchan@broadcom.com> wrote:
...
> Hi Grant, these are what we call "DMA control structures" as opposed to
> DMA packet data.  Our chips are configured to do an additional 32-bit
> endian swap on all DMA control structures.  This way, all 32-bit control
> fields (such as length, status, etc) will come out right when the driver
> reads these fields.
>
> If everything was defined as u32 in all these control structures, we
> wouldn't have to add the #ifdef.  u8 and u16 fields have to be defined
> this way or else big endian CPUs would read the wrong offset.
>
> If you look at some of the control structures in tg3.h, you'll see the
> same thing.

Ah, OK... so the chip is configured to know about host endianness.
I'm familiar with tg3 having that and just hadn't noticed it here yet.

thanks,
grant

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 0/4] Add bnx2i driver.
       [not found] ` <1243113110-29635-1-git-send-email-mchan-dY08KVG/lbpWk0Htik3J/w@public.gmane.org>
  2009-05-23 21:11   ` [PATCH 3/4] cnic: Add new Broadcom " Michael Chan
@ 2009-05-27  2:35   ` Mike Christie
  1 sibling, 0 replies; 11+ messages in thread
From: Mike Christie @ 2009-05-27  2:35 UTC (permalink / raw)
  To: open-iscsi-/JYPxA39Uh5TLH3MbocFFw
  Cc: James.Bottomley-d9PhHud1JfjCXq6kfMZ53/egYHeGw8Jk,
	davem-fT/PcQaiUtIeIZ0/mPfg9Q, linux-scsi-u79uwXL29TY76Z2rM5mHXA,
	anilgv-dY08KVG/lbpWk0Htik3J/w, benli-dY08KVG/lbpWk0Htik3J/w


Michael Chan wrote:
> 
> James, the next 4 patches are our latest to add the bnx2i
> driver.  We've fixed up all the issues that Mike brought up.
> Please consider applying them to scsi-misc-2.6.
> 

Hey James,

We forgot to cc you and the linux list on some other postings.  Dave
Miller had acked the drivers/net parts of the patches here:
http://marc.info/?l=linux-netdev&m=124082705613349&w=2
and in that mail said it was OK to send them through the scsi tree.

There is just a tiny comment issue in one of the patches that I think
is probably OK to fix up after the merge into scsi-misc: a code snippet
was commented out when it should have been removed.

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2009-05-27  2:35 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-05-23 21:11 [PATCH 0/4] Add bnx2i driver Michael Chan
2009-05-23 21:11 ` [PATCH 1/4] iscsi class: Add new NETLINK_ISCSI messages for cnic/bnx2i driver Michael Chan
2009-05-23 21:11 ` [PATCH 2/4] bnx2: Add support for CNIC driver Michael Chan
     [not found] ` <1243113110-29635-1-git-send-email-mchan-dY08KVG/lbpWk0Htik3J/w@public.gmane.org>
2009-05-23 21:11   ` [PATCH 3/4] cnic: Add new Broadcom " Michael Chan
2009-05-25 15:19     ` Rolf Eike Beer
2009-05-26  5:35       ` Michael Chan
2009-05-27  2:35   ` [PATCH 0/4] Add bnx2i driver Mike Christie
2009-05-23 21:11 ` [PATCH 4/4] bnx2i: Add bnx2i iSCSI driver Michael Chan
2009-05-26 16:37   ` Grant Grundler
2009-05-26 16:49     ` Michael Chan
2009-05-26 17:03       ` Grant Grundler
