* Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
@ 2009-11-17  8:30 Rasesh Mody
  2009-11-17  8:59 ` David Miller
  0 siblings, 1 reply; 30+ messages in thread
From: Rasesh Mody @ 2009-11-17  8:30 UTC (permalink / raw)
  To: netdev; +Cc: adapter_linux_open_src_team

From: Rasesh Mody <rmody@brocade.com>

This is patch 1/6, which contains the Linux driver source for
Brocade's BR1010/BR1020 10Gb CEE-capable Ethernet adapter.
The source is based on net-next-2.6.

We wish this patch to be considered for inclusion in net-next-2.6.

Signed-off-by: Rasesh Mody <rmody@brocade.com>
---
 bnad.c | 3709 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 bnad.h |  350 ++++++
 2 files changed, 4059 insertions(+)

diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.c net-next-2.6-mod/drivers/net/bna/bnad.c
--- net-next-2.6-orig/drivers/net/bna/bnad.c	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.c	2009-11-17 00:05:36.584576000 -0800
@@ -0,0 +1,3709 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ *  bnad.c  Brocade 10G PCIe Ethernet driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/workqueue.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pm.h>
+#include <linux/random.h>
+
+#include <net/checksum.h>
+
+#include "bnad.h"
+#include <cna.h>
+#include "bna_iocll.h"
+#include "bna_intr.h"
+#include "bnad_defs.h"
+
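+/* Each Tx work item carries up to four transmit vectors, so round up. */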
+#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
+static uint bnad_msix = 1;
+static uint bnad_small_large_rxbufs = 1;
+static uint bnad_rxqsets_used;
+static uint bnad_ipid_mode;
+static uint bnad_vlan_strip = 1;
+static uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
+static uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
+static uint bnad_log_level;
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");
+
+uint bnad_rxqs_per_cq = 2;
+
+const char *bnad_states[] = {
+	"START",
+	"INIT",
+	"INIT_DOWN",
+	"INIT_DISABLING",
+	"INIT_DISABLED",
+	"OPENING",
+	"OPEN",
+	"OPEN_DOWN",
+	"OPEN_DISABLING",
+	"OPEN_DISABLED",
+	"CLOSING",
+	"UNLOADING"
+};
+
+
+static void bnad_disable_msix(struct bnad *bnad);
+static void bnad_free_ibs(struct bnad *bnad);
+static void bnad_set_rx_mode(struct net_device *netdev);
+static void bnad_set_rx_mode_locked(struct net_device *netdev);
+static void bnad_reconfig_vlans(struct bnad *bnad);
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets);
+static int bnad_set_mac_address(struct net_device *netdev, void *addr);
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr);
+static int bnad_disable_locked(struct bnad *bnad);
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu);
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid);
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid);
+static void bnad_netpoll(struct net_device *netdev);
+
+static const struct net_device_ops bnad_netdev_ops = {
+	.ndo_open			= bnad_open,
+	.ndo_stop			= bnad_stop,
+	.ndo_start_xmit			= bnad_start_xmit,
+	.ndo_get_stats			= bnad_get_stats,
+	.ndo_set_rx_mode		= bnad_set_rx_mode,
+	.ndo_set_multicast_list		= bnad_set_rx_mode,
+	.ndo_set_mac_address		= bnad_set_mac_address,
+	.ndo_change_mtu			= bnad_change_mtu,
+
+	.ndo_vlan_rx_register		= bnad_vlan_rx_register,
+	.ndo_vlan_rx_add_vid		= bnad_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid		= bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller		= bnad_netpoll,
+#endif
+};
+
+
+u32
+bnad_get_msglevel(struct net_device *netdev)
+{
+	return bnad_log_level;
+}
+
+void
+bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	bnad_conf_lock();
+	bnad_log_level = msglevel;
+	bnad_conf_unlock();
+}
+
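+/*
+ * Reclaim Tx buffers the hardware has finished with (up to
+ * updated_txq_cons): unmap their DMA mappings, free the skbs and
+ * return the number of packets reclaimed.
+ */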
+static unsigned int
+bnad_free_txbufs(struct bnad_txq_info *txqinfo, u16 updated_txq_cons)
+{
+	struct bnad *bnad = txqinfo->bnad;
+	unsigned int sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	wis = BNAD_Q_INDEX_CHANGE(txqinfo->txq.q.consumer_index,
+				  updated_txq_cons, txqinfo->txq.q.q_depth);
+	BNA_ASSERT(wis <=
+		   BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth));
+	unmap_array = txqinfo->skb_unmap_q.unmap_array;
+	unmap_cons = txqinfo->skb_unmap_q.consumer_index;
+	prefetch(&unmap_array[unmap_cons + 1]);
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+		BNA_ASSERT(skb);
+		unmap_array[unmap_cons].skb = NULL;
+		BNA_ASSERT(wis >=
+			   BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags));
+		BNA_ASSERT(((txqinfo->skb_unmap_q.producer_index -
+			     unmap_cons) & (txqinfo->skb_unmap_q.q_depth -
+					    1)) >=
+			   1 + skb_shinfo(skb)->nr_frags);
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr), skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, txqinfo->skb_unmap_q.q_depth);
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+				       pci_unmap_addr(&unmap_array[unmap_cons],
+						      dma_addr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					   0);
+			BNA_QE_INDX_ADD(unmap_cons, 1,
+					txqinfo->skb_unmap_q.q_depth);
+			prefetch(&unmap_array[unmap_cons + 1]);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	txqinfo->txq.q.consumer_index = updated_txq_cons;
+	txqinfo->skb_unmap_q.consumer_index = unmap_cons;
+	txqinfo->tx_packets += sent_packets;
+	txqinfo->tx_bytes += sent_bytes;
+	return sent_packets;
+}
+
+static inline void
+bnad_disable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+}
+
+static inline void
+bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    bnad->tx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    bnad->cq_table[i].
+					    rx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static inline void
+bnad_disable_rx_irq(struct bnad *bnad, struct bnad_cq_info *cqinfo)
+{
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib, 0);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+}
+static inline void
+bnad_enable_rx_irq(struct bnad *bnad, struct bnad_cq_info *cqinfo)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib,
+				    cqinfo->rx_coalescing_timeo);
+
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static unsigned int
+bnad_tx(struct bnad *bnad, struct bnad_txq_info *txqinfo)
+{
+	struct net_device *netdev = bnad->netdev;
+	unsigned int sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags))
+		return 0;
+
+	sent = bnad_free_txbufs(txqinfo,
+				(u16) (*txqinfo->hw_consumer_index));
+	if (sent) {
+		if (netif_queue_stopped(netdev) &&
+		    netif_carrier_ok(netdev) &&
+		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
+		    BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);
+	} else {
+		bna_ib_ack(bnad->priv, &txqinfo->ib, 0);
+	}
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+
+	return sent;
+}
+
+static irqreturn_t
+bnad_msix_tx(int irq, void *data)
+{
+	struct bnad_txq_info *txqinfo = (struct bnad_txq_info *)data;
+	struct bnad *bnad = txqinfo->bnad;
+
+	bnad_tx(bnad, txqinfo);
+
+	return IRQ_HANDLED;
+}
+
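+/*
+ * Allocate receive skbs for every free slot in the unmap queue, map
+ * them for DMA, post them to the RxQ and ring the doorbell with the
+ * new producer index.
+ */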
+static void
+bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc =
+		BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				rxqinfo->skb_unmap_q.q_depth);
+
+	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
+	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= rxqinfo->rxq.q.q_depth);
+
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent,
+					     wi_range);
+			BNA_ASSERT(wi_range &&
+				   wi_range <= rxqinfo->rxq.q.q_depth);
+		}
+		skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			rxqinfo->rxbuf_alloc_failed++;
+			goto finishing;
+		}
+		skb->dev = rxqinfo->bnad->netdev;
+		skb_reserve(skb, NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr =
+			pci_map_single(rxqinfo->bnad->pcidev, skb->data,
+				       rxqinfo->rxq_config.buffer_size,
+				       PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, rxqinfo->skb_unmap_q.q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		rxqinfo->skb_unmap_q.producer_index = unmap_prod;
+		rxqinfo->rxq.q.producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(&rxqinfo->rxq);
+	}
+}
+
+static inline void
+bnad_refill_rxq(struct bnad_rxq_info *rxqinfo)
+{
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags)) {
+		if (BNA_QE_FREE_CNT
+		    (&rxqinfo->skb_unmap_q,
+		     rxqinfo->skb_unmap_q.
+		     q_depth) >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_rxbufs(rxqinfo);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+	}
+}
+
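+/*
+ * Completion-queue processing (called from the NAPI poll handlers):
+ * walk up to 'budget' completions, unmap each buffer, drop errored
+ * frames, hand good ones to the stack and refill the RxQ(s).
+ */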
+static unsigned int
+bnad_poll_cq(struct bnad *bnad, struct bnad_cq_info *cqinfo, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_rxq_info *rxqinfo = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	struct bna_pkt_rate *pkt_rt = &cqinfo->pkt_rate;
+
+	prefetch(bnad);
+	prefetch(bnad->netdev);
+	cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+	BNA_ASSERT(wi_range && wi_range <= cqinfo->cq.q.q_depth);
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+		rxqinfo = &bnad->rxq_table[cmpl->rxq_id];
+		unmap_q = &rxqinfo->skb_unmap_q;
+
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		prefetch(skb->data - NET_IP_ALIGN);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		/* XXX May be bad for performance. */
+		/* CATAPULT_BRINGUP: Should we add all the packets? */
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+
+		wis++;
+		if (likely(--wi_range))
+			next_cmpl = cmpl + 1;
+		else {
+			BNA_Q_PI_ADD(&cqinfo->cq, wis);
+			wis = 0;
+			next_cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+			BNA_ASSERT(wi_range &&
+				   wi_range <= cqinfo->cq.q.q_depth);
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely
+		    (flags &
+		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
+		      BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb_any(skb);
+			rxqinfo->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely
+		    (bnad->rx_csum &&
+		     (((flags & BNA_CQ_EF_IPV4) &&
+		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+		      (flags & BNA_CQ_EF_IPV6)) &&
+		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		rxqinfo->rx_packets++;
+		rxqinfo->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) &&
+		    bnad_vlan_strip) {
+			BNA_ASSERT(cmpl->vlan_tag);
+			vlan_hwaccel_receive_skb(skb, bnad->vlangrp,
+						 ntohs(cmpl->vlan_tag));
+		} else
+			netif_receive_skb(skb);
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	BNA_Q_PI_ADD(&cqinfo->cq, wis);
+
+	if (likely(rxqinfo)) {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, packets);
+		/* Check the current queue first. */
+		bnad_refill_rxq(rxqinfo);
+
+		/* XXX counters per queue for refill? */
+		if (likely(bnad_small_large_rxbufs)) {
+			/* There are 2 RxQs - small and large buffer queues */
+			unsigned int rxq_id = (rxqinfo->rxq_id ^ 1);
+			bnad_refill_rxq(&bnad->rxq_table[rxq_id]);
+		}
+	} else {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	}
+
+	return packets;
+}
+
+static irqreturn_t
+bnad_msix_rx(int irq, void *data)
+{
+	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
+	struct bnad *bnad = cqinfo->bnad;
+
+	if (likely(napi_schedule_prep(&cqinfo->napi))) {
+		bnad_disable_rx_irq(bnad, cqinfo);
+		__napi_schedule(&cqinfo->napi);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnad_msix_err_mbox(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+
+	bna_intr_status_get(bnad->priv, &intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+		bna_mbox_err_handler(bnad->priv, intr_status);
+
+	spin_unlock(&bnad->priv_lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnad_isr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+
+	if (!intr_status) {
+		spin_unlock(&bnad->priv_lock);
+		return IRQ_NONE;
+	}
+
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		bna_mbox_err_handler(bnad->priv, intr_status);
+		spin_unlock(&bnad->priv_lock);
+		if (BNA_IS_ERR_INTR(intr_status) ||
+		    !BNA_IS_INTX_DATA_INTR(intr_status))
+			goto exit_isr;
+	} else
+		spin_unlock(&bnad->priv_lock);
+
+	if (likely(napi_schedule_prep(&bnad->cq_table[0].napi))) {
+		bnad_disable_txrx_irqs(bnad);
+		__napi_schedule(&bnad->cq_table[0].napi);
+	}
+
+exit_isr:
+	return IRQ_HANDLED;
+}
+
+static int
+bnad_request_mbox_irq(struct bnad *bnad)
+{
+	int err;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		err = request_irq(bnad->msix_table[bnad->msix_num - 1].vector,
+				  &bnad_msix_err_mbox, 0,
+				  bnad->netdev->name, bnad->netdev);
+	} else {
+		err = request_irq(bnad->pcidev->irq, &bnad_isr,
+				  IRQF_SHARED, bnad->netdev->name,
+				  bnad->netdev);
+	}
+
+	if (err) {
+		dev_err(&bnad->pcidev->dev,
+			"Request irq for mailbox failed: %d\n", err);
+		return err;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bna_mbox_msix_idx_set(bnad->priv, bnad->msix_num - 1);
+
+	bna_mbox_intr_enable(bnad->priv);
+	return 0;
+}
+
+static void
+bnad_sync_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+	synchronize_irq(irq);
+}
+
+static void
+bnad_free_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_mbox_intr_disable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	free_irq(irq, bnad->netdev);
+}
+
+static int
+bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
+{
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[txq_id].vector,
+			   &bnad_msix_tx, 0,
+			   bnad->txq_table[txq_id].name,
+			   &bnad->txq_table[txq_id]);
+}
+
+int
+bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
+{
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
+			   &bnad_msix_rx, 0,
+			   bnad->cq_table[cq_id].name, &bnad->cq_table[cq_id]);
+}
+
+static int
+bnad_request_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	int i;
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		u32 mask;
+		bna_intx_disable(bnad->priv, &mask);
+		mask &= ~0xffff;
+		bna_intx_enable(bnad->priv, mask);
+		for (i = 0; i < bnad->ib_num; i++)
+			bna_ib_ack(bnad->priv, bnad->ib_table[i].ib, 0);
+		return 0;
+	}
+
+	entries = bnad->msix_table;
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_request_txq_irq(bnad, i);
+		if (err) {
+			sprintf(message, "%s request irq for TxQ %d failed %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			while (--i >= 0) {
+				free_irq(entries[i].vector,
+					 &bnad->txq_table[i]);
+			}
+			return err;
+		}
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_request_cq_irq(bnad, i);
+		if (err) {
+			sprintf(message, "%s request irq for CQ %u failed %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			while (--i >= 0) {
+				free_irq(entries[bnad->txq_num + i].vector,
+					 &bnad->cq_table[i]);
+			}
+			goto free_txq_irqs;
+		}
+	}
+
+	return 0;
+
+free_txq_irqs:
+	for (i = 0; i < bnad->txq_num; i++)
+		free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+	bnad_disable_msix(bnad);
+
+	return err;
+}
+
+static void
+bnad_free_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	uint i;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		entries = bnad->msix_table;
+		for (i = 0; i < bnad->txq_num; i++)
+			free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+		for (i = 0; i < bnad->cq_num; i++) {
+			free_irq(entries[bnad->txq_num + i].vector,
+				 &bnad->cq_table[i]);
+		}
+	} else
+		synchronize_irq(bnad->pcidev->irq);
+}
+
+void
+bnad_setup_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+
+
+	BNA_ASSERT(ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_ib_config_set(bnad->priv, ib_entry->ib, ib_id,
+			  &ib_entry->ib_config);
+	/* Start the IB */
+	bna_ib_ack(bnad->priv, ib_entry->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void
+bnad_setup_ibs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_ib(bnad, bnad->txq_table[i].txq_config.ib_id);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_ib(bnad, bnad->cq_table[i].cq_config.ib_id);
+}
+
+/* These functions are called back with priv_lock held. */
+
+static void
+bnad_lldp_get_cfg_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void
+bnad_cee_get_attr_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void
+bnad_cee_get_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_stats_comp_status = status;
+	complete(&bnad->cee_stats_comp);
+}
+
+static void
+bnad_cee_reset_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_reset_stats_status = status;
+	complete(&bnad->cee_reset_stats_comp);
+}
+
+static void
+bnad_ucast_set_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->ucast_comp_status = status;
+	complete(&bnad->ucast_comp);
+}
+
+static void
+bnad_q_stop_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+
+	bnad->qstop_comp_status = status;
+	complete(&bnad->qstop_comp);
+}
+
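+/* Return the lowest priority set in the CEE priority map, or 0 if none. */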
+static unsigned int
+bnad_get_priority(struct bnad *bnad, u8 prio_map)
+{
+	unsigned int i;
+
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		return i;
+	}
+	return 0;
+}
+
+static void
+bnad_link_up_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct bfi_ll_aen *up_aen = (struct bfi_ll_aen *)
+		(&bnad->priv->mb_msg);
+
+	bnad->cee_linkup = up_aen->cee_linkup;
+	bnad->priority = bnad_get_priority(bnad, up_aen->prio_map);
+
+	bnad->link_state = BNAD_LS_UP;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void
+bnad_link_down_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+
+	bnad->link_state = BNAD_LS_DOWN;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void
+bnad_stats_get_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->stats.hw_stats_updates++;
+	if (bnad->state == BNAD_S_OPEN)
+		mod_timer(&bnad->stats_timer, jiffies + HZ);
+}
+
+
+/* Called with bnad priv_lock held. */
+static void
+bnad_hw_error(struct bnad *bnad, u8 status)
+{
+	unsigned int irq;
+	char message[BNA_MESSAGE_SIZE];
+
+	set_bit(BNAD_F_HWERROR, &bnad->flags);
+
+	bna_mbox_intr_disable(bnad->priv);
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (!test_and_set_bit(BNAD_F_MBOX_IRQ_DISABLED, &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			sprintf(message, "Disabling Mbox IRQ %d for port %d",
+				irq, bnad->bna_id);
+			DPRINTK(INFO, "%s", message);
+			disable_irq_nosync(irq);
+		}
+	}
+
+	bna_cleanup(bnad->priv);
+
+	bnad->work_flags = BNAD_WF_ERROR;
+	if (bnad->state != BNAD_S_UNLOADING)
+		schedule_work(&bnad->work);
+}
+
+static void
+bnad_hw_error_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad_hw_error(bnad, status);
+}
+
+int
+bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth)
+{
+	/* Q_depth must be power of 2 for macros to work. */
+	BNA_ASSERT(BNA_POWER_OF_2(q_depth));
+	unmap_q->q_depth = q_depth;
+	unmap_q->unmap_array = vmalloc(q_depth * sizeof(struct bnad_skb_unmap));
+	if (!unmap_q->unmap_array)
+		return -ENOMEM;
+	memset(unmap_q->unmap_array, 0,
+	       q_depth * sizeof(struct bnad_skb_unmap));
+	return 0;
+}
+
+static int
+bnad_alloc_unmap_queues(struct bnad *bnad)
+{
+	int i, err = 0;
+	struct bnad_txq_info *txqinfo;
+	struct bnad_rxq_info *rxqinfo;
+	char message[BNA_MESSAGE_SIZE];
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		err = bnad_alloc_unmap_q(&txqinfo->skb_unmap_q,
+					 txqinfo->txq.q.q_depth * 4);
+		if (err) {
+			sprintf(message,
+				"%s allocating Tx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			return err;
+		}
+	}
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		err = bnad_alloc_unmap_q(&rxqinfo->skb_unmap_q,
+					 rxqinfo->rxq.q.q_depth);
+		if (err) {
+			sprintf(message,
+				"%s allocating Rx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			return err;
+		}
+	}
+	return 0;
+}
+
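+/*
+ * Reset a queue and its unmap queue to the empty state; every buffer
+ * must already have been freed by the caller.
+ */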
+static void
+bnad_reset_q(struct bnad *bnad, struct bna_q *q, struct bnad_unmap_q *unmap_q)
+{
+	u32 _ui;
+
+	BNA_ASSERT(q->producer_index == q->consumer_index);
+	BNA_ASSERT(unmap_q->producer_index == unmap_q->consumer_index);
+
+	q->producer_index = 0;
+	q->consumer_index = 0;
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	for (_ui = 0; _ui < unmap_q->q_depth; _ui++)
+		BNA_ASSERT(!unmap_q->unmap_array[_ui].skb);
+}
+
+static void
+bnad_flush_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	struct bnad *bnad = rxqinfo->bnad;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 cq_id;
+
+	unmap_q = &rxqinfo->skb_unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+	}
+
+	bnad_reset_q(bnad, &rxqinfo->rxq.q, &rxqinfo->skb_unmap_q);
+	cq_id = rxqinfo->rxq_id / bnad_rxqs_per_cq;
+	*bnad->cq_table[cq_id].hw_producer_index = 0;
+}
+
+/* Should be called with conf_lock held. */
+static int
+bnad_disable_txq(struct bnad *bnad, u32 txq_id)
+{
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_txq_stop(bnad->priv, txq_id);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto txq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto txq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+txq_stop_exit:
+	if (err) {
+		sprintf(message, "%s stop TxQ %u failed %d", bnad->netdev->name,
+			txq_id, err);
+		DPRINTK(INFO, "%s", message);
+	}
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask)
+{
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_multi_rxq_stop(bnad->priv, rxq_id_mask);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto rxq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto rxq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+rxq_stop_exit:
+	if (err) {
+		sprintf(message, "%s stop RxQs(0x%llx) failed %d",
+			bnad->netdev->name, rxq_id_mask, err);
+		DPRINTK(INFO, "%s", message);
+	}
+
+	return err;
+}
+
+static int
+bnad_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_rx_irq(bnad, cqinfo);
+	return rcvd;
+}
+
+static int
+bnad_poll_txrx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	bnad_tx(bnad, &bnad->txq_table[0]);
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_txrx_irqs(bnad);
+	return rcvd;
+}
+
+static void
+bnad_napi_init(struct bnad *bnad)
+{
+	int (*napi_poll) (struct napi_struct *, int);
+	int i;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		napi_poll = bnad_poll_rx;
+	else
+		napi_poll = bnad_poll_txrx;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_add(bnad->netdev, &bnad->cq_table[i].napi, napi_poll,
+			       64);
+}
+
+static void
+bnad_napi_enable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_enable(&bnad->cq_table[i].napi);
+}
+
+static void
+bnad_napi_disable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_disable(&bnad->cq_table[i].napi);
+}
+
+static void
+bnad_napi_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_del(&bnad->cq_table[i].napi);
+}
+
+static void
+bnad_stop_data_path(struct bnad *bnad, int on_error)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!on_error && !BNAD_NOT_READY(bnad)) {
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Wait to make sure Tx and Rx are stopped. */
+	msleep(1000);
+	bnad_free_txrx_irqs(bnad);
+	bnad_sync_mbox_irq(bnad);
+
+	bnad_napi_disable(bnad);
+	bnad_napi_uninit(bnad);
+	/* Delete the stats timer after synchronize with mbox irq. */
+	del_timer_sync(&bnad->stats_timer);
+
+	netif_tx_disable(bnad->netdev);
+	netif_carrier_off(bnad->netdev);
+
+	/*
+	 * Remove tasklets if scheduled
+	 */
+	tasklet_kill(&bnad->tx_free_tasklet);
+}
+
+static void
+bnad_port_admin_locked(struct bnad *bnad, u8 up)
+{
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!BNAD_NOT_READY(bnad)) {
+		bna_port_admin(bnad->priv, up);
+		if (up)
+			mod_timer(&bnad->stats_timer, jiffies + HZ);
+		else
+			bnad->link_state = BNAD_LS_DOWN;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held */
+static int
+bnad_stop_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_CLOSING;
+		bnad_disable_locked(bnad);
+		bnad->state = BNAD_S_INIT;
+		sprintf(message, "%s is stopped", bnad->netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN_DISABLED:
+		bnad->state = BNAD_S_INIT_DISABLED;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+
+
+/* Should be called with conf_lock held */
+int
+bnad_ioc_disabling_locked(struct bnad *bnad)
+{
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT_DISABLING;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+		bnad_disable_locked(bnad);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int
+bnad_alloc_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	ib_entry->ib_seg_addr =
+		pci_alloc_consistent(bnad->pcidev, L1_CACHE_BYTES, &dma_addr);
+	if (!ib_entry->ib_seg_addr)
+		return -ENOMEM;
+
+	BNA_SET_DMA_ADDR(dma_addr, &ib_entry->ib_config.ib_seg_addr);
+	return 0;
+}
+static int
+bnad_alloc_ibs(struct bnad *bnad)
+{
+	uint i;
+	int err;
+
+	bnad->ib_num = bnad->txq_num + bnad->cq_num;
+	bnad->ib_table =
+		kzalloc(bnad->ib_num * sizeof(struct bnad_ib_entry),
+			GFP_KERNEL);
+	if (!bnad->ib_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->ib_num; i++) {
+		err = bnad_alloc_ib(bnad, i);
+		if (err)
+			goto free_ibs;
+	}
+	return 0;
+
+free_ibs:
+	bnad_free_ibs(bnad);
+	return err;
+}
+
+void
+bnad_free_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	if (ib_entry->ib_seg_addr) {
+		BNA_GET_DMA_ADDR(&ib_entry->ib_config.ib_seg_addr, dma_addr);
+		pci_free_consistent(bnad->pcidev, L1_CACHE_BYTES,
+				    ib_entry->ib_seg_addr, dma_addr);
+		ib_entry->ib_seg_addr = NULL;
+	}
+}
+
+static void
+bnad_free_ibs(struct bnad *bnad)
+{
+	uint i;
+
+	if (!bnad->ib_table)
+		return;
+	for (i = 0; i < bnad->ib_num; i++)
+		bnad_free_ib(bnad, i);
+	kfree(bnad->ib_table);
+	bnad->ib_table = NULL;
+}
+
+/* Let the caller deal with error - free memory. */
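+/*
+ * A queue is backed by a DMA-able queue page table (QPT) holding the
+ * bus address of each page plus a kernel-virtual pointer array; the
+ * data pages themselves are allocated one at a time below.
+ */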
+static int
+bnad_alloc_q(struct bnad *bnad, struct bna_qpt *qpt, struct bna_q *q,
+	     size_t qsize)
+{
+	size_t i;
+	dma_addr_t dma_addr;
+
+	qsize = ALIGN(qsize, PAGE_SIZE);
+	qpt->page_count = qsize >> PAGE_SHIFT;
+	qpt->page_size = PAGE_SIZE;
+
+
+	qpt->kv_qpt_ptr =
+		pci_alloc_consistent(bnad->pcidev,
+				     qpt->page_count *
+				     sizeof(struct bna_dma_addr), &dma_addr);
+	if (!qpt->kv_qpt_ptr)
+		return -ENOMEM;
+	BNA_SET_DMA_ADDR(dma_addr, &qpt->hw_qpt_ptr);
+
+	q->qpt_ptr = kzalloc(qpt->page_count * sizeof(void *), GFP_KERNEL);
+	if (!q->qpt_ptr)
+		return -ENOMEM;
+	qpt->qpt_ptr = q->qpt_ptr;
+	for (i = 0; i < qpt->page_count; i++) {
+		q->qpt_ptr[i] =
+			pci_alloc_consistent(bnad->pcidev, PAGE_SIZE,
+					     &dma_addr);
+		if (!q->qpt_ptr[i])
+			return -ENOMEM;
+		BNA_SET_DMA_ADDR(dma_addr,
+				 &((struct bna_dma_addr *)qpt->kv_qpt_ptr)[i]);
+
+	}
+
+	return 0;
+}
+
+static void
+bnad_free_q(struct bnad *bnad, struct bna_qpt *qpt, struct bna_q *q)
+{
+	int i;
+	dma_addr_t dma_addr;
+
+	if (qpt->kv_qpt_ptr && q->qpt_ptr) {
+		for (i = 0; i < qpt->page_count; i++) {
+			if (q->qpt_ptr[i]) {
+				BNA_GET_DMA_ADDR(&
+						 ((struct bna_dma_addr *)qpt->
+						  kv_qpt_ptr)[i], dma_addr);
+				pci_free_consistent(bnad->pcidev, PAGE_SIZE,
+						    q->qpt_ptr[i], dma_addr);
+			}
+		}
+	}
+
+	kfree(q->qpt_ptr);
+	qpt->qpt_ptr = q->qpt_ptr = NULL;
+
+	if (qpt->kv_qpt_ptr) {
+		BNA_GET_DMA_ADDR(&qpt->hw_qpt_ptr, dma_addr);
+		pci_free_consistent(bnad->pcidev,
+				    qpt->page_count *
+				    sizeof(struct bna_dma_addr),
+				    qpt->kv_qpt_ptr, dma_addr);
+		qpt->kv_qpt_ptr = NULL;
+	}
+}
+
+static void
+bnad_free_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+	if (txqinfo->skb_unmap_q.unmap_array) {
+		bnad_free_txbufs(txqinfo, txqinfo->txq.q.producer_index);
+		vfree(txqinfo->skb_unmap_q.unmap_array);
+		txqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+	if (rxqinfo->skb_unmap_q.unmap_array) {
+		bnad_flush_rxbufs(rxqinfo);
+		vfree(rxqinfo->skb_unmap_q.unmap_array);
+		rxqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo = &bnad->cq_table[cq_id];
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+}
+
+static void
+bnad_free_queues(struct bnad *bnad)
+{
+	uint i;
+
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++)
+			bnad_free_txq(bnad, i);
+		kfree(bnad->txq_table);
+		bnad->txq_table = NULL;
+	}
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++)
+			bnad_free_rxq(bnad, i);
+		kfree(bnad->rxq_table);
+		bnad->rxq_table = NULL;
+	}
+
+	if (bnad->cq_table) {
+		for (i = 0; i < bnad->cq_num; i++)
+			bnad_free_cq(bnad, i);
+		kfree(bnad->cq_table);
+		bnad->cq_table = NULL;
+	}
+}
+
+static int
+bnad_txq_init(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	err = bnad_alloc_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q,
+			   bnad->txq_depth * sizeof(struct bna_txq_entry));
+	if (err) {
+		bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+		return err;
+	}
+	txqinfo->txq.q.q_depth = bnad->txq_depth;
+	txqinfo->bnad = bnad;
+	txqinfo->txq_config.txf_id = BNAD_TX_FUNC_ID;
+	snprintf(txqinfo->name, sizeof(txqinfo->name), "%s TxQ %d",
+		 bnad->netdev->name, txq_id);
+	return 0;
+}
+
+static int
+bnad_txqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->txq_table =
+		kzalloc(bnad->txq_num * sizeof(struct bnad_txq_info),
+			GFP_KERNEL);
+	if (!bnad->txq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_txq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_rxq_init(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	err = bnad_alloc_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q,
+			   bnad->rxq_depth * sizeof(struct bna_rxq_entry));
+	if (err) {
+		bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+		return err;
+	}
+	rxqinfo->rxq.q.q_depth = bnad->rxq_depth;
+	rxqinfo->bnad = bnad;
+	rxqinfo->rxq_id = rxq_id;
+	rxqinfo->rxq_config.cq_id = rxq_id / bnad_rxqs_per_cq;
+
+	return 0;
+}
+
+static int
+bnad_rxqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->rxq_table =
+		kzalloc(bnad->rxq_num * sizeof(struct bnad_rxq_info),
+			GFP_KERNEL);
+	if (!bnad->rxq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		err = bnad_rxq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_cq_init(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	err = bnad_alloc_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q,
+			   bnad->rxq_depth * bnad_rxqs_per_cq *
+			   sizeof(struct bna_cq_entry));
+	if (err) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return err;
+	}
+
+	cqinfo->cq.q.q_depth = bnad->rxq_depth * bnad_rxqs_per_cq;
+	cqinfo->bnad = bnad;
+
+	cqinfo->rx_coalescing_timeo = bnad->rx_coalescing_timeo;
+
+	cqinfo->cq_id = cq_id;
+	snprintf(cqinfo->name, sizeof(cqinfo->name), "%s CQ %d",
+		 bnad->netdev->name, cq_id);
+
+	return 0;
+}
+
+static int
+bnad_cqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->cq_table =
+		kzalloc(bnad->cq_num * sizeof(struct bnad_cq_info), GFP_KERNEL);
+	if (!bnad->cq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_cq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
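+/*
+ * Scale the configured queue depth down for MTUs above the standard
+ * Ethernet size, keeping the result a power of two and no smaller
+ * than BNAD_MIN_Q_DEPTH.
+ */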
+static uint
+bnad_get_qsize(uint qsize_conf, uint mtu)
+{
+	uint qsize;
+
+	if (mtu >= ETH_DATA_LEN) {
+		qsize = qsize_conf / (mtu / ETH_DATA_LEN);
+		if (!BNA_POWER_OF_2(qsize))
+			BNA_TO_POWER_OF_2_HIGH(qsize);
+		if (qsize < BNAD_MIN_Q_DEPTH)
+			qsize = BNAD_MIN_Q_DEPTH;
+	} else
+		qsize = qsize_conf;
+
+	return qsize;
+}
+
+static int
+bnad_init_queues(struct bnad *bnad)
+{
+	int err;
+
+	if (!(bnad->config & BNAD_CF_TXQ_DEPTH))
+		bnad->txq_depth =
+			bnad_get_qsize(bnad_txq_depth, bnad->netdev->mtu);
+	if (!(bnad->config & BNAD_CF_RXQ_DEPTH))
+		bnad->rxq_depth =
+			bnad_get_qsize(bnad_rxq_depth, bnad->netdev->mtu);
+
+	err = bnad_txqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_rxqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_cqs_init(bnad);
+
+	return err;
+}
+
+void
+bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
+{
+	struct bnad_cq_info *cqinfo;
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+
+	BNA_ASSERT(cq_id < bnad->cq_num && ib_id < bnad->ib_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	ib_entry = &bnad->ib_table[ib_id];
+
+	cqinfo->hw_producer_index = (u32 *) (ib_entry->ib_seg_addr);
+	cqinfo->cq_config.ib_id = ib_id;
+	cqinfo->cq_config.ib_seg_index = 0;
+
+	ib_entry->ib = &cqinfo->ib;
+	ib_config = &ib_entry->ib_config;
+	ib_config->coalescing_timer = bnad->rx_coalescing_timeo;
+	ib_config->control_flags =
+		BNA_IB_CF_INT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
+	if (bnad->config & BNAD_CF_MSIX) {
+		ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+		ib_config->msix_vector = ib_id;
+	} else
+		ib_config->msix_vector = 1 << ib_id;
+
+	/* Every CQ has its own IB. */
+	ib_config->seg_size = 1;
+	ib_config->index_table_offset = ib_id;
+}
+
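+/*
+ * Set up one interrupt block (IB) per TxQ and per CQ.  Each IB uses a
+ * single-entry segment that holds the hardware consumer index (TxQ)
+ * or producer index (CQ).
+ */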
+static void
+bnad_ibs_init(struct bnad *bnad)
+{
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+	struct bnad_txq_info *txqinfo;
+
+	int ib_id, i;
+
+	ib_id = 0;
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		ib_entry = &bnad->ib_table[ib_id];
+
+		txqinfo->hw_consumer_index = ib_entry->ib_seg_addr;
+		txqinfo->txq_config.ib_id = ib_id;
+		txqinfo->txq_config.ib_seg_index = 0;
+
+		ib_entry->ib = &txqinfo->ib;
+		ib_config = &ib_entry->ib_config;
+		ib_config->coalescing_timer = bnad->tx_coalescing_timeo;
+		ib_config->control_flags =
+			BNA_IB_CF_INTER_PKT_DMA | BNA_IB_CF_INT_ENABLE |
+			BNA_IB_CF_COALESCING_MODE | BNA_IB_CF_MASTER_ENABLE;
+		if (bnad->config & BNAD_CF_MSIX) {
+			ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+			ib_config->msix_vector = ib_id;
+		} else
+			ib_config->msix_vector = 1 << ib_id;
+		ib_config->interpkt_count = bnad->tx_interpkt_count;
+
+		/* Every TxQ has its own IB. */
+		ib_config->seg_size = 1;
+		ib_config->index_table_offset = ib_id;
+		ib_id++;
+	}
+
+	for (i = 0; i < bnad->cq_num; i++, ib_id++)
+		bnad_rxib_init(bnad, i, ib_id);
+}
+
+static void
+bnad_txf_init(struct bnad *bnad, uint txf_id)
+{
+	struct bnad_txf_info *txf_info;
+
+	BNA_ASSERT(bnad->txf_table && txf_id < bnad->txf_num);
+	txf_info = &bnad->txf_table[txf_id];
+	txf_info->txf_id = txf_id;
+	txf_info->txf_config.flags =
+		BNA_TXF_CF_VLAN_WI_BASED | BNA_TXF_CF_ENABLE;
+}
+
+void
+bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss)
+{
+	struct bnad_rxf_info *rxf_info;
+
+	BNA_ASSERT(bnad->rxf_table && rxf_id < bnad->rxf_num);
+	rxf_info = &bnad->rxf_table[rxf_id];
+	rxf_info->rxf_id = rxf_id;
+	rxf_info->rxf_config.rit_offset = rit_offset;
+	rxf_info->rxf_config.mcast_rxq_id = BNAD_MULTICAST_RXQ_ID;
+	if (bnad_small_large_rxbufs)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_SM_LG_RXQ;
+	if (bnad_vlan_strip)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_VLAN_STRIP;
+	if (rss) {
+		struct bna_rxf_rss *rxf_rss;
+
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_RSS_ENABLE;
+		rxf_rss = &rxf_info->rxf_config.rss;
+		rxf_rss->type =
+			BNA_RSS_V4_TCP | BNA_RSS_V4_IP | BNA_RSS_V6_TCP |
+			BNA_RSS_V6_IP;
+		rxf_rss->hash_mask = bnad->cq_num - 1;
+		get_random_bytes(rxf_rss->toeplitz_hash_key,
+				 sizeof(rxf_rss->toeplitz_hash_key));
+	}
+}
+
+static int
+bnad_init_funcs(struct bnad *bnad)
+{
+	bnad->txf_table =
+		kzalloc(sizeof(struct bnad_txf_info) * bnad->txf_num,
+			GFP_KERNEL);
+	if (!bnad->txf_table)
+		return -ENOMEM;
+	bnad_txf_init(bnad, BNAD_TX_FUNC_ID);
+
+	bnad->rxf_table =
+		kzalloc(sizeof(struct bnad_rxf_info) * bnad->rxf_num,
+			GFP_KERNEL);
+	if (!bnad->rxf_table)
+		return -ENOMEM;
+	bnad_rxf_init(bnad, BNAD_RX_FUNC_ID, BNAD_RIT_OFFSET,
+		      (bnad->cq_num > 1) ? 1 : 0);
+	return 0;
+}
+
+static void
+bnad_setup_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+
+	/* CEE state should not change while we do this */
+	spin_lock_irq(&bnad->priv_lock);
+	if (!bnad->cee_linkup) {
+		txqinfo->txq_config.priority = bnad->curr_priority = txq_id;
+		clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	} else {
+		txqinfo->txq_config.priority = bnad->curr_priority =
+			bnad->priority;
+		set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	}
+	/*  Set wrr_quota properly if multiple priorities/TxQs are enabled. */
+	txqinfo->txq_config.wrr_quota = BNAD_TX_MAX_WRR_QUOTA;
+	bna_txq_config(bnad->priv, &txqinfo->txq, txq_id, &txqinfo->txq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+
+	/*
+	 * Every RxQ set has 2 RxQs: the first is large buffer RxQ,
+	 * the second is small buffer RxQ.
+	 */
+	if ((rxq_id % bnad_rxqs_per_cq) == 0)
+		rxqinfo->rxq_config.buffer_size =
+			(bnad_vlan_strip ? VLAN_ETH_HLEN : ETH_HLEN) +
+			bnad->netdev->mtu + ETH_FCS_LEN;
+	else
+		rxqinfo->rxq_config.buffer_size = BNAD_SMALL_RXBUF_SIZE;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxq_config(bnad->priv, &rxqinfo->rxq, rxq_id, &rxqinfo->rxq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_cq_config(bnad->priv, &cqinfo->cq, cq_id, &cqinfo->cq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void
+bnad_setup_queues(struct bnad *bnad)
+{
+	uint i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_txq(bnad, i);
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_setup_rxq(bnad, i);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_cq(bnad, i);
+}
+
+
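+/*
+ * Program the indirection table (RIT): each entry maps a CQ to a
+ * large-buffer RxQ and, when small/large buffers are enabled, a
+ * paired small-buffer RxQ.
+ */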
+static void
+bnad_setup_rit(struct bnad *bnad)
+{
+	int i, size;
+
+	size = bnad->cq_num;
+	for (i = 0; i < size; i++) {
+		if (bnad_small_large_rxbufs) {
+			bnad->rit[i].large_rxq_id = (i << 1);
+			bnad->rit[i].small_rxq_id = (i << 1) + 1;
+		} else {
+			bnad->rit[i].large_rxq_id = i;
+		}
+	}
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rit_config_set(bnad->priv, BNAD_RIT_OFFSET, bnad->rit, size);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo = &bnad->rxq_table[rxq_id];
+	u16 rxbufs;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	bnad_alloc_rxbufs(rxqinfo);
+	rxbufs = BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+				   rxqinfo->skb_unmap_q.q_depth);
+}
+
+static int
+bnad_config_hw(struct bnad *bnad)
+{
+	int i, err = 0;
+	u64 rxq_id_mask = 0;
+	struct sockaddr sa;
+	struct net_device *netdev = bnad->netdev;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Disable the RxF until the port is brought up later. */
+	bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+	for (i = 0; i < bnad->txq_num; i++) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_txq(bnad, i);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_queues(bnad);
+
+	bnad_setup_rit(bnad);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txf_config_set(bnad->priv, BNAD_TX_FUNC_ID,
+			   &bnad->txf_table->txf_config);
+	for (i = 0; i < bnad->rxf_num; i++) {
+		bna_rxf_config_set(bnad->priv, i,
+				   &bnad->rxf_table[i].rxf_config);
+		bna_rxf_vlan_filter(bnad->priv, i, BNA_ENABLE);
+	}
+
+	spin_unlock_irq(&bnad->priv_lock);
+	/* Mailbox should be enabled before this! */
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	err = bnad_set_mac_address_locked(netdev, &sa);
+	spin_lock_irq(&bnad->priv_lock);
+	if (err || BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Receive broadcasts */
+	bna_rxf_broadcast(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+
+	bna_mtu_info(bnad->priv, netdev->mtu, bnad);
+	bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+
+	bna_rxf_mcast_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	bna_mcast_mac_reset_list(bnad->priv);
+
+	bnad_set_rx_mode_locked(bnad->netdev);
+
+	bnad_reconfig_vlans(bnad);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_ibs(bnad);
+	return 0;
+
+unlock_and_return:
+	if (BNAD_NOT_READY(bnad))
+		err = BNA_FAIL;
+	spin_unlock_irq(&bnad->priv_lock);
+	return err;
+}
+
+/* Note: bnad_cleanup doesn't free irqs */
+static void
+bnad_cleanup(struct bnad *bnad)
+{
+
+	kfree(bnad->rit);
+	bnad->rit = NULL;
+
+	kfree(bnad->txf_table);
+	bnad->txf_table = NULL;
+
+	kfree(bnad->rxf_table);
+	bnad->rxf_table = NULL;
+
+	bnad_free_ibs(bnad);
+	bnad_free_queues(bnad);
+}
+
+/* Should be called with rtnl_lock held. */
+static int
+bnad_init(struct bnad *bnad)
+{
+	int err;
+
+	err = bnad_alloc_ibs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_init_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad_ibs_init(bnad);
+
+	err = bnad_init_funcs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_alloc_unmap_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad->rit =
+		kzalloc(bnad->cq_num * sizeof(struct bna_rit_entry),
+			GFP_KERNEL);
+	if (!bnad->rit) {
+		err = -ENOMEM;
+		goto finished;
+	}
+
+	return 0;
+
+finished:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+static int
+bnad_enable_locked(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int err = 0;
+	uint i;
+	char message[BNA_MESSAGE_SIZE];
+
+	bnad->state = BNAD_S_OPENING;
+
+	err = bnad_init(bnad);
+	if (err) {
+		sprintf(message, "%s init failed %d", netdev->name, err);
+		DPRINTK(INFO, "%s", message);
+		bnad->state = BNAD_S_INIT;
+		return err;
+	}
+
+	err = bnad_config_hw(bnad);
+	if (err) {
+		sprintf(message, "%s config HW failed %d", netdev->name, err);
+		DPRINTK(INFO, "%s", message);
+		goto init_failed;
+	}
+
+	err = bnad_request_txrx_irqs(bnad);
+	if (err) {
+		sprintf(message, "%s requests Tx/Rx irqs failed: %d",
+			bnad->netdev->name, err);
+		DPRINTK(INFO, "%s", message);
+		goto init_failed;
+	}
+	bnad_napi_init(bnad);
+	bnad_napi_enable(bnad);
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_alloc_for_rxq(bnad, i);
+
+	bnad->state = BNAD_S_OPEN;
+	sprintf(message, "%s is opened", bnad->netdev->name);
+	DPRINTK(INFO, "%s", message);
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad)) {
+		/* Let bnad_error take care of the error. */
+		spin_unlock_irq(&bnad->priv_lock);
+		return 0;
+	}
+
+	/* RxF was disabled earlier. */
+	bna_rxf_enable(bnad->priv, BNAD_RX_FUNC_ID);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	return 0;
+
+init_failed:
+	bnad_cleanup(bnad);
+	bnad->state = BNAD_S_INIT;
+	return err;
+}
+
+
+/* Should be called with conf_lock held */
+static int
+bnad_open_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		err = bnad_enable_locked(bnad);
+		break;
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		sprintf(message, "%s is not ready yet: IOC down", netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	case BNAD_S_INIT_DISABLED:
+		bnad->state = BNAD_S_OPEN_DISABLED;
+		sprintf(message, "%s is not ready yet: IOC disabled",
+			netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	default:
+		BNA_ASSERT(0);
+		break;
+	}
+	return err;
+}
+
+int
+bnad_open_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (!err && (bnad->state == BNAD_S_OPEN))
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return err;
+}
+
+int
+bnad_open(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	sprintf(message, "%s open", netdev->name);
+	DPRINTK(INFO, "%s", message);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags)) {
+		sprintf(message, "%s is disabled", netdev->name);
+		DPRINTK(INFO, "%s", message);
+	} else
+		err = bnad_open_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static int
+bnad_disable_locked(struct bnad *bnad)
+{
+	int err = 0, i;
+	u64 rxq_id_mask = 0;
+
+
+	bnad_stop_data_path(bnad, 0);
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			goto cleanup;
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			goto cleanup;
+	}
+
+cleanup:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+int
+bnad_stop_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	return bnad_stop_locked_internal(netdev);
+}
+
+int
+bnad_stop(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	sprintf(message, "%s stop", netdev->name);
+	DPRINTK(INFO, "%s", message);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags)) {
+		sprintf(message, "%s port is disabled", netdev->name);
+		DPRINTK(INFO, "%s", message);
+	} else
+		err = bnad_stop_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+
+	err = bnad_stop_locked_internal(netdev);
+	if (err) {
+		sprintf(message, "%s sw reset internal: stop failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (err) {
+		sprintf(message, "%s sw reset internal: open failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+	return 0;
+done:
+	DPRINTK(INFO, "%s", message);
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+
+	if (bnad->state != BNAD_S_OPEN)
+		return 0;
+
+	bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	err = bnad_sw_reset_locked_internal(netdev);
+
+	if (err) {
+		sprintf(message, "%s sw reset: failed %d", bnad->netdev->name,
+			err);
+		DPRINTK(INFO, "%s", message);
+		return err;
+	}
+
+	/* After the reset, make sure we are in the OPEN state. */
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return 0;
+}
+
+
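+/*
+ * Prepare an skb for LSO: zero the IP length fields and seed the TCP
+ * checksum with the pseudo-header sum (excluding length), as the
+ * hardware expects.
+ */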
+static int
+bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+	int err;
+
+	BNA_ASSERT(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+		   skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6);
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err) {
+			bnad->stats.tso_err++;
+			return err;
+		}
+	}
+
+	/*
+	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
+	 * excluding the length field.
+	 */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		/* Do we really need these? */
+		iph->tot_len = 0;
+		iph->check = 0;
+
+		tcp_hdr(skb)->check =
+			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+					   IPPROTO_TCP, 0);
+		bnad->stats.tso4++;
+	} else {
+		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+		BNA_ASSERT(skb->protocol == htons(ETH_P_IPV6));
+		ipv6h->payload_len = 0;
+		tcp_hdr(skb)->check =
+			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+					 IPPROTO_TCP, 0);
+		bnad->stats.tso6++;
+	}
+
+	return 0;
+}
+
+netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	struct bnad_unmap_q *unmap_q;
+	u16 txq_prod, vlan_tag = 0;
+	unsigned int unmap_prod, wis, wis_used, wi_range;
+	unsigned int vectors, vect_id, i, acked;
+	int err;
+	dma_addr_t dma_addr;
+	struct bna_txq_entry *txqent;
+	bna_txq_wi_ctrl_flag_t flags;
+
+	if (unlikely
+	    (skb->len <= ETH_HLEN || skb->len > BNAD_TX_MAX_DATA_PER_WI)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+	unmap_q = &txqinfo->skb_unmap_q;
+
+	vectors = 1 + skb_shinfo(skb)->nr_frags;
+	if (vectors > BNAD_TX_MAX_VECTORS) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	wis = BNAD_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
+	acked = 0;
+	if (unlikely(wis > BNA_Q_FREE_COUNT(txq) ||
+		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+		if ((u16) (*txqinfo->hw_consumer_index) !=
+		    txq->q.consumer_index &&
+		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+			acked = bnad_free_txbufs(txqinfo,
+				(u16)(*txqinfo->hw_consumer_index));
+			bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+		} else
+			netif_stop_queue(netdev);
+
+		smp_mb();
+		/*
+		 * Check again to deal with race condition between
+		 * netif_stop_queue here, and netif_wake_queue in
+		 * interrupt handler which is not inside netif tx lock.
+		 */
+		if (likely(wis > BNA_Q_FREE_COUNT(txq) ||
+		    vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+			bnad->stats.netif_queue_stop++;
+			return NETDEV_TX_BUSY;
+		} else
+			netif_wake_queue(netdev);
+	}
+
+	unmap_prod = unmap_q->producer_index;
+	wis_used = 1;
+	vect_id = 0;
+	flags = 0;
+
+	txq_prod = txq->q.producer_index;
+	BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= txq->q.q_depth);
+	txqent->hdr.wi.reserved = 0;
+	txqent->hdr.wi.num_vectors = vectors;
+	txqent->hdr.wi.opcode =
+		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+		       BNA_TXQ_WI_SEND));
+
+	if (bnad_ipid_mode)
+		flags |= BNA_TXQ_WI_CF_IPID_MODE;
+
+	if (bnad->vlangrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = (u16) vlan_tx_tag_get(skb);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+	if (test_bit(BNAD_F_CEE_RUNNING, &bnad->flags)) {
+		vlan_tag =
+			(bnad->curr_priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+
+	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+	if (skb_is_gso(skb)) {
+		err = bnad_tso_prepare(bnad, skb);
+		if (err) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
+		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+		txqent->hdr.wi.l4_hdr_size_n_offset =
+			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+			      (tcp_hdrlen(skb) >> 2,
+			       skb_transport_offset(skb)));
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = 0;
+
+		txqent->hdr.wi.lso_mss = 0;
+
+		if (skb->protocol == htons(ETH_P_IP))
+			proto = ip_hdr(skb)->protocol;
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* XXX the nexthdr may not be TCP immediately. */
+			proto = ipv6_hdr(skb)->nexthdr;
+		}
+		if (proto == IPPROTO_TCP) {
+			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.tcpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+				   skb_transport_offset(skb) + tcp_hdrlen(skb));
+		} else if (proto == IPPROTO_UDP) {
+			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.udpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+				   skb_transport_offset(skb) +
+				   sizeof(struct udphdr));
+		} else {
+			err = skb_checksum_help(skb);
+			bnad->stats.csum_help++;
+			if (err) {
+				dev_kfree_skb(skb);
+				bnad->stats.csum_help_err++;
+				return NETDEV_TX_OK;
+			}
+		}
+	} else {
+		txqent->hdr.wi.lso_mss = 0;
+		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+	}
+
+	txqent->hdr.wi.flags = htons(flags);
+
+	txqent->hdr.wi.frame_length = htonl(skb->len);
+
+	unmap_q->unmap_array[unmap_prod].skb = skb;
+	BNA_ASSERT(skb_headlen(skb) <= BNAD_TX_MAX_DATA_PER_VECTOR);
+	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	dma_addr =
+		pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+			       PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+			   dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		if (++vect_id == BNAD_TX_MAX_VECTORS_PER_WI) {
+			vect_id = 0;
+			if (--wi_range)
+				txqent++;
+			else {
+				BNA_QE_INDX_ADD(txq_prod, wis_used,
+						txq->q.q_depth);
+				wis_used = 0;
+				BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent,
+						     wi_range);
+				BNA_ASSERT(wi_range &&
+					   wi_range <= txq->q.q_depth);
+			}
+			wis_used++;
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+		}
+
+		BNA_ASSERT(frag->size <= BNAD_TX_MAX_DATA_PER_VECTOR);
+		txqent->vector[vect_id].length = htons(frag->size);
+		BNA_ASSERT(unmap_q->unmap_array[unmap_prod].skb == NULL);
+		dma_addr =
+			pci_map_page(bnad->pcidev, frag->page,
+				     frag->page_offset, frag->size,
+				     PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+	}
+
+	unmap_q->producer_index = unmap_prod;
+	BNA_QE_INDX_ADD(txq_prod, wis_used, txq->q.q_depth);
+	txq->q.producer_index = txq_prod;
+
+	smp_mb();
+	bna_txq_prod_indx_doorbell(txq);
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index)
+		tasklet_schedule(&bnad->tx_free_tasklet);
+
+	return NETDEV_TX_OK;
+}
+
+struct net_device_stats *
+bnad_get_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &bnad->net_stats;
+	struct cna_stats_mac_rx *rxstats = &bnad->hw_stats->mac_rx_stats;
+	struct cna_stats_mac_tx *txstats = &bnad->hw_stats->mac_tx_stats;
+	int i;
+
+	memset(net_stats, 0, sizeof(*net_stats));
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			net_stats->rx_packets += bnad->rxq_table[i].rx_packets;
+			net_stats->rx_bytes += bnad->rxq_table[i].rx_bytes;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			net_stats->tx_packets += bnad->txq_table[i].tx_packets;
+			net_stats->tx_bytes += bnad->txq_table[i].tx_bytes;
+		}
+	}
+	net_stats->rx_errors =
+		rxstats->rx_fcs_error + rxstats->rx_alignment_error +
+		rxstats->rx_frame_length_error + rxstats->rx_code_error +
+		rxstats->rx_undersize;
+	net_stats->tx_errors = txstats->tx_fcs_error + txstats->tx_undersize;
+	net_stats->rx_dropped = rxstats->rx_drop;
+	net_stats->tx_dropped = txstats->tx_drop;
+	net_stats->multicast = rxstats->rx_multicast;
+	net_stats->collisions = txstats->tx_total_collision;
+
+	net_stats->rx_length_errors = rxstats->rx_frame_length_error;
+	net_stats->rx_crc_errors = rxstats->rx_fcs_error;
+	net_stats->rx_frame_errors = rxstats->rx_alignment_error;
+	/* recv'r fifo overrun */
+	net_stats->rx_fifo_errors = bnad->hw_stats->rxf_stats[0].frame_drops;
+
+	return net_stats;
+}
+
+/* Should be called with priv_lock held. */
+static void
+bnad_set_rx_mode_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (netdev->flags & IFF_PROMISC) {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+		bnad->config |= BNAD_CF_PROMISC;
+	} else {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_DISABLE);
+		bnad->config &= ~BNAD_CF_PROMISC;
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		if (!(bnad->config & BNAD_CF_ALLMULTI)) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_DISABLE);
+			bnad->config |= BNAD_CF_ALLMULTI;
+		}
+	} else {
+		if (bnad->config & BNAD_CF_ALLMULTI) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_ENABLE);
+			bnad->config &= ~BNAD_CF_ALLMULTI;
+		}
+	}
+
+	if (netdev->mc_count) {
+		struct mac *mcaddr_list;
+		struct dev_mc_list *mc;
+		int i;
+
+		mcaddr_list =
+			kzalloc((netdev->mc_count + 1) * sizeof(struct mac),
+				GFP_ATOMIC);
+		if (!mcaddr_list)
+			return;
+
+		mcaddr_list[0] = bna_bcast_addr;
+
+		mc = netdev->mc_list;
+		for (i = 1; mc && i < netdev->mc_count + 1; i++, mc = mc->next)
+			memcpy(&mcaddr_list[i], mc->dmi_addr,
+				sizeof(struct mac));
+
+		err = bna_rxf_mcast_mac_set_list(bnad->priv, BNAD_RX_FUNC_ID,
+				(const struct mac *)mcaddr_list,
+				netdev->mc_count + 1);
+
+		/* XXX Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+
+		kfree(mcaddr_list);
+	}
+}
+
+static void
+bnad_set_rx_mode(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bnad_set_rx_mode_locked(netdev);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 * mac_ptr,
+	       unsigned int cmd)
+{
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+	enum bna_status_e(*ucast_mac_func) (struct bna_dev *bna_dev,
+		unsigned int rxf_id, const struct mac *mac_addr_ptr) = NULL;
+
+	WARN_ON(in_interrupt());
+	if (!is_valid_ether_addr(mac_ptr))
+		return -EINVAL;
+
+	switch (cmd) {
+	case BNAD_UCAST_MAC_SET:
+		ucast_mac_func = bna_rxf_ucast_mac_set;
+		break;
+	case BNAD_UCAST_MAC_ADD:
+		ucast_mac_func = bna_rxf_ucast_mac_add;
+		break;
+	case BNAD_UCAST_MAC_DEL:
+		ucast_mac_func = bna_rxf_ucast_mac_del;
+		break;
+	}
+
+	init_completion(&bnad->ucast_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = ucast_mac_func(bnad->priv, rxf_id, (const struct mac *)mac_ptr);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto ucast_mac_exit;
+	}
+	wait_for_completion(&bnad->ucast_comp);
+	err = bnad->ucast_comp_status;
+	if (err == BFI_LL_CMD_NOT_EXEC)
+		err = 0;
+
+ucast_mac_exit:
+	if (err) {
+		sprintf(message, "%s unicast MAC address command %d failed: %d",
+			bnad->netdev->name, cmd, err);
+		DPRINTK(INFO, "%s", message);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held. */
+static int
+bnad_set_mac_address_locked(struct net_device *netdev, void *addr)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int err;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	err = bnad_ucast_mac(bnad, BNAD_RX_FUNC_ID, (u8 *) sa->sa_data,
+			     BNAD_UCAST_MAC_SET);
+	if (err)
+		return err;
+
+	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+	return 0;
+}
+
+static int
+bnad_set_mac_address(struct net_device *netdev, void *addr)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	err = bnad_set_mac_address_locked(netdev, addr);
+	bnad_conf_unlock();
+	return err;
+
+}
+
+static int
+bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	WARN_ON(in_interrupt());
+
+	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+		return -EINVAL;
+
+	bnad_conf_lock();
+	netdev->mtu = new_mtu;
+	err = bnad_sw_reset_locked(netdev);
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	bnad->vlangrp = grp;
+	bnad_conf_unlock();
+}
+
+static void
+bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_del(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+/* Should be called with priv_lock held. */
+static void
+bnad_reconfig_vlans(struct bnad *bnad)
+{
+	u16 vlan_id;
+
+	bna_rxf_vlan_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	if (bnad->vlangrp) {
+		for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
+			if (vlan_group_get_device(bnad->vlangrp, vlan_id))
+				bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+						 (unsigned int)vlan_id);
+		}
+	}
+}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+bnad_netpoll(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_cq_info *cqinfo;
+	int i;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		disable_irq(bnad->pcidev->irq);
+		bnad_isr(bnad->pcidev->irq, netdev);
+		enable_irq(bnad->pcidev->irq);
+	} else {
+		for (i = 0; i < bnad->cq_num; i++) {
+			cqinfo = &bnad->cq_table[i];
+			bnad_disable_rx_irq(bnad, cqinfo);
+			bnad_poll_cq(bnad, cqinfo, BNAD_MAX_Q_DEPTH);
+			bnad_enable_rx_irq(bnad, cqinfo);
+		}
+	}
+}
+#endif
+
+static void
+bnad_q_num_init(struct bnad *bnad, uint rxqsets)
+{
+	bnad->txq_num = BNAD_TXQ_NUM;
+	bnad->txf_num = 1;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (rxqsets) {
+			bnad->cq_num = rxqsets;
+			if (bnad->cq_num > BNAD_MAX_CQS)
+				bnad->cq_num = BNAD_MAX_CQS;
+		} else
+			bnad->cq_num =
+				min((uint) num_online_cpus(),
+				    (uint) BNAD_MAX_RXQSETS_USED);
+		/* VMware does not use RSS like Linux driver */
+		if (!BNA_POWER_OF_2(bnad->cq_num))
+			BNA_TO_POWER_OF_2(bnad->cq_num);
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+
+		bnad->rxf_num = 1;
+		bnad->msix_num =
+			bnad->txq_num + bnad->cq_num +
+			BNAD_MSIX_ERR_MAILBOX_NUM;
+	} else {
+		bnad->cq_num = 1;
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+		bnad->rxf_num = 1;
+		bnad->msix_num = 0;
+	}
+}
+
+static void
+bnad_enable_msix(struct bnad *bnad)
+{
+	int i, ret;
+
+	if (!(bnad->config & BNAD_CF_MSIX) || bnad->msix_table)
+		return;
+
+	bnad->msix_table =
+		kzalloc(bnad->msix_num * sizeof(struct msix_entry), GFP_KERNEL);
+	if (!bnad->msix_table)
+		goto intx_mode;
+
+	for (i = 0; i < bnad->msix_num; i++)
+		bnad->msix_table[i].entry = i;
+
+	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+	if (ret > 0) {
+		/* Not enough MSI-X vectors. */
+		int rxqsets = ret;
+
+		dev_err(&bnad->pcidev->dev,
+			"Tried to get %d MSI-X vectors, only got %d\n",
+			bnad->msix_num, ret);
+		BNA_TO_POWER_OF_2(rxqsets);
+		while (bnad->msix_num > ret && rxqsets) {
+			bnad_q_num_init(bnad, rxqsets);
+			rxqsets >>= 1;
+		}
+		if (bnad->msix_num <= ret) {
+			ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+					      bnad->msix_num);
+			if (ret) {
+				dev_err(&bnad->pcidev->dev,
+					"Enabling MSI-X failed: %d\n", ret);
+				goto intx_mode;
+			}
+		} else {
+			dev_err(&bnad->pcidev->dev,
+				"Enabling MSI-X failed: limited (%d) vectors\n",
+				ret);
+			goto intx_mode;
+		}
+	} else if (ret < 0) {
+		dev_err(&bnad->pcidev->dev, "Enabling MSI-X failed: %d\n", ret);
+		goto intx_mode;
+	}
+
+	dev_info(&bnad->pcidev->dev,
+		 "Enabling MSI-X succeeded with %d vectors, %s\n",
+		 bnad->msix_num,
+		 (bnad->cq_num > 1) ? "RSS is enabled" : "RSS is not enabled");
+	return;
+
+intx_mode:
+	dev_warn(&bnad->pcidev->dev, "Switching to INTx mode with no RSS\n");
+
+	kfree(bnad->msix_table);
+	bnad->msix_table = NULL;
+
+	bnad->config &= ~BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, 0);
+}
+
+static void
+bnad_disable_msix(struct bnad *bnad)
+{
+	if (bnad->config & BNAD_CF_MSIX) {
+		pci_disable_msix(bnad->pcidev);
+		kfree(bnad->msix_table);
+		bnad->msix_table = NULL;
+		bnad->config &= ~BNAD_CF_MSIX;
+	}
+}
+
+static void
+bnad_error(struct bnad *bnad)
+{
+
+	spin_lock_irq(&bnad->priv_lock);
+
+	if (!test_and_clear_bit(BNAD_F_HWERROR, &bnad->flags)) {
+		spin_unlock_irq(&bnad->priv_lock);
+		return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		bnad_stop_data_path(bnad, 1);
+		bnad_cleanup(bnad);
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BNA_ASSERT(0);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
+static void
+bnad_resume_after_reset(struct bnad *bnad)
+{
+	int err;
+	struct net_device *netdev = bnad->netdev;
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT;
+
+		bna_port_mac_get(bnad->priv, (struct mac *)bnad->perm_addr);
+		BNA_ASSERT(netdev->addr_len == sizeof(bnad->perm_addr));
+		memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+		if (is_zero_ether_addr(netdev->dev_addr))
+			memcpy(netdev->dev_addr, bnad->perm_addr,
+			       netdev->addr_len);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		err = bnad_enable_locked(bnad);
+		if (err) {
+			sprintf(message,
+				"%s bnad_enable failed after reset: %d",
+				bnad->netdev->name, err);
+			DPRINTK(INFO, "%s", message);
+		} else {
+			bnad_port_admin_locked(bnad, BNA_ENABLE);
+		}
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BNA_ASSERT(0);
+		/* fall through */
+	default:
+		break;
+	}
+
+}
+
+static void
+bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+	struct bnad *bnad = (struct bnad *)bnad_ptr;
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	unsigned int acked;
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index &&
+	    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+		acked = bnad_free_txbufs(txqinfo,
+					 (u16) (*txqinfo->hw_consumer_index));
+		bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+	}
+}
+
+static void
+bnad_cee_reconfig_prio(struct bnad *bnad, u8 cee_linkup, unsigned int prio)
+{
+
+	if (prio != bnad->curr_priority) {
+		bnad_sw_reset_locked_internal(bnad->netdev);
+	} else {
+		spin_lock_irq(&bnad->priv_lock);
+		if (!cee_linkup)
+			clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		else
+			set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		spin_unlock_irq(&bnad->priv_lock);
+	}
+}
+
+static void
+bnad_link_state_notify(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+	unsigned int prio = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->state != BNAD_S_OPEN) {
+		sprintf(message, "%s link up in state %d", netdev->name,
+			bnad->state);
+		DPRINTK(INFO, "%s", message);
+		return;
+	}
+
+	spin_lock_irq(&bnad->priv_lock);
+	link_state = bnad->link_state;
+	cee_linkup = bnad->cee_linkup;
+	if (cee_linkup)
+		prio = bnad->priority;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (link_state == BNAD_LS_UP) {
+		bnad_cee_reconfig_prio(bnad, cee_linkup, prio);
+		if (!netif_carrier_ok(netdev)) {
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			netif_carrier_off(netdev);
+			bnad->stats.netif_queue_stop++;
+		}
+	}
+}
+
+static void
+bnad_work(struct work_struct *work)
+{
+	struct bnad *bnad = container_of(work, struct bnad, work);
+	unsigned long work_flags;
+
+	bnad_conf_lock();
+
+	spin_lock_irq(&bnad->priv_lock);
+	work_flags = bnad->work_flags;
+	bnad->work_flags = 0;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (work_flags & BNAD_WF_ERROR)
+		bnad_error(bnad);
+	if (work_flags & BNAD_WF_RESETDONE)
+		bnad_resume_after_reset(bnad);
+
+	if (work_flags & BNAD_WF_LS_NOTIFY)
+		bnad_link_state_notify(bnad);
+
+	bnad_conf_unlock();
+}
+
+static void
+bnad_stats_timeo(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	int i;
+	struct bnad_rxq_info *rxqinfo;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_stats_get(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->rx_dyn_coalesce_on) {
+		u8 cls_timer;
+		struct bnad_cq_info *cq;
+		for (i = 0; i < bnad->cq_num; i++) {
+			cq = &bnad->cq_table[i];
+
+			if ((cq->pkt_rate.small_pkt_cnt == 0) &&
+			    (cq->pkt_rate.large_pkt_cnt == 0))
+				continue;
+
+			cls_timer =
+				bna_calc_coalescing_timer(bnad->priv,
+							  &cq->pkt_rate);
+
+			/* For NAPI, the coalescing timer needs to be stored */
+			cq->rx_coalescing_timeo = cls_timer;
+
+			bna_ib_coalescing_timer_set(bnad->priv, &cq->ib,
+						    cls_timer);
+		}
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		if (!(BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+					rxqinfo->skb_unmap_q.q_depth) >>
+		      BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) {
+			if (test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags))
+				continue;
+			bnad_alloc_rxbufs(rxqinfo);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+		}
+	}
+}
+
+static void
+bnad_free_ioc_mem(struct bnad *bnad)
+{
+	enum bna_dma_mem_type i;
+
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		if (bnad->ioc_meminfo[i].kva && bnad->ioc_meminfo[i].dma)
+			pci_free_consistent(bnad->pcidev,
+					    bnad->ioc_meminfo[i].len,
+					    bnad->ioc_meminfo[i].kva,
+					    *(dma_addr_t *) &bnad->
+					    ioc_meminfo[i].dma);
+		else if (bnad->ioc_meminfo[i].kva)
+			vfree(bnad->ioc_meminfo[i].kva);
+		bnad->ioc_meminfo[i].kva = NULL;
+	}
+}
+
+/* The following IOC callback functions are called with priv_lock held. */
+
+void
+bna_iocll_enable_cbfn(void *arg, enum bfa_status error)
+{
+	struct bnad *bnad = arg;
+
+	if (!error) {
+		bnad->work_flags &= ~BNAD_WF_LS_NOTIFY;
+		bnad->work_flags |= BNAD_WF_RESETDONE;
+
+		if (bnad->state != BNAD_S_UNLOADING)
+			schedule_work(&bnad->work);
+	}
+
+	bnad->ioc_comp_status = error;
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_disable_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_hbfail_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	bnad_hw_error(bnad, BFA_STATUS_IOC_FAILURE);
+}
+
+void
+bna_iocll_reset_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+	u32 int_status, int_mask;
+	unsigned int irq;
+
+	/* Clear the status */
+	bna_intr_status_get(bnad->priv, &int_status);
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (test_and_clear_bit(BNAD_F_MBOX_IRQ_DISABLED,
+		    &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			enable_irq(irq);
+		}
+	}
+
+	int_mask = ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS);
+	bna_intx_enable(bnad->priv, int_mask);
+}
+
+static void
+bnad_ioc_timeout(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_timer(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->state != BNAD_S_UNLOADING)
+		mod_timer(&bnad->ioc_timer,
+			  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+}
+
+s32
+bnad_cee_attach(struct bnad *bnad)
+{
+	u8 *dma_kva;
+	dma_addr_t dma_pa;
+	struct bfa_cee *cee = &bnad->cee;
+
+	memset(cee, 0, sizeof(struct bfa_cee));
+
+	/* Allocate memory for dma */
+	dma_kva =
+		pci_alloc_consistent(bnad->pcidev, bfa_cee_meminfo(), &dma_pa);
+	if (dma_kva == NULL)
+		return -ENOMEM;
+
+	/* Ugly... need to remove once CAL is fixed. */
+	((struct bna_dev *) bnad->priv)->cee = cee;
+
+	bnad->cee_cbfn.get_attr_cbfn = bnad_cee_get_attr_cb;
+	bnad->cee_cbfn.get_stats_cbfn = bnad_cee_get_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = NULL;
+
+	/* Invoke cee attach function */
+	bfa_cee_attach(cee, &bnad->priv->ioc, bnad, bnad->trcmod, bnad->logmod);
+	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
+	return 0;
+}
+
+static void
+bnad_cee_detach(struct bnad *bnad)
+{
+	struct bfa_cee *cee = &bnad->cee;
+
+	if (cee->attr_dma.kva) {
+		pci_free_consistent(bnad->pcidev, bfa_cee_meminfo(),
+				    cee->attr_dma.kva, cee->attr_dma.pa);
+	}
+	bfa_cee_detach(&bnad->cee);
+}
+
+static int
+bnad_priv_init(struct bnad *bnad)
+{
+	dma_addr_t dma_addr;
+	struct bna_dma_addr bna_dma_addr;
+	int err = 0, i;
+	struct bfa_pcidev pcidev_info;
+	u32 intr_mask;
+
+	if (bnad_msix)
+		bnad->config |= BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, bnad_rxqsets_used);
+
+	bnad->work_flags = 0;
+	INIT_WORK(&bnad->work, bnad_work);
+
+	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+		     (unsigned long)bnad);
+
+	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
+		    (unsigned long)bnad);
+
+	bnad->tx_coalescing_timeo = BNAD_TX_COALESCING_TIMEO;
+	bnad->tx_interpkt_count = BNAD_TX_INTERPKT_COUNT;
+
+	bnad->rx_coalescing_timeo = BNAD_RX_COALESCING_TIMEO;
+	bnad->rx_interpkt_count = BNAD_RX_INTERPKT_COUNT;
+	bnad->rx_interpkt_timeo = BNAD_RX_INTERPKT_TIMEO;
+
+	bnad->rx_dyn_coalesce_on = BNA_TRUE;
+
+	bnad->rx_csum = 1;
+	bnad->pause_config.tx_pause = 0;
+	bnad->pause_config.rx_pause = 0;
+
+	/* XXX could be vmalloc? */
+	bnad->trcmod = kzalloc(sizeof(struct bfa_trc_mod), GFP_KERNEL);
+	if (!bnad->trcmod) {
+		printk(KERN_ERR "port %u failed allocating trace buffer!\n",
+		       bnad->bna_id);
+		return -ENOMEM;
+	}
+
+	bfa_trc_init(bnad->trcmod);
+
+	bnad->logmod = NULL;
+
+	bnad->priv = kzalloc(bna_get_handle_size(), GFP_KERNEL);
+	if (!bnad->priv) {
+		printk(KERN_ERR "port %u failed allocating memory for bna\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_trcmod;
+	}
+	bnad->priv_stats =
+		pci_alloc_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+				     &dma_addr);
+	if (!bnad->priv_stats) {
+		printk(KERN_ERR
+		       "port %u failed allocating memory for bna stats\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_priv_mem;
+	}
+	pci_unmap_addr_set(bnad, priv_stats_dma, dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &bna_dma_addr);
+	bna_init(bnad->priv, (void *)bnad->bar0, bnad->priv_stats, bna_dma_addr,
+		 bnad->trcmod, bnad->logmod);
+	bna_all_stats_get(bnad->priv, &bnad->hw_stats);
+
+	spin_lock_init(&bnad->priv_lock);
+	init_MUTEX(&bnad->conf_sem);
+	bnad->priv_cbfn.ucast_set_cb = bnad_ucast_set_cb;
+	bnad->priv_cbfn.txq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.rxq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.link_up_cb = bnad_link_up_cb;
+	bnad->priv_cbfn.link_down_cb = bnad_link_down_cb;
+	bnad->priv_cbfn.stats_get_cb = bnad_stats_get_cb;
+	bnad->priv_cbfn.hw_error_cb = bnad_hw_error_cb;
+	bnad->priv_cbfn.lldp_get_cfg_cb = bnad_lldp_get_cfg_cb;
+
+	bna_register_callback(bnad->priv, &bnad->priv_cbfn, bnad);
+
+	bna_iocll_meminfo(bnad->priv, bnad->ioc_meminfo);
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		switch (i) {
+		case BNA_KVA_MEM_T_FWTRC:
+			bnad->ioc_meminfo[i].kva =
+				vmalloc(bnad->ioc_meminfo[i].len);
+			break;
+		default:
+			bnad->ioc_meminfo[i].kva =
+				pci_alloc_consistent(bnad->pcidev,
+						     bnad->ioc_meminfo[i].len,
+						     (dma_addr_t *) &bnad->
+						     ioc_meminfo[i].dma);
+
+			break;
+		}
+		if (!bnad->ioc_meminfo[i].kva) {
+			printk(KERN_ERR
+			       "port %u failed allocating %u "
+			       "bytes memory for IOC\n",
+			       bnad->bna_id, bnad->ioc_meminfo[i].len);
+			err = -ENOMEM;
+			goto free_ioc_mem;
+		} else
+			memset(bnad->ioc_meminfo[i].kva, 0,
+			       bnad->ioc_meminfo[i].len);
+	}
+
+	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+	pcidev_info.device_id = bnad->pcidev->device;
+	pcidev_info.pci_bar_kva = bnad->bar0;
+	bna_iocll_attach(bnad->priv, bnad, bnad->ioc_meminfo, &pcidev_info,
+			 bnad->trcmod, NULL, bnad->logmod);
+
+	err = bnad_cee_attach(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u cee_attach failed: %d\n", bnad->bna_id,
+		       err);
+		goto iocll_detach;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bnad_enable_msix(bnad);
+	else
+		dev_info(&bnad->pcidev->dev, "Working in INTx mode, no RSS\n");
+	bna_intx_disable(bnad->priv, &intr_mask);
+	err = bnad_request_mbox_irq(bnad);
+	if (err)
+		goto disable_msix;
+
+	setup_timer(&bnad->ioc_timer, bnad_ioc_timeout,
+		    (unsigned long)bnad);
+	mod_timer(&bnad->ioc_timer, jiffies +
+		  msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+
+	bnad_conf_lock();
+	bnad->state = BNAD_S_START;
+
+	init_completion(&bnad->ioc_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_enable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	wait_for_completion(&bnad->ioc_comp);
+
+	if (!bnad->ioc_comp_status) {
+		bnad->state = BNAD_S_INIT;
+		bna_port_mac_get(bnad->priv, (struct mac *)bnad->perm_addr);
+	} else {
+		bnad->state = BNAD_S_INIT_DOWN;
+	}
+	bnad_conf_unlock();
+
+	return 0;
+
+disable_msix:
+	bnad_disable_msix(bnad);
+	bnad_cee_detach(bnad);
+iocll_detach:
+	bna_iocll_detach(bnad->priv);
+free_ioc_mem:
+	bnad_free_ioc_mem(bnad);
+	bna_uninit(bnad->priv);
+	pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE, bnad->priv_stats,
+			    pci_unmap_addr(bnad, priv_stats_dma));
+	bnad->priv_stats = NULL;
+free_priv_mem:
+	kfree(bnad->priv);
+	bnad->priv = NULL;
+free_trcmod:
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+
+	return err;
+}
+
+static void
+bnad_priv_uninit(struct bnad *bnad)
+{
+	int i;
+	enum bna_status_e err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->priv) {
+
+		init_completion(&bnad->ioc_comp);
+
+		for (i = 0; i < 10; i++) {
+			spin_lock_irq(&bnad->priv_lock);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err || err == BNA_BUSY);
+			if (!err)
+				break;
+			msleep(1000);
+		}
+		if (err) {
+			/* Probably firmware crashed. */
+			sprintf(message,
+				"bna_iocll_disable failed, "
+				"clean up and try again");
+			DPRINTK(INFO, "%s", message);
+			spin_lock_irq(&bnad->priv_lock);
+			bna_cleanup(bnad->priv);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err);
+		}
+		wait_for_completion(&bnad->ioc_comp);
+
+		sprintf(message, "port %u IOC is disabled", bnad->bna_id);
+		DPRINTK(INFO, "%s", message);
+
+		bnad->state = BNAD_S_UNLOADING;
+
+		/* Stop the timer after disabling IOC. */
+		del_timer_sync(&bnad->ioc_timer);
+		bnad_free_ioc_mem(bnad);
+		bna_iocll_detach(bnad->priv);
+
+		flush_scheduled_work();
+		bnad_free_mbox_irq(bnad);
+
+		bnad_disable_msix(bnad);
+
+		bnad_cee_detach(bnad);
+
+		bna_uninit(bnad->priv);
+		if (bnad->priv_stats) {
+			pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+					    bnad->priv_stats,
+					    pci_unmap_addr(bnad,
+							   priv_stats_dma));
+			bnad->priv_stats = NULL;
+		}
+		kfree(bnad->priv);
+		bnad->priv = NULL;
+	}
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+}
+
+static struct pci_device_id bnad_pci_id_table[] = {
+	{
+	 .vendor = PCI_VENDOR_ID_BROCADE,
+	 .device = PCI_DEVICE_ID_BROCADE_CATAPULT,
+	 .subvendor = PCI_ANY_ID,
+	 .subdevice = PCI_ANY_ID,
+	 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+	 .class_mask = 0xffff00},
+	{0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static int __devinit
+bnad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pcidev_id)
+{
+	int err, using_dac;
+	struct net_device *netdev;
+	struct bnad *bnad;
+	unsigned long mmio_start, mmio_len;
+	static u32 bna_id;
+
+	printk(KERN_INFO "bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+	if (!bfad_get_firmware_buf(pdev)) {
+		printk(KERN_WARNING "Failed to load Firmware Image!\n");
+		return -ENODEV;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, BNAD_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed: %d\n", err);
+		goto disable_device;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev,
+				DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev,
+					"set 32bit consistent DMA mask failed: "
+					"%d\n", err);
+				goto release_regions;
+			}
+		}
+		using_dac = 0;
+	}
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct bnad));
+	if (!netdev) {
+		dev_err(&pdev->dev, "alloc_etherdev failed\n");
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	SET_MODULE_OWNER(netdev);
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	pci_set_drvdata(pdev, netdev);
+
+	bnad = netdev_priv(netdev);
+
+	memset(bnad, 0, sizeof(struct bnad));
+
+	bnad->netdev = netdev;
+	bnad->pcidev = pdev;
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bnad->bar0 = ioremap_nocache(mmio_start, mmio_len);
+	if (!bnad->bar0) {
+		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+		err = -ENOMEM;
+		goto free_devices;
+	}
+	printk(KERN_INFO "bar0 mapped to %p, len %lu\n", bnad->bar0, mmio_len);
+
+	netdev->netdev_ops = &bnad_netdev_ops;
+
+	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_IPV6_CSUM;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+
+#ifdef BNAD_VLAN_FEATURES
+	netdev->vlan_features = netdev->features;
+#endif
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |=
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len - 1;
+
+	bnad_set_ethtool_ops(netdev);
+
+	bnad->bna_id = bna_id;
+	err = bnad_priv_init(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
+		goto unmap_bar0;
+	}
+
+	BNA_ASSERT(netdev->addr_len == ETH_ALEN);
+	memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+	memcpy(netdev->dev_addr, bnad->perm_addr, netdev->addr_len);
+
+	netif_carrier_off(netdev);
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR "port %u register_netdev failed: %d\n",
+		       bnad->bna_id, err);
+		goto bnad_device_uninit;
+	}
+	bna_id++;
+	return 0;
+
+bnad_device_uninit:
+	bnad_priv_uninit(bnad);
+unmap_bar0:
+	iounmap(bnad->bar0);
+free_devices:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+release_regions:
+	pci_release_regions(pdev);
+disable_device:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void __devexit
+bnad_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct bnad *bnad;
+
+	if (!netdev)
+		return;
+
+	printk(KERN_INFO "%s bnad_pci_remove\n", netdev->name);
+	bnad = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	bnad_priv_uninit(bnad);
+	iounmap(bnad->bar0);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver bnad_pci_driver = {
+	.name = BNAD_NAME,
+	.id_table = bnad_pci_id_table,
+	.probe = bnad_pci_probe,
+	.remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init
+bnad_module_init(void)
+{
+
+	printk(KERN_INFO "Brocade 10G Ethernet driver\n");
+
+	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+	return pci_register_driver(&bnad_pci_driver);
+}
+
+static void __exit
+bnad_module_exit(void)
+{
+	pci_unregister_driver(&bnad_pci_driver);
+
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.h net-next-2.6-mod/drivers/net/bna/bnad.h
--- net-next-2.6-orig/drivers/net/bna/bnad.h	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.h	2009-11-17 00:05:36.616596000 -0800
@@ -0,0 +1,350 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef _BNAD_H_
+#define _BNAD_H_
+
+#include <cee/bfa_cee.h>
+#include "bna.h"
+
+#include "bnad_compat.h"
+
+#define BNAD_MAX_Q_DEPTH	0x10000
+#define BNAD_MIN_Q_DEPTH	0x200
+
+#define BNAD_TXQ_NUM		1
+#define BNAD_TX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_TXQ	2048
+
+#define BNAD_MAX_RXQS		64
+#define BNAD_MAX_RXQSETS_USED	16
+#define BNAD_RX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_RXQ	2048
+
+#define BNAD_MAX_CQS		64
+#define BNAD_MAX_RXQS_PER_CQ	2
+
+#define BNAD_MSIX_ERR_MAILBOX_NUM	1
+
+#define BNAD_INTX_MAX_IB_NUM	16
+#define BNAD_INTX_IB_NUM	2	/* 1 for Tx, 1 for Rx */
+#define BNAD_INTX_TX_IB_ID	0
+#define BNAD_INTX_RX_IB_ID	1
+
+#define BNAD_QUEUE_NAME_SIZE	16
+
+#define BNAD_JUMBO_MTU		9000
+
+#define BNAD_COALESCING_TIMER_UNIT	5	/* 5us */
+#define BNAD_MAX_COALESCING_TIMEO	0xFF	/* in 5us units */
+#define BNAD_MAX_INTERPKT_COUNT		0xFF
+#define BNAD_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
+
+#define BNAD_TX_COALESCING_TIMEO	20	/* 20 * 5 = 100us */
+#define BNAD_TX_INTERPKT_COUNT		32
+
+#define BNAD_RX_COALESCING_TIMEO	12	/* 12 * 5 = 60us */
+#define BNAD_RX_INTERPKT_COUNT		6
+#define BNAD_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
+
+#define BNAD_SMALL_RXBUF_SIZE	128
+
+#define BNAD_RIT_OFFSET		0
+#define BNAD_MULTICAST_RXQ_ID	0
+
+#define BNAD_NETIF_WAKE_THRESHOLD	8
+
+#define BNAD_TX_MAX_VECTORS		255
+#define BNAD_TX_MAX_VECTORS_PER_WI	4
+#define BNAD_TX_MAX_DATA_PER_WI		0xFFFFFF	/* 24 bits */
+#define BNAD_TX_MAX_DATA_PER_VECTOR	0x3FFF	/* 14 bits */
+#define BNAD_TX_MAX_WRR_QUOTA		0xFFF	/* 12 bits */
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
+
+#define BNAD_NOT_READY(_bnad)	test_bit(BNAD_F_HWERROR, &(_bnad)->flags)
+#define BNAD_ADMIN_DOWN(_bnad)	(!netif_running((_bnad)->netdev) ||	\
+	test_bit(BNAD_F_BCU_DISABLED, &(_bnad)->flags))
+
+#define BNAD_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)	\
+	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define bnad_conf_lock()	down(&bnad->conf_sem)
+#define bnad_conf_unlock()	up(&bnad->conf_sem)
+
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_ct;
+extern u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+
+struct bnad_skb_unmap {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+	u32 producer_index;
+	u32 consumer_index;
+	struct bnad_skb_unmap *unmap_array;
+	u32 q_depth;
+};
+
+struct bnad_ib_entry {
+	struct bna_ib *ib;
+	void *ib_seg_addr;
+	struct bna_ib_config ib_config;
+};
+
+struct bnad_txq_info {
+	unsigned long flags;
+#define BNAD_TXQ_FREE_SENT	0
+	struct bna_txq txq;
+	struct bna_ib ib;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct bnad *bnad;
+	volatile u32 *hw_consumer_index;
+	struct bna_txq_config txq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_rxq_info {
+	unsigned long flags;
+#define BNAD_RXQ_REFILL		0
+	struct bna_rxq rxq;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_packets_with_error;
+	u64 rxbuf_alloc_failed;
+	struct bnad *bnad;
+	u32 rxq_id;
+	struct bna_rxq_config rxq_config;
+} ____cacheline_aligned;
+
+struct bnad_cq_info {
+	struct bna_cq cq;
+	struct bna_ib ib;
+	struct bnad *bnad;
+	struct bna_pkt_rate pkt_rate;
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	volatile u32 *hw_producer_index;
+	struct napi_struct napi;
+	u32 cq_id;
+	struct bna_cq_config cq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_txf_info {
+	u32 txf_id;
+	struct bna_txf_config txf_config;
+};
+
+struct bnad_rxf_info {
+	u32 rxf_id;
+	struct bna_rxf_config rxf_config;
+};
+
+enum bnad_ucast_cmd {
+	BNAD_UCAST_MAC_SET,
+	BNAD_UCAST_MAC_ADD,
+	BNAD_UCAST_MAC_DEL
+};
+
+
+enum bnad_state {
+	BNAD_S_START = 0,
+	BNAD_S_INIT = 1,
+	BNAD_S_INIT_DOWN = 2,
+	BNAD_S_INIT_DISABLING = 3,
+	BNAD_S_INIT_DISABLED = 4,
+	BNAD_S_OPENING = 5,
+	BNAD_S_OPEN = 6,
+	BNAD_S_OPEN_DOWN = 7,
+	BNAD_S_OPEN_DISABLING = 8,
+	BNAD_S_OPEN_DISABLED = 9,
+	BNAD_S_CLOSING = 10,
+	BNAD_S_UNLOADING = 11
+};
+
+enum bnad_link_state {
+	BNAD_LS_DOWN = 0,
+	BNAD_LS_UP = 1
+};
+struct bnad {
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	struct bna_dev *priv;
+
+	enum bnad_state state;
+	unsigned long flags;
+#define BNAD_F_BCU_DISABLED		0
+#define BNAD_F_HWERROR			1
+#define BNAD_F_MBOX_IRQ_DISABLED	2
+#define BNAD_F_CEE_RUNNING		3
+
+	unsigned int config;
+#define BNAD_CF_MSIX		0x01
+#define BNAD_CF_PROMISC		0x02
+#define BNAD_CF_ALLMULTI		0x04
+#define BNAD_CF_TXQ_DEPTH	0x10
+#define BNAD_CF_RXQ_DEPTH	0x20
+
+	unsigned int priority;
+	unsigned int curr_priority;	/* currently applied priority */
+
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+
+	uint txq_num;
+	uint txq_depth;
+	struct bnad_txq_info *txq_table;
+
+	struct tasklet_struct tx_free_tasklet;	/* For Tx cleanup */
+
+	uint rxq_num;
+	uint rxq_depth;
+	struct bnad_rxq_info *rxq_table;
+	uint cq_num;
+	struct bnad_cq_info *cq_table;
+
+	struct vlan_group *vlangrp;
+
+	u32 rx_csum;
+
+	uint msix_num;
+	struct msix_entry *msix_table;
+
+	uint ib_num;
+	struct bnad_ib_entry *ib_table;
+
+	struct bna_rit_entry *rit;	/* RxQ Indirection Table */
+
+	spinlock_t priv_lock ____cacheline_aligned;
+
+	uint txf_num;
+	struct bnad_txf_info *txf_table;
+	uint rxf_num;
+	struct bnad_rxf_info *rxf_table;
+
+	struct timer_list stats_timer;
+	struct net_device_stats net_stats;
+
+	u8 tx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 tx_interpkt_count;
+
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 rx_interpkt_count;
+	u8 rx_interpkt_timeo;	/* 4 bits, unit is 0.5usec. */
+
+	u8 rx_dyn_coalesce_on;	/* Rx Dynamic Intr Moderation Flag */
+
+	u8 ref_count;
+
+	u8 lldp_comp_status;
+	u8 cee_stats_comp_status;
+	u8 cee_reset_stats_status;
+	u8 ucast_comp_status;
+	u8 qstop_comp_status;
+
+	int ioc_comp_status;
+
+	struct bna_pause_config pause_config;
+
+	struct bna_stats *hw_stats;
+	struct bnad_drv_stats stats;
+
+	struct work_struct work;
+	unsigned int work_flags;
+#define BNAD_WF_ERROR		0x1
+#define BNAD_WF_RESETDONE	0x2
+#define BNAD_WF_CEE_PRIO	0x4
+#define BNAD_WF_LS_NOTIFY	0x8
+
+	struct completion lldp_comp;
+	struct completion cee_stats_comp;
+	struct completion cee_reset_stats_comp;
+	struct completion ucast_comp;
+	struct completion qstop_comp;
+	struct completion ioc_comp;
+
+	u32 bna_id;
+	u8 __iomem *bar0;	/* registers */
+	unsigned char perm_addr[ETH_ALEN];
+
+	void *priv_stats;
+	DECLARE_PCI_UNMAP_ADDR(priv_stats_dma)
+
+	struct bfa_trc_mod *trcmod;
+	struct bfa_log_mod *logmod;
+	struct bna_meminfo ioc_meminfo[BNA_MEM_T_MAX];
+	struct timer_list ioc_timer;
+	struct semaphore conf_sem;
+
+	struct bna_mbox_cbfn priv_cbfn;
+
+	char adapter_name[64];
+	char port_name[64];
+
+	/* CEE Stuff */
+	struct bfa_cee_cbfn cee_cbfn;
+	struct bfa_cee cee;
+
+	struct list_head list_entry;
+};
+
+extern uint bnad_rxqs_per_cq;
+
+extern struct semaphore bnad_list_sem;
+extern struct list_head bnad_list;
+
+int bnad_open(struct net_device *netdev);
+int bnad_stop(struct net_device *netdev);
+int bnad_stop_locked(struct net_device *netdev);
+int bnad_open_locked(struct net_device *netdev);
+int bnad_sw_reset_locked(struct net_device *netdev);
+int bnad_ioc_disabling_locked(struct bnad *bnad);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_ioctl_init(void);
+void bnad_ioctl_exit(void);
+struct net_device_stats *bnad_get_stats(struct net_device *netdev);
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 * mac_ptr,
+		   unsigned int cmd);
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss);
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id);
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask);
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_cq_init(struct bnad *bnad, uint cq_id);
+void bnad_setup_cq(struct bnad *bnad, uint cq_id);
+void bnad_setup_ib(struct bnad *bnad, uint ib_id);
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id);
+void bnad_free_ib(struct bnad *bnad, uint ib_id);
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id);
+u32 bnad_get_msglevel(struct net_device *netdev);
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel);
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth);
+void bnad_free_cq(struct bnad *bnad, uint cq_id);
+void bnad_add_to_list(struct bnad *bnad);
+void bnad_remove_from_list(struct bnad *bnad);
+struct bnad *get_bnadev(int bna_id);
+int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _BNAD_H_ */


* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-17  8:30 Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver Rasesh Mody
@ 2009-11-17  8:59 ` David Miller
  0 siblings, 0 replies; 30+ messages in thread
From: David Miller @ 2009-11-17  8:59 UTC (permalink / raw)
  To: rmody; +Cc: netdev, adapter_linux_open_src_team

From: Rasesh Mody <rmody@brocade.com>
Date: Tue, 17 Nov 2009 00:30:55 -0800

> +#include <cna.h>

Using "cna.h" is more appropriate.  "<cna.h>" assumes the
current working directory is in the header search path.

> +static uint bnad_msix = 1;
> +static uint bnad_small_large_rxbufs = 1;
> +static uint bnad_rxqsets_used;
> +static uint bnad_ipid_mode;
> +static uint bnad_vlan_strip = 1;
> +static uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
> +static uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
> +static uint bnad_log_level ;

Many of these are a waste of space, on one or two counts.

Some of them are static settings that cannot be modified by
either module parameters or ethtool settings.  Therefore they
are constant and should be entirely removed from the driver
and all conditionals on those values completely removed.

In the worst case "const" should be added to them so the compiler
can optimize everything away.

Many of them are also booleans, so even if they did stay, a more
appropriate type would be 'bool', which typically consumes less space
than 'int' and also communicates the variable's use better.

Overall, this is a very sloppy set of driver knobs.
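
For anything that genuinely needs to stay tunable, something along
these lines would be more appropriate (just a sketch; the parameter
description text is mine, not from the patch):

	static bool bnad_vlan_strip = true;
	module_param(bnad_vlan_strip, bool, 0444);
	MODULE_PARM_DESC(bnad_vlan_strip, "Strip VLAN tags on received frames");

The rest should become #defines or go away together with the code that
tests them.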

> +static void
> +bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
 ...
> +static void
> +bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid);

Don't chop things up like this just to abide by the "80 column line limit"
rule.  This makes grepping produce useless results.  If I grep for
"bnad_vlan_rx_kill_vid" the grep output won't show the function's
return type.
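
If a declaration really is too long for one line, keep the return type
and the name together and wrap at an argument boundary instead, e.g.
(same function, just re-wrapped):

	static void bnad_vlan_rx_kill_vid(struct net_device *netdev,
					  unsigned short vid);

so grepping for the name still shows what it returns.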

> +u32
> +bnad_get_msglevel(struct net_device *netdev)

One line please.

> +void
> +bnad_set_msglevel(struct net_device *netdev, u32 msglevel)

Same here.  And for rest of driver.

> +	BNA_ASSERT(wis <=
> +		   BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth));

Please do not define your own assertion macros.  We have an
incredibly complete set of BUG traps and warning producing
state tests.  See "BUG_ON()", "WARN_ON()", "WARN()", etc.

Those macros should be used in favor of yours for many reasons,
the least of which is that WARN() is specifically tailored to
produce output that is parsable by kerneloops.org and other crash
dump analyzing tools.
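
For example, the assertion quoted above could become something like
(sketch only):

	WARN_ON(wis > BNA_QE_IN_USE_CNT(&txqinfo->txq.q,
					txqinfo->txq.q.q_depth));

i.e. warn on the inverted condition, or BUG_ON() if that state really
is unrecoverable.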

> +static inline void
> +bnad_disable_txrx_irqs(struct bnad *bnad)
> +{

Is this performance critical?  If not, why inline?  Just drop
the inlines and let the compiler decide; we're not using 1990s-era
compiler technology any more. :-)

> +	if (sent) {
> +		if (netif_queue_stopped(netdev) &&
> +		    netif_carrier_ok(netdev) &&
> +		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
> +		    BNAD_NETIF_WAKE_THRESHOLD) {
> +			netif_wake_queue(netdev);
> +			bnad->stats.netif_queue_wakeup++;
> +		}
> +		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);

Is this driver multiqueue?  It seems so.

Yet here you're only using global device queue flow control.
This means that if one TX queue fills up, it will stop packet
submission for all of your TX queues which is extremely
suboptimal.

Also the netif_carrier_ok() test is not correct here.  The networking
core will stop TX submissions and synchronize the TX stream when the
carrier goes down.  TX queue flow control is independent of carrier
handling.
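
With per-queue flow control the wakeup would look roughly like this
(a sketch, assuming the queue index can be derived from txqinfo):

	struct netdev_queue *nq =
		netdev_get_tx_queue(netdev, txqinfo - bnad->txq_table);

	if (netif_tx_queue_stopped(nq) &&
	    BNA_Q_FREE_COUNT(&txqinfo->txq) >= BNAD_NETIF_WAKE_THRESHOLD)
		netif_tx_wake_queue(nq);

with a matching netif_tx_stop_queue(nq) in the xmit path when that
particular queue fills up.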

> +	prefetch(bnad);
> +	prefetch(bnad->netdev);

Prefetching a variable then immediately dereferencing it is
completely pointless.

If you disagree show me a benchmark that shows otherwise.

> +static void
> +bnad_netpoll(struct net_device *netdev)
> +{
> +	struct bnad *bnad = netdev_priv(netdev);
> +	struct bnad_cq_info *cqinfo;
> +	int i;
> +
> +	if (!(bnad->config & BNAD_CF_MSIX)) {
> +		disable_irq(bnad->pcidev->irq);
> +		bnad_isr(bnad->pcidev->irq, netdev);
> +		enable_irq(bnad->pcidev->irq);
> +	} else {
> +		for (i = 0; i < bnad->cq_num; i++) {
> +			cqinfo = &bnad->cq_table[i];
> +			bnad_disable_rx_irq(bnad, cqinfo);
> +			bnad_poll_cq(bnad, cqinfo, BNAD_MAX_Q_DEPTH);
> +			bnad_enable_rx_irq(bnad, cqinfo);
> +		}
> +	}
> +}
> +#endif

This is not correct.  Even if you're not using BNAD_CF_MSIX
you are still using NAPI.  So you should run the ISR and let
NAPI polling get scheduled.

Then bnad_poll_cq() is always running from NAPI ->poll() context
and therefore you don't need to use things like dev_kfree_skb_any()
et al. in there, you can use plain dev_kfree_skb() which is much
more efficient and correct.
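
In other words, something closer to this (sketch, untested; it assumes
the existing bnad_isr()/NAPI setup):

	static void bnad_netpoll(struct net_device *netdev)
	{
		struct bnad *bnad = netdev_priv(netdev);
		int i;

		if (!(bnad->config & BNAD_CF_MSIX)) {
			disable_irq(bnad->pcidev->irq);
			bnad_isr(bnad->pcidev->irq, netdev);
			enable_irq(bnad->pcidev->irq);
		} else {
			/* just kick NAPI and let ->poll() do the work */
			for (i = 0; i < bnad->cq_num; i++)
				napi_schedule(&bnad->cq_table[i].napi);
		}
	}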

> +	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
> +		     (unsigned long)bnad);

Using a tasklet for TX packet liberation is dubious at best.   It's
just added overhead, scheduling yet another softirq from another
softirq (->poll() processing) when you can just invoke dev_kfree_skb()
directly.
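
For instance, the ->poll() routine could reap TX completions before the
RX work, roughly (sketch, keeping the existing BNAD_TXQ_FREE_SENT
synchronization around it):

	unsigned int sent;

	sent = bnad_free_txbufs(txqinfo,
				(u16)(*txqinfo->hw_consumer_index));
	bna_ib_ack(bnad->priv, &txqinfo->ib, sent);

and then plain dev_kfree_skb() is sufficient for the freed skbs, since
this always runs in softirq context.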

> +	if (bnad->priv) {
> +
> +
> +		init_completion(&bnad->ioc_comp);

There are a lot of excess blank lines throughout the driver that look
like this one; please clean them up.  Anything more than one empty
line is likely too much.


* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2010-02-19 21:52 Rasesh Mody
@ 2010-02-22 12:40 ` Stanislaw Gruszka
  0 siblings, 0 replies; 30+ messages in thread
From: Stanislaw Gruszka @ 2010-02-22 12:40 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, adapter_linux_open_src_team

On Fri, Feb 19, 2010 at 01:52:38PM -0800, Rasesh Mody wrote:
> From: Rasesh Mody <rmody@brocade.com>
> 
> This is patch 1/6 which contains linux driver source for
> Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
> Source is based against net-next-2.6.
> 
> We wish this patch to be considered for inclusion in net-next-2.6
> 
> Signed-off-by: Rasesh Mody <rmody@brocade.com>
> ---
>  bnad.c | 3481 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  bnad.h |  341 ++++++
>  2 files changed, 3822 insertions(+)
[snip]
> void bnad_ioc_timeout(unsigned long ioc_arg)
> +{
> +	struct bnad *bnad = (struct bnad *)(((struct bfa_ioc*)ioc_arg)->bfa);
> +	spin_lock_irq(&bnad->priv_lock);
> +	spin_unlock_irq(&bnad->priv_lock);

Quite strange spin lock usage :)
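
Presumably the intent was to run the IOC timeout handling under the
lock, like the other IOC timer callbacks in this patch, i.e. something
like the following (my guess, assuming the IOC code exposes a
bfa_ioc_timeout() handler the same way it does for the sem/hb ones):

	void bnad_ioc_timeout(unsigned long ioc_arg)
	{
		struct bnad *bnad =
			(struct bnad *)(((struct bfa_ioc *)ioc_arg)->bfa);

		spin_lock_irq(&bnad->priv_lock);
		bfa_ioc_timeout(ioc_arg);
		spin_unlock_irq(&bnad->priv_lock);
	}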
  
> +u32
> +bnad_get_msglevel(struct net_device *netdev)
> +{
> +	return bnad_log_level;
> +}
> +
> +void
> +bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
> +{
> +	struct bnad *bnad = netdev_priv(netdev);
> +	mutex_lock(&bnad->conf_mutex);
> +	bnad_log_level = msglevel;
> +	mutex_unlock(&bnad->conf_mutex);
> +}

bnad_log_level is not used anywhere.



* Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
@ 2010-02-19 21:52 Rasesh Mody
  2010-02-22 12:40 ` Stanislaw Gruszka
  0 siblings, 1 reply; 30+ messages in thread
From: Rasesh Mody @ 2010-02-19 21:52 UTC (permalink / raw)
  To: netdev; +Cc: adapter_linux_open_src_team

From: Rasesh Mody <rmody@brocade.com>

This is patch 1/6 which contains linux driver source for
Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
Source is based against net-next-2.6.

We wish this patch to be considered for inclusion in net-next-2.6

Signed-off-by: Rasesh Mody <rmody@brocade.com>
---
 bnad.c | 3481 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 bnad.h |  341 ++++++
 2 files changed, 3822 insertions(+)

diff -ruP net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.c net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.c
--- net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.c	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.c	2010-02-19 13:42:27.216260000 -0800
@@ -0,0 +1,3481 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ *  bnad.c  Brocade 10G PCIe Ethernet driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/workqueue.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pm.h>
+#include <linux/random.h>
+
+#include <net/checksum.h>
+
+#include "bnad.h"
+#include "cna.h"
+#include "bna_iocll.h"
+#include "bna_intr.h"
+#include "bnad_defs.h"
+
+#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
+static const bool bnad_msix = 1;
+static const bool bnad_small_large_rxbufs = 1;
+static uint bnad_rxqsets_used;
+static const bool bnad_ipid_mode;
+static const bool bnad_vlan_strip = 1;
+static const uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
+static const uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
+static uint bnad_log_level;
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");
+
+uint bnad_rxqs_per_cq = 2;
+
+const char *bnad_states[] = {
+	"START",
+	"INIT",
+	"INIT_DOWN",
+	"INIT_DISABLING",
+	"INIT_DISABLED",
+	"OPENING",
+	"OPEN",
+	"OPEN_DOWN",
+	"OPEN_DISABLING",
+	"OPEN_DISABLED",
+	"CLOSING",
+	"UNLOADING"
+};
+
+DEFINE_MUTEX(bnad_fwimg_mutex);
+
+static void bnad_disable_msix(struct bnad *bnad);
+static void bnad_free_ibs(struct bnad *bnad);
+static void bnad_set_rx_mode(struct net_device *netdev);
+static void bnad_set_rx_mode_locked(struct net_device *netdev);
+static void bnad_reconfig_vlans(struct bnad *bnad);
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets);
+static int bnad_set_mac_address(struct net_device *netdev, void *addr);
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr);
+static int bnad_disable_locked(struct bnad *bnad);
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu);
+static void bnad_vlan_rx_register(struct net_device *netdev,
+				  struct vlan_group *grp);
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid);
+static void bnad_vlan_rx_kill_vid(struct net_device *netdev,
+				  unsigned short vid);
+static void bnad_netpoll(struct net_device *netdev);
+
+static const struct net_device_ops bnad_netdev_ops = {
+	.ndo_open			= bnad_open,
+	.ndo_stop			= bnad_stop,
+	.ndo_start_xmit			= bnad_start_xmit,
+	.ndo_get_stats			= bnad_get_stats,
+	.ndo_set_rx_mode		= bnad_set_rx_mode,
+	.ndo_set_multicast_list		= bnad_set_rx_mode,
+	.ndo_set_mac_address		= bnad_set_mac_address,
+	.ndo_change_mtu			= bnad_change_mtu,
+
+	.ndo_vlan_rx_register		= bnad_vlan_rx_register,
+	.ndo_vlan_rx_add_vid		= bnad_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid		= bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller		= bnad_netpoll,
+#endif
+};
+
+void bnad_ioc_timeout(unsigned long ioc_arg)
+{
+	struct bnad *bnad = (struct bnad *)(((struct bfa_ioc*)ioc_arg)->bfa);
+	spin_lock_irq(&bnad->priv_lock);
+	/* Mirror the sem/hb timer handlers below; assumes bfa_ioc_timeout()
+	 * is provided by the IOC layer.
+	 */
+	bfa_ioc_timeout(ioc_arg);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_ioc_sem_timeout(unsigned long ioc_arg)
+{
+	struct bnad *bnad = (struct bnad *)(((struct bfa_ioc*)ioc_arg)->bfa);
+	spin_lock_irq(&bnad->priv_lock);
+	bfa_ioc_sem_timeout(ioc_arg);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_ioc_hb_check(unsigned long ioc_arg)
+{
+	struct bnad *bnad = (struct bnad *)(((struct bfa_ioc*)ioc_arg)->bfa);
+	spin_lock_irq(&bnad->priv_lock);
+	bfa_ioc_hb_check(ioc_arg);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+u32
+bnad_get_msglevel(struct net_device *netdev)
+{
+	return bnad_log_level;
+}
+
+void
+bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	mutex_lock(&bnad->conf_mutex);
+	bnad_log_level = msglevel;
+	mutex_unlock(&bnad->conf_mutex);
+}
+
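+/*
+ * Reclaim transmitted buffers up to updated_txq_cons: unmap the head and
+ * all fragments of each completed skb, free the skb, and advance both the
+ * TxQ and unmap-queue consumer indices.  Returns the number of packets
+ * reclaimed so the caller can ack the interrupt block.
+ */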
+static unsigned int bnad_free_txbufs(struct bnad_txq_info *txqinfo,
+	u16 updated_txq_cons)
+{
+	struct bnad *bnad = txqinfo->bnad;
+	unsigned int sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	wis = BNAD_Q_INDEX_CHANGE(txqinfo->txq.q.consumer_index,
+				  updated_txq_cons, txqinfo->txq.q.q_depth);
+	BUG_ON(!(wis <=
+		   BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth)));
+	unmap_array = txqinfo->skb_unmap_q.unmap_array;
+	unmap_cons = txqinfo->skb_unmap_q.consumer_index;
+	prefetch(&unmap_array[unmap_cons + 1]);
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+		unmap_array[unmap_cons].skb = NULL;
+		BUG_ON(!(wis >=
+			   BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags)));
+		BUG_ON(!(((txqinfo->skb_unmap_q.producer_index - unmap_cons) &
+			  (txqinfo->skb_unmap_q.q_depth - 1)) >=
+			 1 + skb_shinfo(skb)->nr_frags));
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr), skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, txqinfo->skb_unmap_q.q_depth);
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+				       pci_unmap_addr(&unmap_array[unmap_cons],
+						      dma_addr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					   0);
+			BNA_QE_INDX_ADD(unmap_cons, 1,
+					txqinfo->skb_unmap_q.q_depth);
+			prefetch(&unmap_array[unmap_cons + 1]);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	txqinfo->txq.q.consumer_index = updated_txq_cons;
+	txqinfo->skb_unmap_q.consumer_index = unmap_cons;
+	txqinfo->tx_packets += sent_packets;
+	txqinfo->tx_bytes += sent_bytes;
+	return sent_packets;
+}
+
+static inline void bnad_disable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+}
+
+static inline void bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    bnad->tx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    bnad->cq_table[i].
+					    rx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static inline void bnad_disable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib, 0);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+}
+
+static inline void bnad_enable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib,
+				    cqinfo->rx_coalescing_timeo);
+
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static unsigned int bnad_tx(struct bnad *bnad, struct bnad_txq_info *txqinfo)
+{
+	struct net_device *netdev = bnad->netdev;
+	unsigned int sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags))
+		return 0;
+
+	sent = bnad_free_txbufs(txqinfo,
+				(u16) (*txqinfo->hw_consumer_index));
+	if (sent) {
+		if (netif_queue_stopped(netdev) &&
+		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
+		    BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);
+	} else {
+		bna_ib_ack(bnad->priv, &txqinfo->ib, 0);
+	}
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+
+	return sent;
+}
+
+static irqreturn_t bnad_msix_tx(int irq, void *data)
+{
+	struct bnad_txq_info *txqinfo = (struct bnad_txq_info *)data;
+	struct bnad *bnad = txqinfo->bnad;
+
+	bnad_tx(bnad, txqinfo);
+
+	return IRQ_HANDLED;
+}
+
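+/*
+ * Post fresh receive buffers: allocate an skb for each free RxQ entry, map
+ * it for DMA, record it in the unmap queue, and ring the RxQ doorbell once
+ * for everything that was posted.
+ */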
+static void bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc =
+		BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				rxqinfo->skb_unmap_q.q_depth);
+
+	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
+	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
+	BUG_ON(!(wi_range && wi_range <= rxqinfo->rxq.q.q_depth));
+
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent,
+					     wi_range);
+			BUG_ON(!(wi_range &&
+				   wi_range <= rxqinfo->rxq.q.q_depth));
+		}
+		skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			rxqinfo->rxbuf_alloc_failed++;
+			goto finishing;
+		}
+		skb->dev = rxqinfo->bnad->netdev;
+		skb_reserve(skb, NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr =
+			pci_map_single(rxqinfo->bnad->pcidev, skb->data,
+				       rxqinfo->rxq_config.buffer_size,
+				       PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, rxqinfo->skb_unmap_q.q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		rxqinfo->skb_unmap_q.producer_index = unmap_prod;
+		rxqinfo->rxq.q.producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(&rxqinfo->rxq);
+	}
+}
+
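+/*
+ * Refill is skipped unless at least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT
+ * entries are free; the BNAD_RXQ_REFILL bit keeps concurrent callers out.
+ */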
+static inline void bnad_refill_rxq(struct bnad_rxq_info *rxqinfo)
+{
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags)) {
+		if (BNA_QE_FREE_CNT
+		    (&rxqinfo->skb_unmap_q,
+		     rxqinfo->skb_unmap_q.
+		     q_depth) >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_rxbufs(rxqinfo);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+	}
+}
+
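+/*
+ * Consume up to "budget" completions from one CQ: unmap each buffer, drop
+ * frames flagged with MAC/FCS/length errors, hand the rest to the stack
+ * (with VLAN acceleration when enabled), then ack the IB and refill the
+ * RxQ(s) feeding this CQ.
+ */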
+static unsigned int bnad_poll_cq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_rxq_info *rxqinfo = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	struct bna_pkt_rate *pkt_rt = &cqinfo->pkt_rate;
+
+	prefetch(bnad->netdev);
+	cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+	BUG_ON(!(wi_range && wi_range <= cqinfo->cq.q.q_depth));
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+		rxqinfo = &bnad->rxq_table[cmpl->rxq_id];
+		unmap_q = &rxqinfo->skb_unmap_q;
+
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		prefetch(skb->data - NET_IP_ALIGN);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+
+		wis++;
+		if (likely(--wi_range))
+			next_cmpl = cmpl + 1;
+		else {
+			BNA_Q_PI_ADD(&cqinfo->cq, wis);
+			wis = 0;
+			next_cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+			BUG_ON(!(wi_range &&
+				   wi_range <= cqinfo->cq.q.q_depth));
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely
+		    (flags &
+		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
+		      BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb(skb);
+			rxqinfo->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely
+		    (bnad->rx_csum &&
+		     (((flags & BNA_CQ_EF_IPV4) &&
+		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+		      (flags & BNA_CQ_EF_IPV6)) &&
+		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		rxqinfo->rx_packets++;
+		rxqinfo->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) &&
+		    bnad_vlan_strip) {
+			vlan_hwaccel_receive_skb(skb, bnad->vlangrp,
+						 ntohs(cmpl->vlan_tag));
+		} else
+			netif_receive_skb(skb);
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	BNA_Q_PI_ADD(&cqinfo->cq, wis);
+
+	if (likely(rxqinfo)) {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, packets);
+		/* Check the current queue first. */
+		bnad_refill_rxq(rxqinfo);
+
+		if (likely(bnad_small_large_rxbufs)) {
+			/* There are 2 RxQs - small and large buffer queues */
+			unsigned int rxq_id = (rxqinfo->rxq_id ^ 1);
+			bnad_refill_rxq(&bnad->rxq_table[rxq_id]);
+		}
+	} else {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	}
+
+	return packets;
+}
+
+static irqreturn_t bnad_msix_rx(int irq, void *data)
+{
+	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
+	struct bnad *bnad = cqinfo->bnad;
+
+	if (likely(napi_schedule_prep(&cqinfo->napi))) {
+		bnad_disable_rx_irq(bnad, cqinfo);
+		__napi_schedule(&cqinfo->napi);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_msix_err_mbox(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+
+	bna_intr_status_get(bnad->priv, &intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+		bna_mbox_err_handler(bnad->priv, intr_status);
+
+	spin_unlock(&bnad->priv_lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_isr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+
+	if (!intr_status) {
+		spin_unlock(&bnad->priv_lock);
+		return IRQ_NONE;
+	}
+
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		bna_mbox_err_handler(bnad->priv, intr_status);
+		spin_unlock(&bnad->priv_lock);
+		if (BNA_IS_ERR_INTR(intr_status) ||
+		    !BNA_IS_INTX_DATA_INTR(intr_status))
+			goto exit_isr;
+	} else
+		spin_unlock(&bnad->priv_lock);
+
+	if (likely(napi_schedule_prep(&bnad->cq_table[0].napi))) {
+		bnad_disable_txrx_irqs(bnad);
+		__napi_schedule(&bnad->cq_table[0].napi);
+	}
+
+exit_isr:
+	return IRQ_HANDLED;
+}
+
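+/*
+ * MSI-X vector layout: [0, txq_num) for TxQs, [txq_num, txq_num + cq_num)
+ * for CQs, and the last vector (msix_num - 1) for mailbox/error events.
+ * In INTx mode a single shared interrupt services everything.
+ */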
+static int bnad_request_mbox_irq(struct bnad *bnad)
+{
+	int err;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		err = request_irq(bnad->msix_table[bnad->msix_num - 1].vector,
+				  &bnad_msix_err_mbox, 0,
+				  bnad->netdev->name, bnad->netdev);
+	} else {
+		err = request_irq(bnad->pcidev->irq, &bnad_isr,
+				  IRQF_SHARED, bnad->netdev->name,
+				  bnad->netdev);
+	}
+
+	if (err) {
+		dev_err(&bnad->pcidev->dev,
+			"Request irq for mailbox failed: %d\n", err);
+		return err;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bna_mbox_msix_idx_set(bnad->priv, bnad->msix_num - 1);
+
+	bna_mbox_intr_enable(bnad->priv);
+	return 0;
+}
+
+static void bnad_sync_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+	synchronize_irq(irq);
+}
+
+static void bnad_free_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_mbox_intr_disable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	free_irq(irq, bnad->netdev);
+}
+
+static int bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
+{
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[txq_id].vector,
+			   &bnad_msix_tx, 0,
+			   bnad->txq_table[txq_id].name,
+			   &bnad->txq_table[txq_id]);
+}
+
+int
+bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
+{
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
+			   &bnad_msix_rx, 0,
+			   bnad->cq_table[cq_id].name, &bnad->cq_table[cq_id]);
+}
+
+static int bnad_request_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	int i;
+	int err;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		u32 mask;
+		bna_intx_disable(bnad->priv, &mask);
+		mask &= ~0xffff;
+		bna_intx_enable(bnad->priv, mask);
+		for (i = 0; i < bnad->ib_num; i++)
+			bna_ib_ack(bnad->priv, bnad->ib_table[i].ib, 0);
+		return 0;
+	}
+
+	entries = bnad->msix_table;
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_request_txq_irq(bnad, i);
+		if (err) {
+			pr_info("%s request irq for TxQ %d failed %d",
+				bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[i].vector,
+					 &bnad->txq_table[i]);
+			}
+			return err;
+		}
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_request_cq_irq(bnad, i);
+		if (err) {
+			pr_info("%s request irq for CQ %u failed %d",
+				bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[bnad->txq_num + i].vector,
+					 &bnad->cq_table[i]);
+			}
+			goto free_txq_irqs;
+		}
+	}
+
+	return 0;
+
+free_txq_irqs:
+	for (i = 0; i < bnad->txq_num; i++)
+		free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+	bnad_disable_msix(bnad);
+
+	return err;
+}
+
+static void bnad_free_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	uint i;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		entries = bnad->msix_table;
+		for (i = 0; i < bnad->txq_num; i++)
+			free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+		for (i = 0; i < bnad->cq_num; i++) {
+			free_irq(entries[bnad->txq_num + i].vector,
+				 &bnad->cq_table[i]);
+		}
+	} else
+		synchronize_irq(bnad->pcidev->irq);
+}
+
+void
+bnad_setup_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+
+	ib_entry = &bnad->ib_table[ib_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_ib_config_set(bnad->priv, ib_entry->ib, ib_id,
+			  &ib_entry->ib_config);
+	/* Start the IB */
+	bna_ib_ack(bnad->priv, ib_entry->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_ibs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_ib(bnad, bnad->txq_table[i].txq_config.ib_id);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_ib(bnad, bnad->cq_table[i].cq_config.ib_id);
+}
+
+/* These functions are called back with priv_lock held. */
+
+static void bnad_lldp_get_cfg_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_attr_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_stats_comp_status = status;
+	complete(&bnad->cee_stats_comp);
+}
+
+static void bnad_cee_reset_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_reset_stats_status = status;
+	complete(&bnad->cee_reset_stats_comp);
+}
+
+static void bnad_ucast_set_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->ucast_comp_status = status;
+	complete(&bnad->ucast_comp);
+}
+
+static void bnad_q_stop_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+
+	bnad->qstop_comp_status = status;
+	complete(&bnad->qstop_comp);
+}
+
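+/* Return the lowest priority set in the CEE priority bitmap (0 if none). */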
+static unsigned int bnad_get_priority(struct bnad *bnad, u8 prio_map)
+{
+	unsigned int i;
+
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		return i;
+	}
+	return 0;
+}
+
+static void bnad_link_up_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct bfi_ll_aen *up_aen = (struct bfi_ll_aen *)
+		(&bnad->priv->mb_msg);
+
+	bnad->cee_linkup = up_aen->cee_linkup;
+	bnad->priority = bnad_get_priority(bnad, up_aen->prio_map);
+
+	bnad->link_state = BNAD_LS_UP;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_link_down_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->link_state = BNAD_LS_DOWN;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_stats_get_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->stats.hw_stats_updates++;
+	if (bnad->state == BNAD_S_OPEN)
+		mod_timer(&bnad->stats_timer, jiffies + HZ);
+}
+
+/* Called with bnad priv_lock held. */
+static void bnad_hw_error(struct bnad *bnad, u8 status)
+{
+	unsigned int irq;
+
+	set_bit(BNAD_F_HWERROR, &bnad->flags);
+
+	bna_mbox_intr_disable(bnad->priv);
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (!test_and_set_bit(BNAD_F_MBOX_IRQ_DISABLED, &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			pr_info("Disabling Mbox IRQ %d for port %d",
+				irq, bnad->bna_id);
+			disable_irq_nosync(irq);
+		}
+	}
+
+	bna_cleanup(bnad->priv);
+
+	bnad->work_flags = BNAD_WF_ERROR;
+	if (bnad->state != BNAD_S_UNLOADING)
+		schedule_work(&bnad->work);
+}
+
+static void bnad_hw_error_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad_hw_error(bnad, status);
+}
+
+int
+bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth)
+{
+	/* Q_depth must be power of 2 for macros to work. */
+	unmap_q->q_depth = q_depth;
+	unmap_q->unmap_array = vmalloc(q_depth * sizeof(struct bnad_skb_unmap));
+	if (!unmap_q->unmap_array)
+		return -ENOMEM;
+	memset(unmap_q->unmap_array, 0,
+	       q_depth * sizeof(struct bnad_skb_unmap));
+	return 0;
+}
+
+static int bnad_alloc_unmap_queues(struct bnad *bnad)
+{
+	int i, err = 0;
+	struct bnad_txq_info *txqinfo;
+	struct bnad_rxq_info *rxqinfo;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		err = bnad_alloc_unmap_q(&txqinfo->skb_unmap_q,
+					 txqinfo->txq.q.q_depth * 4);
+		if (err) {
+			pr_info(
+				"%s allocating Tx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		err = bnad_alloc_unmap_q(&rxqinfo->skb_unmap_q,
+					 rxqinfo->rxq.q.q_depth);
+		if (err) {
+			pr_info(
+				"%s allocating Rx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void bnad_reset_q(struct bnad *bnad, struct bna_q *q,
+	struct bnad_unmap_q *unmap_q)
+{
+	q->producer_index = 0;
+	q->consumer_index = 0;
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+}
+
+static void bnad_flush_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	struct bnad *bnad = rxqinfo->bnad;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 cq_id;
+
+	unmap_q = &rxqinfo->skb_unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+	}
+
+	bnad_reset_q(bnad, &rxqinfo->rxq.q, &rxqinfo->skb_unmap_q);
+	cq_id = rxqinfo->rxq_id / bnad_rxqs_per_cq;
+	*bnad->cq_table[cq_id].hw_producer_index = 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_disable_txq(struct bnad *bnad, u32 txq_id)
+{
+	int err;
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_txq_stop(bnad->priv, txq_id);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto txq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto txq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+txq_stop_exit:
+	if (err) {
+		pr_info("%s stop TxQ %u failed %d", bnad->netdev->name,
+			txq_id, err);
+	}
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask)
+{
+	int err;
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_multi_rxq_stop(bnad->priv, rxq_id_mask);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto rxq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto rxq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+rxq_stop_exit:
+	if (err) {
+		pr_info("%s stop RxQs(0x%llx) failed %d",
+			bnad->netdev->name, rxq_id_mask, err);
+	}
+
+	return err;
+}
+
+static int bnad_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_rx_irq(bnad, cqinfo);
+	return rcvd;
+}
+
+static int bnad_poll_txrx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	bnad_tx(bnad, &bnad->txq_table[0]);
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_txrx_irqs(bnad);
+	return rcvd;
+}
+
+static void bnad_napi_init(struct bnad *bnad)
+{
+	int (*napi_poll) (struct napi_struct *, int);
+	int i;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		napi_poll = bnad_poll_rx;
+	else
+		napi_poll = bnad_poll_txrx;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_add(bnad->netdev, &bnad->cq_table[i].napi, napi_poll,
+			       64);
+}
+
+static void bnad_napi_enable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_enable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_disable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_disable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_del(&bnad->cq_table[i].napi);
+}
+
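+/*
+ * Quiesce the data path: disable the Tx/Rx functions and their IBs (unless
+ * a hardware error already made the device not-ready), wait for in-flight
+ * traffic to drain, release the Tx/Rx interrupts, tear down NAPI, and stop
+ * the stats timer and Tx-free tasklet.
+ */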
+static void bnad_stop_data_path(struct bnad *bnad, int on_error)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!on_error && !BNAD_NOT_READY(bnad)) {
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Wait to make sure Tx and Rx are stopped. */
+	msleep(1000);
+	bnad_free_txrx_irqs(bnad);
+	bnad_sync_mbox_irq(bnad);
+
+	bnad_napi_disable(bnad);
+	bnad_napi_uninit(bnad);
+	/* Delete the stats timer after synchronize with mbox irq. */
+	del_timer_sync(&bnad->stats_timer);
+
+	netif_tx_disable(bnad->netdev);
+	netif_carrier_off(bnad->netdev);
+
+	/*
+	 * Remove tasklets if scheduled
+	 */
+	tasklet_kill(&bnad->tx_free_tasklet);
+}
+
+static void bnad_port_admin_locked(struct bnad *bnad, u8 up)
+{
+	spin_lock_irq(&bnad->priv_lock);
+	if (!BNAD_NOT_READY(bnad)) {
+		bna_port_admin(bnad->priv, up);
+		if (up)
+			mod_timer(&bnad->stats_timer, jiffies + HZ);
+		else
+			bnad->link_state = BNAD_LS_DOWN;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held */
+static int bnad_stop_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	switch (bnad->state) {
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_CLOSING;
+		bnad_disable_locked(bnad);
+		bnad->state = BNAD_S_INIT;
+		pr_info("%s is stopped", bnad->netdev->name);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN_DISABLED:
+		bnad->state = BNAD_S_INIT_DISABLED;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held */
+int
+bnad_ioc_disabling_locked(struct bnad *bnad)
+{
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT_DISABLING;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+		bnad_disable_locked(bnad);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int bnad_alloc_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	ib_entry = &bnad->ib_table[ib_id];
+	ib_entry->ib_seg_addr =
+		pci_alloc_consistent(bnad->pcidev, L1_CACHE_BYTES, &dma_addr);
+	if (!ib_entry->ib_seg_addr)
+		return -ENOMEM;
+
+	BNA_SET_DMA_ADDR(dma_addr, &ib_entry->ib_config.ib_seg_addr);
+	return 0;
+}
+
+static int bnad_alloc_ibs(struct bnad *bnad)
+{
+	uint i;
+	int err;
+
+	bnad->ib_num = bnad->txq_num + bnad->cq_num;
+	bnad->ib_table =
+		kcalloc(bnad->ib_num, sizeof(struct bnad_ib_entry),
+			GFP_KERNEL);
+	if (!bnad->ib_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->ib_num; i++) {
+		err = bnad_alloc_ib(bnad, i);
+		if (err)
+			goto free_ibs;
+	}
+	return 0;
+
+free_ibs:
+	bnad_free_ibs(bnad);
+	return err;
+}
+
+void
+bnad_free_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	ib_entry = &bnad->ib_table[ib_id];
+	if (ib_entry->ib_seg_addr) {
+		BNA_GET_DMA_ADDR(&ib_entry->ib_config.ib_seg_addr, dma_addr);
+		pci_free_consistent(bnad->pcidev, L1_CACHE_BYTES,
+				    ib_entry->ib_seg_addr, dma_addr);
+		ib_entry->ib_seg_addr = NULL;
+	}
+}
+
+static void bnad_free_ibs(struct bnad *bnad)
+{
+	uint i;
+
+	if (!bnad->ib_table)
+		return;
+	for (i = 0; i < bnad->ib_num; i++)
+		bnad_free_ib(bnad, i);
+	kfree(bnad->ib_table);
+	bnad->ib_table = NULL;
+}
+
+/* Let the caller deal with error - free memory. */
+static int bnad_alloc_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q, size_t qsize)
+{
+	size_t i;
+	dma_addr_t dma_addr;
+
+	qsize = ALIGN(qsize, PAGE_SIZE);
+	qpt->page_count = qsize >> PAGE_SHIFT;
+	qpt->page_size = PAGE_SIZE;
+
+	qpt->kv_qpt_ptr =
+		pci_alloc_consistent(bnad->pcidev,
+				     qpt->page_count *
+				     sizeof(struct bna_dma_addr), &dma_addr);
+	if (!qpt->kv_qpt_ptr)
+		return -ENOMEM;
+	BNA_SET_DMA_ADDR(dma_addr, &qpt->hw_qpt_ptr);
+
+	q->qpt_ptr = kcalloc(qpt->page_count, sizeof(void *), GFP_KERNEL);
+	if (!q->qpt_ptr)
+		return -ENOMEM;
+	qpt->qpt_ptr = q->qpt_ptr;
+	for (i = 0; i < qpt->page_count; i++) {
+		q->qpt_ptr[i] =
+			pci_alloc_consistent(bnad->pcidev, PAGE_SIZE,
+					     &dma_addr);
+		if (!q->qpt_ptr[i])
+			return -ENOMEM;
+		BNA_SET_DMA_ADDR(dma_addr,
+				 &((struct bna_dma_addr *)qpt->kv_qpt_ptr)[i]);
+
+	}
+
+	return 0;
+}
+
+static void bnad_free_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q)
+{
+	int i;
+	dma_addr_t dma_addr;
+
+	if (qpt->kv_qpt_ptr && q->qpt_ptr) {
+		for (i = 0; i < qpt->page_count; i++) {
+			if (q->qpt_ptr[i]) {
+				BNA_GET_DMA_ADDR(&
+						 ((struct bna_dma_addr *)qpt->
+						  kv_qpt_ptr)[i], dma_addr);
+				pci_free_consistent(bnad->pcidev, PAGE_SIZE,
+						    q->qpt_ptr[i], dma_addr);
+			}
+		}
+	}
+
+	kfree(q->qpt_ptr);
+	qpt->qpt_ptr = q->qpt_ptr = NULL;
+
+	if (qpt->kv_qpt_ptr) {
+		BNA_GET_DMA_ADDR(&qpt->hw_qpt_ptr, dma_addr);
+		pci_free_consistent(bnad->pcidev,
+				    qpt->page_count *
+				    sizeof(struct bna_dma_addr),
+				    qpt->kv_qpt_ptr, dma_addr);
+		qpt->kv_qpt_ptr = NULL;
+	}
+}
+
+static void bnad_free_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	txqinfo = &bnad->txq_table[txq_id];
+	bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+	if (txqinfo->skb_unmap_q.unmap_array) {
+		bnad_free_txbufs(txqinfo, txqinfo->txq.q.producer_index);
+		vfree(txqinfo->skb_unmap_q.unmap_array);
+		txqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+	if (rxqinfo->skb_unmap_q.unmap_array) {
+		bnad_flush_rxbufs(rxqinfo);
+		vfree(rxqinfo->skb_unmap_q.unmap_array);
+		rxqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo = &bnad->cq_table[cq_id];
+
+	bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+}
+
+static void bnad_free_queues(struct bnad *bnad)
+{
+	uint i;
+
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++)
+			bnad_free_txq(bnad, i);
+		kfree(bnad->txq_table);
+		bnad->txq_table = NULL;
+	}
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++)
+			bnad_free_rxq(bnad, i);
+		kfree(bnad->rxq_table);
+		bnad->rxq_table = NULL;
+	}
+
+	if (bnad->cq_table) {
+		for (i = 0; i < bnad->cq_num; i++)
+			bnad_free_cq(bnad, i);
+		kfree(bnad->cq_table);
+		bnad->cq_table = NULL;
+	}
+}
+
+static int bnad_txq_init(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	txqinfo = &bnad->txq_table[txq_id];
+	err = bnad_alloc_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q,
+			   bnad->txq_depth * sizeof(struct bna_txq_entry));
+	if (err) {
+		bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+		return err;
+	}
+	txqinfo->txq.q.q_depth = bnad->txq_depth;
+	txqinfo->bnad = bnad;
+	txqinfo->txq_config.txf_id = BNAD_TX_FUNC_ID;
+	snprintf(txqinfo->name, sizeof(txqinfo->name), "%s TxQ %d",
+		 bnad->netdev->name, txq_id);
+	return 0;
+}
+
+static int bnad_txqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->txq_table =
+		kcalloc(bnad->txq_num, sizeof(struct bnad_txq_info),
+			GFP_KERNEL);
+	if (!bnad->txq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_txq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_rxq_init(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	int err;
+
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	err = bnad_alloc_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q,
+			   bnad->rxq_depth * sizeof(struct bna_rxq_entry));
+	if (err) {
+		bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+		return err;
+	}
+	rxqinfo->rxq.q.q_depth = bnad->rxq_depth;
+	rxqinfo->bnad = bnad;
+	rxqinfo->rxq_id = rxq_id;
+	rxqinfo->rxq_config.cq_id = rxq_id / bnad_rxqs_per_cq;
+
+	return 0;
+}
+
+static int
+bnad_rxqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->rxq_table =
+		kcalloc(bnad->rxq_num, sizeof(struct bnad_rxq_info),
+			GFP_KERNEL);
+	if (!bnad->rxq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		err = bnad_rxq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_cq_init(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+	int err;
+
+	cqinfo = &bnad->cq_table[cq_id];
+	err = bnad_alloc_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q,
+			   bnad->rxq_depth * bnad_rxqs_per_cq *
+			   sizeof(struct bna_cq_entry));
+	if (err) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return err;
+	}
+
+	cqinfo->cq.q.q_depth = bnad->rxq_depth * bnad_rxqs_per_cq;
+	cqinfo->bnad = bnad;
+
+	cqinfo->rx_coalescing_timeo = bnad->rx_coalescing_timeo;
+
+	cqinfo->cq_id = cq_id;
+	snprintf(cqinfo->name, sizeof(cqinfo->name), "%s CQ %d",
+		 bnad->netdev->name, cq_id);
+
+	return 0;
+}
+
+static int bnad_cqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->cq_table =
+		kcalloc(bnad->cq_num, sizeof(struct bnad_cq_info), GFP_KERNEL);
+	if (!bnad->cq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_cq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
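+/*
+ * Scale the configured queue depth down for jumbo MTUs (larger buffers need
+ * fewer entries), round up to a power of two, and enforce the minimum depth.
+ */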
+static uint bnad_get_qsize(uint qsize_conf, uint mtu)
+{
+	uint qsize;
+
+	if (mtu >= ETH_DATA_LEN) {
+		qsize = qsize_conf / (mtu / ETH_DATA_LEN);
+		if (!BNA_POWER_OF_2(qsize))
+			BNA_TO_POWER_OF_2_HIGH(qsize);
+		if (qsize < BNAD_MIN_Q_DEPTH)
+			qsize = BNAD_MIN_Q_DEPTH;
+	} else
+		qsize = qsize_conf;
+
+	return qsize;
+}
+
+static int bnad_init_queues(struct bnad *bnad)
+{
+	int err;
+
+	if (!(bnad->config & BNAD_CF_TXQ_DEPTH))
+		bnad->txq_depth =
+			bnad_get_qsize(bnad_txq_depth, bnad->netdev->mtu);
+	if (!(bnad->config & BNAD_CF_RXQ_DEPTH))
+		bnad->rxq_depth =
+			bnad_get_qsize(bnad_rxq_depth, bnad->netdev->mtu);
+
+	err = bnad_txqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_rxqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_cqs_init(bnad);
+
+	return err;
+}
+
+void
+bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
+{
+	struct bnad_cq_info *cqinfo;
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+
+	cqinfo = &bnad->cq_table[cq_id];
+	ib_entry = &bnad->ib_table[ib_id];
+
+	cqinfo->hw_producer_index = (u32 *) (ib_entry->ib_seg_addr);
+	cqinfo->cq_config.ib_id = ib_id;
+	cqinfo->cq_config.ib_seg_index = 0;
+
+	ib_entry->ib = &cqinfo->ib;
+	ib_config = &ib_entry->ib_config;
+	ib_config->coalescing_timer = bnad->rx_coalescing_timeo;
+	ib_config->control_flags =
+		BNA_IB_CF_INT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
+	if (bnad->config & BNAD_CF_MSIX) {
+		ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+		ib_config->msix_vector = ib_id;
+	} else
+		ib_config->msix_vector = 1 << ib_id;
+
+	/* Every CQ has its own IB. */
+	ib_config->seg_size = 1;
+	ib_config->index_table_offset = ib_id;
+}
+
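+/*
+ * Assign Interrupt Blocks in order: IB ids [0, txq_num) back the TxQs,
+ * followed by one IB per CQ.  Each queue reads its hardware consumer or
+ * producer index from its IB's DMA segment.
+ */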
+static void bnad_ibs_init(struct bnad *bnad)
+{
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+	struct bnad_txq_info *txqinfo;
+
+	int ib_id, i;
+
+	ib_id = 0;
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		ib_entry = &bnad->ib_table[ib_id];
+
+		txqinfo->hw_consumer_index = ib_entry->ib_seg_addr;
+		txqinfo->txq_config.ib_id = ib_id;
+		txqinfo->txq_config.ib_seg_index = 0;
+
+		ib_entry->ib = &txqinfo->ib;
+		ib_config = &ib_entry->ib_config;
+		ib_config->coalescing_timer = bnad->tx_coalescing_timeo;
+		ib_config->control_flags =
+			BNA_IB_CF_INTER_PKT_DMA | BNA_IB_CF_INT_ENABLE |
+			BNA_IB_CF_COALESCING_MODE | BNA_IB_CF_MASTER_ENABLE;
+		if (bnad->config & BNAD_CF_MSIX) {
+			ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+			ib_config->msix_vector = ib_id;
+		} else
+			ib_config->msix_vector = 1 << ib_id;
+		ib_config->interpkt_count = bnad->tx_interpkt_count;
+
+		/* Every TxQ has its own IB. */
+		ib_config->seg_size = 1;
+		ib_config->index_table_offset = ib_id;
+		ib_id++;
+	}
+
+	for (i = 0; i < bnad->cq_num; i++, ib_id++)
+		bnad_rxib_init(bnad, i, ib_id);
+}
+
+static void
+bnad_txf_init(struct bnad *bnad, uint txf_id)
+{
+	struct bnad_txf_info *txf_info;
+
+	txf_info = &bnad->txf_table[txf_id];
+	txf_info->txf_id = txf_id;
+	txf_info->txf_config.flags =
+		BNA_TXF_CF_VLAN_WI_BASED | BNA_TXF_CF_ENABLE;
+}
+
+void
+bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss)
+{
+	struct bnad_rxf_info *rxf_info;
+
+	rxf_info = &bnad->rxf_table[rxf_id];
+	rxf_info->rxf_id = rxf_id;
+	rxf_info->rxf_config.rit_offset = rit_offset;
+	rxf_info->rxf_config.mcast_rxq_id = BNAD_MULTICAST_RXQ_ID;
+	if (bnad_small_large_rxbufs)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_SM_LG_RXQ;
+	if (bnad_vlan_strip)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_VLAN_STRIP;
+	if (rss) {
+		struct bna_rxf_rss *rxf_rss;
+
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_RSS_ENABLE;
+		rxf_rss = &rxf_info->rxf_config.rss;
+		rxf_rss->type =
+			BNA_RSS_V4_TCP | BNA_RSS_V4_IP | BNA_RSS_V6_TCP |
+			BNA_RSS_V6_IP;
+		rxf_rss->hash_mask = bnad->cq_num - 1;
+		get_random_bytes(rxf_rss->toeplitz_hash_key,
+				 sizeof(rxf_rss->toeplitz_hash_key));
+	}
+}
+
+static int bnad_init_funcs(struct bnad *bnad)
+{
+	bnad->txf_table =
+		kcalloc(bnad->txf_num, sizeof(struct bnad_txf_info),
+			GFP_KERNEL);
+	if (!bnad->txf_table)
+		return -ENOMEM;
+	bnad_txf_init(bnad, BNAD_TX_FUNC_ID);
+
+	bnad->rxf_table =
+		kcalloc(bnad->rxf_num, sizeof(struct bnad_rxf_info),
+			GFP_KERNEL);
+	if (!bnad->rxf_table)
+		return -ENOMEM;
+	bnad_rxf_init(bnad, BNAD_RX_FUNC_ID, BNAD_RIT_OFFSET,
+		      (bnad->cq_num > 1) ? 1 : 0);
+	return 0;
+}
+
+static void bnad_setup_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	txqinfo = &bnad->txq_table[txq_id];
+
+	/* CEE state should not change while we do this */
+	spin_lock_irq(&bnad->priv_lock);
+	if (!bnad->cee_linkup) {
+		txqinfo->txq_config.priority = bnad->curr_priority = txq_id;
+		clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	} else {
+		txqinfo->txq_config.priority = bnad->curr_priority =
+			bnad->priority;
+		set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	}
+	/*  Set wrr_quota properly if multiple priorities/TxQs are enabled. */
+	txqinfo->txq_config.wrr_quota = BNAD_TX_MAX_WRR_QUOTA;
+	bna_txq_config(bnad->priv, &txqinfo->txq, txq_id, &txqinfo->txq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	rxqinfo = &bnad->rxq_table[rxq_id];
+
+	/*
+	 * Every RxQ set has 2 RxQs: the first is large buffer RxQ,
+	 * the second is small buffer RxQ.
+	 */
+	if ((rxq_id % bnad_rxqs_per_cq) == 0)
+		rxqinfo->rxq_config.buffer_size =
+			(bnad_vlan_strip ? VLAN_ETH_HLEN : ETH_HLEN) +
+			bnad->netdev->mtu + ETH_FCS_LEN;
+	else
+		rxqinfo->rxq_config.buffer_size = BNAD_SMALL_RXBUF_SIZE;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxq_config(bnad->priv, &rxqinfo->rxq, rxq_id, &rxqinfo->rxq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	cqinfo = &bnad->cq_table[cq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_cq_config(bnad->priv, &cqinfo->cq, cq_id, &cqinfo->cq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_queues(struct bnad *bnad)
+{
+	uint i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_txq(bnad, i);
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_setup_rxq(bnad, i);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_cq(bnad, i);
+}
+
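+/*
+ * Build the RxQ Indirection Table: one entry per CQ, pointing at the even
+ * (large-buffer) RxQ and, when small/large buffers are enabled, its odd
+ * (small-buffer) partner.
+ */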
+static void bnad_setup_rit(struct bnad *bnad)
+{
+	int i, size;
+
+	size = bnad->cq_num;
+	for (i = 0; i < size; i++) {
+		if (bnad_small_large_rxbufs) {
+			bnad->rit[i].large_rxq_id = (i << 1);
+			bnad->rit[i].small_rxq_id = (i << 1) + 1;
+		} else {
+			bnad->rit[i].large_rxq_id = i;
+		}
+	}
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rit_config_set(bnad->priv, BNAD_RIT_OFFSET, bnad->rit, size);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo = &bnad->rxq_table[rxq_id];
+
+	bnad_alloc_rxbufs(rxqinfo);
+}
+
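+/*
+ * Program the hardware for a fresh open: quiesce any stale queues, write
+ * the queue, RIT and function configurations, restore the unicast MAC, MTU,
+ * pause and VLAN/multicast state, and finally arm the IBs.
+ */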
+static int bnad_config_hw(struct bnad *bnad)
+{
+	int i, err = 0;
+	u64 rxq_id_mask = 0;
+	struct sockaddr sa;
+	struct net_device *netdev = bnad->netdev;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Disable the RxF until later bringing port up. */
+	bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+	for (i = 0; i < bnad->txq_num; i++) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_txq(bnad, i);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= ((u64)1 << i);
+	if (rxq_id_mask) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_queues(bnad);
+
+	bnad_setup_rit(bnad);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txf_config_set(bnad->priv, BNAD_TX_FUNC_ID,
+			   &bnad->txf_table->txf_config);
+	for (i = 0; i < bnad->rxf_num; i++) {
+		bna_rxf_config_set(bnad->priv, i,
+				   &bnad->rxf_table[i].rxf_config);
+		bna_rxf_vlan_filter(bnad->priv, i, BNA_ENABLE);
+	}
+
+	spin_unlock_irq(&bnad->priv_lock);
+	/* Mailbox should be enabled before this! */
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	err = bnad_set_mac_address_locked(netdev, &sa);
+	spin_lock_irq(&bnad->priv_lock);
+	if (err || BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Receive broadcasts */
+	bna_rxf_broadcast(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+
+	bna_mtu_info(bnad->priv, netdev->mtu, bnad);
+	bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+
+	bna_rxf_mcast_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	bna_mcast_mac_reset_list(bnad->priv);
+
+	bnad_set_rx_mode_locked(bnad->netdev);
+
+	bnad_reconfig_vlans(bnad);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_ibs(bnad);
+	return 0;
+
+unlock_and_return:
+	if (BNAD_NOT_READY(bnad))
+		err = BNA_FAIL;
+	spin_unlock_irq(&bnad->priv_lock);
+	return err;
+}
+
+/* Note: bnad_cleanup doesn't free irqs */
+static void bnad_cleanup(struct bnad *bnad)
+{
+	kfree(bnad->rit);
+	bnad->rit = NULL;
+
+	kfree(bnad->txf_table);
+	bnad->txf_table = NULL;
+
+	kfree(bnad->rxf_table);
+	bnad->rxf_table = NULL;
+
+	bnad_free_ibs(bnad);
+	bnad_free_queues(bnad);
+}
+
+/* Should be called with rtnl_lock held. */
+static int bnad_init(struct bnad *bnad)
+{
+	int err;
+
+	err = bnad_alloc_ibs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_init_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad_ibs_init(bnad);
+
+	err = bnad_init_funcs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_alloc_unmap_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad->rit =
+		kcalloc(bnad->cq_num, sizeof(struct bna_rit_entry),
+			GFP_KERNEL);
+	if (!bnad->rit) {
+		err = -ENOMEM;
+		goto finished;
+	}
+
+	return 0;
+
+finished:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+static int bnad_enable_locked(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int err = 0;
+	uint i;
+
+	bnad->state = BNAD_S_OPENING;
+
+	err = bnad_init(bnad);
+	if (err) {
+		pr_info("%s init failed %d", netdev->name, err);
+		bnad->state = BNAD_S_INIT;
+		return err;
+	}
+
+	err = bnad_config_hw(bnad);
+	if (err) {
+		pr_info("%s config HW failed %d", netdev->name, err);
+		goto init_failed;
+	}
+
+	err = bnad_request_txrx_irqs(bnad);
+	if (err) {
+		pr_info("%s requests Tx/Rx irqs failed: %d",
+			bnad->netdev->name, err);
+		goto init_failed;
+	}
+	bnad_napi_init(bnad);
+	bnad_napi_enable(bnad);
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_alloc_for_rxq(bnad, i);
+
+	bnad->state = BNAD_S_OPEN;
+	pr_info("%s is opened", bnad->netdev->name);
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad)) {
+		/* Let bnad_error take care of the error. */
+		spin_unlock_irq(&bnad->priv_lock);
+		return 0;
+	}
+
+	/* RxF was disabled earlier. */
+	bna_rxf_enable(bnad->priv, BNAD_RX_FUNC_ID);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	return 0;
+
+init_failed:
+	bnad_cleanup(bnad);
+	bnad->state = BNAD_S_INIT;
+	return err;
+}
+
+/* Should be called with conf_lock held */
+static int
+bnad_open_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		err = bnad_enable_locked(bnad);
+		break;
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		pr_info("%s is not ready yet: IOC down", netdev->name);
+		break;
+	case BNAD_S_INIT_DISABLED:
+		bnad->state = BNAD_S_OPEN_DISABLED;
+		pr_info("%s is not ready yet: IOC disabled",
+			netdev->name);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return err;
+}
+
+int
+bnad_open_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (!err && (bnad->state == BNAD_S_OPEN))
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return err;
+}
+
+int
+bnad_open(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+
+	pr_info("%s open", netdev->name);
+
+	mutex_lock(&bnad->conf_mutex);
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags))
+		pr_info("%s is disabled", netdev->name);
+	else
+		err = bnad_open_locked(netdev);
+
+	mutex_unlock(&bnad->conf_mutex);
+
+	return err;
+}
+
+static int bnad_disable_locked(struct bnad *bnad)
+{
+	int err = 0, i;
+	u64 rxq_id_mask = 0;
+
+	bnad_stop_data_path(bnad, 0);
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			goto cleanup;
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= ((u64)1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			goto cleanup;
+	}
+
+cleanup:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+int
+bnad_stop_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	return bnad_stop_locked_internal(netdev);
+}
+
+int
+bnad_stop(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+
+	pr_info("%s stop", netdev->name);
+
+	mutex_lock(&bnad->conf_mutex);
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags))
+		pr_info("%s port is disabled", netdev->name);
+	else
+		err = bnad_stop_locked(netdev);
+
+	mutex_unlock(&bnad->conf_mutex);
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	err = bnad_stop_locked_internal(netdev);
+	if (err) {
+		pr_info("%s sw reset internal: stop failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (err) {
+		pr_info("%s sw reset internal: open failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+	return 0;
+done:
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (bnad->state != BNAD_S_OPEN)
+		return 0;
+
+	bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	err = bnad_sw_reset_locked_internal(netdev);
+
+	if (err) {
+		pr_info("%s sw reset: failed %d", bnad->netdev->name,
+			err);
+		return err;
+	}
+
+	/* After the reset, make sure we are in the OPEN state. */
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return 0;
+}
+
+static int bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+	int err;
+
+	BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+		 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err) {
+			bnad->stats.tso_err++;
+			return err;
+		}
+	}
+
+	/*
+	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
+	 * excluding the length field.
+	 */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		iph->tot_len = 0;
+		iph->check = 0;
+
+		tcp_hdr(skb)->check =
+			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+					   IPPROTO_TCP, 0);
+		bnad->stats.tso4++;
+	} else {
+		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+		ipv6h->payload_len = 0;
+		tcp_hdr(skb)->check =
+			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+					 IPPROTO_TCP, 0);
+		bnad->stats.tso6++;
+	}
+
+	return 0;
+}
+
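+/*
+ * Transmit path: build one or more work items (4 vectors each) describing
+ * the skb head and fragments, request checksum/LSO offload as needed, and
+ * ring the TxQ doorbell.  The queue is stopped when work items or unmap
+ * entries run short and is woken again from the completion path.
+ */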
+netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	struct bnad_unmap_q *unmap_q;
+	u16 txq_prod, vlan_tag = 0;
+	unsigned int unmap_prod, wis, wis_used, wi_range;
+	unsigned int vectors, vect_id, i, acked;
+	int err;
+	dma_addr_t dma_addr;
+	struct bna_txq_entry *txqent;
+	bna_txq_wi_ctrl_flag_t flags;
+
+	if (unlikely
+	    (skb->len <= ETH_HLEN || skb->len > BNAD_TX_MAX_DATA_PER_WI)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+	unmap_q = &txqinfo->skb_unmap_q;
+
+	vectors = 1 + skb_shinfo(skb)->nr_frags;
+	if (vectors > BNAD_TX_MAX_VECTORS) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	wis = BNAD_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
+	acked = 0;
+	if (unlikely
+	    (wis > BNA_Q_FREE_COUNT(txq) ||
+	     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+		if ((u16) (*txqinfo->hw_consumer_index) !=
+		    txq->q.consumer_index &&
+		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+			acked = bnad_free_txbufs(txqinfo,
+						 (u16)(*txqinfo->
+							    hw_consumer_index));
+			bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+		} else
+			netif_stop_queue(netdev);
+
+		smp_mb();
+		/*
+		 * Check again to deal with race condition between
+		 * netif_stop_queue here, and netif_wake_queue in
+		 * interrupt handler which is not inside netif tx lock.
+		 */
+		if (likely
+		    (wis > BNA_Q_FREE_COUNT(txq) ||
+		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+			bnad->stats.netif_queue_stop++;
+			return NETDEV_TX_BUSY;
+		} else
+			netif_wake_queue(netdev);
+	}
+
+	unmap_prod = unmap_q->producer_index;
+	wis_used = 1;
+	vect_id = 0;
+	flags = 0;
+
+	txq_prod = txq->q.producer_index;
+	BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent, wi_range);
+	BUG_ON(!(wi_range && wi_range <= txq->q.q_depth));
+	txqent->hdr.wi.reserved = 0;
+	txqent->hdr.wi.num_vectors = vectors;
+	txqent->hdr.wi.opcode =
+		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+		       BNA_TXQ_WI_SEND));
+
+	if (bnad_ipid_mode)
+		flags |= BNA_TXQ_WI_CF_IPID_MODE;
+
+	if (bnad->vlangrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = (u16) vlan_tx_tag_get(skb);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
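+	/*
+	 * With CEE running, the active priority goes into the top 3 PCP bits
+	 * of the VLAN tag while the lower 13 bits (CFI + VID) are preserved.
+	 */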
+	if (test_bit(BNAD_F_CEE_RUNNING, &bnad->flags)) {
+		vlan_tag =
+			(bnad->curr_priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+
+	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+	if (skb_is_gso(skb)) {
+		err = bnad_tso_prepare(bnad, skb);
+		if (err) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
+		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+		txqent->hdr.wi.l4_hdr_size_n_offset =
+			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+			      (tcp_hdrlen(skb) >> 2,
+			       skb_transport_offset(skb)));
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = 0;
+
+		txqent->hdr.wi.lso_mss = 0;
+
+		if (skb->protocol == htons(ETH_P_IP))
+			proto = ip_hdr(skb)->protocol;
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
+			proto = ipv6_hdr(skb)->nexthdr;
+		}
+		if (proto == IPPROTO_TCP) {
+			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.tcpcsum_offload++;
+			BUG_ON(!(skb_headlen(skb) >=
+				 skb_transport_offset(skb) + tcp_hdrlen(skb)));
+		} else if (proto == IPPROTO_UDP) {
+			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.udpcsum_offload++;
+			BUG_ON(!(skb_headlen(skb) >=
+				 skb_transport_offset(skb) +
+				 sizeof(struct udphdr)));
+		} else {
+			err = skb_checksum_help(skb);
+			bnad->stats.csum_help++;
+			if (err) {
+				dev_kfree_skb(skb);
+				bnad->stats.csum_help_err++;
+				return NETDEV_TX_OK;
+			}
+		}
+	} else {
+		txqent->hdr.wi.lso_mss = 0;
+		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+	}
+
+	txqent->hdr.wi.flags = htons(flags);
+
+	txqent->hdr.wi.frame_length = htonl(skb->len);
+
+	unmap_q->unmap_array[unmap_prod].skb = skb;
+	BUG_ON(!(skb_headlen(skb) <= BNAD_TX_MAX_DATA_PER_VECTOR));
+	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	dma_addr =
+		pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+			       PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+			   dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		if (++vect_id == BNAD_TX_MAX_VECTORS_PER_WI) {
+			vect_id = 0;
+			if (--wi_range)
+				txqent++;
+			else {
+				BNA_QE_INDX_ADD(txq_prod, wis_used,
+						txq->q.q_depth);
+				wis_used = 0;
+				BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent,
+						     wi_range);
+				BUG_ON(!(wi_range &&
+					   wi_range <= txq->q.q_depth));
+			}
+			wis_used++;
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+		}
+
+		txqent->vector[vect_id].length = htons(frag->size);
+		dma_addr =
+			pci_map_page(bnad->pcidev, frag->page,
+				     frag->page_offset, frag->size,
+				     PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+	}
+
+	unmap_q->producer_index = unmap_prod;
+	BNA_QE_INDX_ADD(txq_prod, wis_used, txq->q.q_depth);
+	txq->q.producer_index = txq_prod;
+
+	smp_mb();
+	bna_txq_prod_indx_doorbell(txq);
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index)
+		tasklet_schedule(&bnad->tx_free_tasklet);
+
+	return NETDEV_TX_OK;
+}
+
+struct net_device_stats *
+bnad_get_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &bnad->net_stats;
+	struct cna_stats_mac_rx *rxstats = &bnad->hw_stats->mac_rx_stats;
+	struct cna_stats_mac_tx *txstats = &bnad->hw_stats->mac_tx_stats;
+	int i;
+
+	memset(net_stats, 0, sizeof(*net_stats));
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			net_stats->rx_packets += bnad->rxq_table[i].rx_packets;
+			net_stats->rx_bytes += bnad->rxq_table[i].rx_bytes;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			net_stats->tx_packets += bnad->txq_table[i].tx_packets;
+			net_stats->tx_bytes += bnad->txq_table[i].tx_bytes;
+		}
+	}
+	net_stats->rx_errors =
+		rxstats->rx_fcs_error + rxstats->rx_alignment_error +
+		rxstats->rx_frame_length_error + rxstats->rx_code_error +
+		rxstats->rx_undersize;
+	net_stats->tx_errors = txstats->tx_fcs_error + txstats->tx_undersize;
+	net_stats->rx_dropped = rxstats->rx_drop;
+	net_stats->tx_dropped = txstats->tx_drop;
+	net_stats->multicast = rxstats->rx_multicast;
+	net_stats->collisions = txstats->tx_total_collision;
+
+	net_stats->rx_length_errors = rxstats->rx_frame_length_error;
+	net_stats->rx_crc_errors = rxstats->rx_fcs_error;
+	net_stats->rx_frame_errors = rxstats->rx_alignment_error;
+	/* recv'r fifo overrun */
+	net_stats->rx_fifo_errors = bnad->hw_stats->rxf_stats[0].frame_drops;
+
+	return net_stats;
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_set_rx_mode_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (netdev->flags & IFF_PROMISC) {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+		bnad->config |= BNAD_CF_PROMISC;
+	} else {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_DISABLE);
+		bnad->config &= ~BNAD_CF_PROMISC;
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		if (!(bnad->config & BNAD_CF_ALLMULTI)) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_DISABLE);
+			bnad->config |= BNAD_CF_ALLMULTI;
+		}
+	} else {
+		if (bnad->config & BNAD_CF_ALLMULTI) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_ENABLE);
+			bnad->config &= ~BNAD_CF_ALLMULTI;
+		}
+	}
+
+	if (netdev->mc_count) {
+		struct mac *mcaddr_list;
+		struct dev_mc_list *mc;
+		int i;
+
+		mcaddr_list =
+			kcalloc((netdev->mc_count + 1), sizeof(struct mac),
+				GFP_ATOMIC);
+		if (!mcaddr_list)
+			return;
+
+		mcaddr_list[0] = bna_bcast_addr;
+
+		mc = netdev->mc_list;
+		for (i = 1; mc && i < netdev->mc_count + 1; i++, mc = mc->next)
+			memcpy(&mcaddr_list[i], mc->dmi_addr,
+				sizeof(struct mac));
+
+		err = bna_rxf_mcast_mac_set_list(bnad->priv, BNAD_RX_FUNC_ID,
+				(const struct mac *)mcaddr_list,
+				netdev->mc_count + 1);
+		if (err)
+			pr_info("%s set mcast list failed: %d",
+				netdev->name, err);
+
+		kfree(mcaddr_list);
+	}
+}
+
+static void bnad_set_rx_mode(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bnad_set_rx_mode_locked(netdev);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held. */
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 * mac_ptr,
+	unsigned int cmd)
+{
+	int err = 0;
+	enum bna_status(*ucast_mac_func) (struct bna_dev *bna_dev,
+		unsigned int rxf_id, const struct mac *mac_addr_ptr) = NULL;
+
+	WARN_ON(in_interrupt());
+	if (!is_valid_ether_addr(mac_ptr))
+		return -EINVAL;
+
+	switch (cmd) {
+	case BNAD_UCAST_MAC_SET:
+		ucast_mac_func = bna_rxf_ucast_mac_set;
+		break;
+	case BNAD_UCAST_MAC_ADD:
+		ucast_mac_func = bna_rxf_ucast_mac_add;
+		break;
+	case BNAD_UCAST_MAC_DEL:
+		ucast_mac_func = bna_rxf_ucast_mac_del;
+		break;
+	}
+
+	init_completion(&bnad->ucast_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = ucast_mac_func(bnad->priv, rxf_id, (const struct mac *)mac_ptr);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto ucast_mac_exit;
+	}
+	wait_for_completion(&bnad->ucast_comp);
+	err = bnad->ucast_comp_status;
+	if (err == BFI_LL_CMD_NOT_EXEC)
+		err = 0;
+
+ucast_mac_exit:
+	if (err) {
+		pr_info("%s unicast MAC address command %d failed: %d",
+			bnad->netdev->name, cmd, err);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int err;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	err = bnad_ucast_mac(bnad, BNAD_RX_FUNC_ID, (u8 *) sa->sa_data,
+			     BNAD_UCAST_MAC_SET);
+	if (err)
+		return err;
+
+	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+	return 0;
+}
+
+static int bnad_set_mac_address(struct net_device *netdev, void *addr)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	mutex_lock(&bnad->conf_mutex);
+	err = bnad_set_mac_address_locked(netdev, addr);
+	mutex_unlock(&bnad->conf_mutex);
+	return err;
+
+}
+
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	WARN_ON(in_interrupt());
+
+	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+		return -EINVAL;
+
+	mutex_lock(&bnad->conf_mutex);
+	netdev->mtu = new_mtu;
+	err = bnad_sw_reset_locked(netdev);
+	mutex_unlock(&bnad->conf_mutex);
+
+	return err;
+}
+
+static void bnad_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *grp)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	mutex_lock(&bnad->conf_mutex);
+	bnad->vlangrp = grp;
+	mutex_unlock(&bnad->conf_mutex);
+}
+
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	mutex_lock(&bnad->conf_mutex);
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	mutex_unlock(&bnad->conf_mutex);
+}
+
+static void bnad_vlan_rx_kill_vid(struct net_device *netdev,
+	unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	mutex_lock(&bnad->conf_mutex);
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_del(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	mutex_unlock(&bnad->conf_mutex);
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_reconfig_vlans(struct bnad *bnad)
+{
+	u16 vlan_id;
+
+	bna_rxf_vlan_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	if (bnad->vlangrp) {
+		for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
+			if (vlan_group_get_device(bnad->vlangrp, vlan_id))
+				bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+						 (unsigned int)vlan_id);
+		}
+	}
+}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnad_netpoll(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_cq_info *cqinfo;
+	int i;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		disable_irq(bnad->pcidev->irq);
+		bnad_isr(bnad->pcidev->irq, netdev);
+		enable_irq(bnad->pcidev->irq);
+	} else {
+		for (i = 0; i < bnad->cq_num; i++) {
+			cqinfo = &bnad->cq_table[i];
+			if (likely(napi_schedule_prep(&cqinfo->napi))) {
+				bnad_disable_rx_irq(bnad, cqinfo);
+				__napi_schedule(&cqinfo->napi);
+			}
+		}
+	}
+}
+#endif
+
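+/*
+ * Compute how many TxQs, RxQs, CQs and MSI-X vectors to use.  With MSI-X
+ * the number of CQs is capped to a power of 2 bounded by the number of
+ * online CPUs; in INTx mode a single CQ is used.
+ */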
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets)
+{
+	bnad->txq_num = BNAD_TXQ_NUM;
+	bnad->txf_num = 1;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (rxqsets) {
+			bnad->cq_num = rxqsets;
+			if (bnad->cq_num > BNAD_MAX_CQS)
+				bnad->cq_num = BNAD_MAX_CQS;
+		} else
+			bnad->cq_num =
+				min((uint) num_online_cpus(),
+				    (uint) BNAD_MAX_RXQSETS_USED);
+		if (!BNA_POWER_OF_2(bnad->cq_num))
+			BNA_TO_POWER_OF_2(bnad->cq_num);
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+
+		bnad->rxf_num = 1;
+		bnad->msix_num =
+			bnad->txq_num + bnad->cq_num +
+			BNAD_MSIX_ERR_MAILBOX_NUM;
+	} else {
+		bnad->cq_num = 1;
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+		bnad->rxf_num = 1;
+		bnad->msix_num = 0;
+	}
+}
+
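+/*
+ * Try to allocate the requested number of MSI-X vectors.  If fewer
+ * vectors are available, shrink the queue configuration to fit; if that
+ * still fails, fall back to INTx mode without RSS.
+ */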
+static void bnad_enable_msix(struct bnad *bnad)
+{
+	int i, ret;
+
+	if (!(bnad->config & BNAD_CF_MSIX) || bnad->msix_table)
+		return;
+
+	bnad->msix_table =
+		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+	if (!bnad->msix_table)
+		goto intx_mode;
+
+	for (i = 0; i < bnad->msix_num; i++)
+		bnad->msix_table[i].entry = i;
+
+	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+	if (ret > 0) {
+		/* Not enough MSI-X vectors. */
+		int rxqsets = ret;
+
+		dev_err(&bnad->pcidev->dev,
+			"Tried to get %d MSI-X vectors, only got %d\n",
+			bnad->msix_num, ret);
+		BNA_TO_POWER_OF_2(rxqsets);
+		while (bnad->msix_num > ret && rxqsets) {
+			bnad_q_num_init(bnad, rxqsets);
+			rxqsets >>= 1;
+		}
+		if (bnad->msix_num <= ret) {
+			ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+					      bnad->msix_num);
+			if (ret) {
+				dev_err(&bnad->pcidev->dev,
+					"Enabling MSI-X failed: %d\n", ret);
+				goto intx_mode;
+			}
+		} else {
+			dev_err(&bnad->pcidev->dev,
+				"Enabling MSI-X failed: limited (%d) vectors\n",
+				ret);
+			goto intx_mode;
+		}
+	} else if (ret < 0) {
+		dev_err(&bnad->pcidev->dev, "Enabling MSI-X failed: %d\n", ret);
+		goto intx_mode;
+	}
+
+	dev_info(&bnad->pcidev->dev,
+		 "Enabling MSI-X succeeded with %d vectors, %s\n",
+		 bnad->msix_num,
+		 (bnad->cq_num > 1) ? "RSS is enabled" : "RSS is not enabled");
+	return;
+
+intx_mode:
+	dev_warn(&bnad->pcidev->dev, "Switching to INTx mode with no RSS\n");
+
+	kfree(bnad->msix_table);
+	bnad->msix_table = NULL;
+
+	bnad->config &= ~BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, 0);
+}
+
+static void bnad_disable_msix(struct bnad *bnad)
+{
+	if (bnad->config & BNAD_CF_MSIX) {
+		pci_disable_msix(bnad->pcidev);
+		kfree(bnad->msix_table);
+		bnad->msix_table = NULL;
+		bnad->config &= ~BNAD_CF_MSIX;
+	}
+}
+
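+/*
+ * Handle a hardware error reported by the IOC: tear down the data path
+ * and move the driver state machine to the corresponding "down" state.
+ */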
+static void bnad_error(struct bnad *bnad)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	if (!test_and_clear_bit(BNAD_F_HWERROR, &bnad->flags)) {
+		spin_unlock_irq(&bnad->priv_lock);
+		return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		bnad_stop_data_path(bnad, 1);
+		bnad_cleanup(bnad);
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BUG_ON(1);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
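+/*
+ * Called once the IOC comes back up after a reset: re-read the permanent
+ * MAC address and, if the interface was up before the reset, re-enable
+ * the data path and the port.
+ */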
+static void bnad_resume_after_reset(struct bnad *bnad)
+{
+	int err;
+	struct net_device *netdev = bnad->netdev;
+
+	switch (bnad->state) {
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT;
+
+		bna_port_mac_get(bnad->priv, (struct mac *)netdev->perm_addr);
+		if (is_zero_ether_addr(netdev->dev_addr))
+			memcpy(netdev->dev_addr, netdev->perm_addr,
+			       netdev->addr_len);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		err = bnad_enable_locked(bnad);
+		if (err) {
+			pr_info("%s bnad_enable failed after reset: %d",
+				bnad->netdev->name, err);
+		} else {
+			bnad_port_admin_locked(bnad, BNA_ENABLE);
+		}
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BUG_ON(1);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
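+/*
+ * Tasklet that reclaims completed Tx buffers outside hard interrupt
+ * context and acks the interrupt block with the number of entries freed.
+ */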
+static void bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+	struct bnad *bnad = (struct bnad *)bnad_ptr;
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	unsigned int acked;
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index &&
+	    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+		acked = bnad_free_txbufs(txqinfo,
+					 (u16)(*txqinfo->hw_consumer_index));
+		bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+	}
+}
+
+static void bnad_cee_reconfig_prio(struct bnad *bnad, u8 cee_linkup,
+	unsigned int prio)
+{
+	if (prio != bnad->curr_priority) {
+		bnad_sw_reset_locked_internal(bnad->netdev);
+	} else {
+		spin_lock_irq(&bnad->priv_lock);
+		if (!cee_linkup)
+			clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		else
+			set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		spin_unlock_irq(&bnad->priv_lock);
+	}
+}
+
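+/*
+ * Propagate a link state change to the stack.  On link up any CEE
+ * priority change is applied (resetting the data path if needed) and the
+ * Tx queue is woken; on link down the carrier is turned off.
+ */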
+static void bnad_link_state_notify(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+	unsigned int prio = 0;
+
+	if (bnad->state != BNAD_S_OPEN) {
+		pr_info("%s link state change while in state %d",
+			netdev->name, bnad->state);
+		return;
+	}
+
+	spin_lock_irq(&bnad->priv_lock);
+	link_state = bnad->link_state;
+	cee_linkup = bnad->cee_linkup;
+	if (cee_linkup)
+		prio = bnad->priority;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (link_state == BNAD_LS_UP) {
+		bnad_cee_reconfig_prio(bnad, cee_linkup, prio);
+		if (!netif_carrier_ok(netdev)) {
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			netif_carrier_off(netdev);
+			bnad->stats.netif_queue_stop++;
+		}
+	}
+}
+
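+/*
+ * Deferred work handler: processes the error, reset-done and link-state
+ * events flagged from interrupt context, under conf_mutex.
+ */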
+static void bnad_work(struct work_struct *work)
+{
+	struct bnad *bnad = container_of(work, struct bnad, work);
+	unsigned long work_flags;
+
+	mutex_lock(&bnad->conf_mutex);
+
+	spin_lock_irq(&bnad->priv_lock);
+	work_flags = bnad->work_flags;
+	bnad->work_flags = 0;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (work_flags & BNAD_WF_ERROR)
+		bnad_error(bnad);
+	if (work_flags & BNAD_WF_RESETDONE)
+		bnad_resume_after_reset(bnad);
+
+	if (work_flags & BNAD_WF_LS_NOTIFY)
+		bnad_link_state_notify(bnad);
+
+	mutex_unlock(&bnad->conf_mutex);
+}
+
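+/*
+ * Periodic statistics timer: kicks off a hardware stats fetch, updates
+ * the dynamic Rx interrupt coalescing timers and refills RxQs that have
+ * run low on buffers.
+ */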
+static void bnad_stats_timeo(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	int i;
+	struct bnad_rxq_info *rxqinfo;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_stats_get(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->rx_dyn_coalesce_on) {
+		u8 cls_timer;
+		struct bnad_cq_info *cq;
+		for (i = 0; i < bnad->cq_num; i++) {
+			cq = &bnad->cq_table[i];
+
+			if ((cq->pkt_rate.small_pkt_cnt == 0) &&
+			    (cq->pkt_rate.large_pkt_cnt == 0))
+				continue;
+
+			cls_timer =
+				bna_calc_coalescing_timer(bnad->priv,
+							  &cq->pkt_rate);
+
+			/* NAPI: store the coalescing timer for later use */
+			cq->rx_coalescing_timeo = cls_timer;
+
+			bna_ib_coalescing_timer_set(bnad->priv, &cq->ib,
+						    cls_timer);
+		}
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		if (!(BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+					rxqinfo->skb_unmap_q.q_depth) >>
+		      BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) {
+			if (test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags))
+				continue;
+			bnad_alloc_rxbufs(rxqinfo);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+		}
+	}
+}
+
+static void
+bnad_free_ioc_mem(struct bnad *bnad)
+{
+	enum bna_dma_mem_type i;
+
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		if (bnad->ioc_meminfo[i].kva && bnad->ioc_meminfo[i].dma)
+			pci_free_consistent(bnad->pcidev,
+					    bnad->ioc_meminfo[i].len,
+					    bnad->ioc_meminfo[i].kva,
+					    *(dma_addr_t *)
+					    &bnad->ioc_meminfo[i].dma);
+		else if (bnad->ioc_meminfo[i].kva)
+			vfree(bnad->ioc_meminfo[i].kva);
+		bnad->ioc_meminfo[i].kva = NULL;
+	}
+}
+
+/* The following IOC callback functions are called with priv_lock held. */
+
+void
+bna_iocll_enable_cbfn(void *arg, enum bfa_status error)
+{
+	struct bnad *bnad = arg;
+
+	if (!error) {
+		bnad->work_flags &= ~BNAD_WF_LS_NOTIFY;
+		bnad->work_flags |= BNAD_WF_RESETDONE;
+
+		if (bnad->state != BNAD_S_UNLOADING)
+			schedule_work(&bnad->work);
+	}
+
+	bnad->ioc_comp_status = error;
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_disable_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_hbfail_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	bnad_hw_error(bnad, BFA_STATUS_IOC_FAILURE);
+}
+
+void
+bna_iocll_reset_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+	u32 int_status, int_mask;
+	unsigned int irq;
+
+	/* Clear the status */
+	bna_intr_status_get(bnad->priv, &int_status);
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (test_and_clear_bit(BNAD_F_MBOX_IRQ_DISABLED,
+		    &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			enable_irq(irq);
+		}
+	}
+
+	int_mask = ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS);
+	bna_intx_enable(bnad->priv, int_mask);
+}
+
+s32
+bnad_cee_attach(struct bnad *bnad)
+{
+	u8 *dma_kva;
+	dma_addr_t dma_pa;
+	struct bfa_cee *cee = &bnad->cee;
+
+	memset(cee, 0, sizeof(struct bfa_cee));
+
+	/* Allocate memory for dma */
+	dma_kva =
+		pci_alloc_consistent(bnad->pcidev, bfa_cee_meminfo(), &dma_pa);
+	if (dma_kva == NULL)
+		return -ENOMEM;
+
+	((struct bna_dev *) bnad->priv)->cee = cee;
+
+	bnad->cee_cbfn.get_attr_cbfn = bnad_cee_get_attr_cb;
+	bnad->cee_cbfn.get_stats_cbfn = bnad_cee_get_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = bnad_cee_reset_stats_cb;
+
+	/* Invoke cee attach function */
+	bfa_cee_attach(cee, &bnad->priv->ioc, bnad, bnad->trcmod, bnad->logmod);
+	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
+	return 0;
+}
+
+static void bnad_cee_detach(struct bnad *bnad)
+{
+	struct bfa_cee *cee = &bnad->cee;
+
+	if (cee->attr_dma.kva) {
+		pci_free_consistent(bnad->pcidev, bfa_cee_meminfo(),
+				    cee->attr_dma.kva, cee->attr_dma.pa);
+	}
+}
+
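+/*
+ * One-time per-port initialization: allocate the BNA handle, DMA memory
+ * for statistics and the IOC, register callbacks, set up interrupts and
+ * bring the IOC up.
+ */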
+static int bnad_priv_init(struct bnad *bnad)
+{
+	dma_addr_t dma_addr;
+	struct bna_dma_addr bna_dma_addr;
+	int err = 0, i;
+	struct bfa_pcidev pcidev_info;
+	u32 intr_mask;
+
+	if (bnad_msix)
+		bnad->config |= BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, bnad_rxqsets_used);
+
+	bnad->work_flags = 0;
+	INIT_WORK(&bnad->work, bnad_work);
+
+	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+		     (unsigned long)bnad);
+
+	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
+		    (unsigned long)bnad);
+
+	bnad->tx_coalescing_timeo = BNAD_TX_COALESCING_TIMEO;
+	bnad->tx_interpkt_count = BNAD_TX_INTERPKT_COUNT;
+
+	bnad->rx_coalescing_timeo = BNAD_RX_COALESCING_TIMEO;
+	bnad->rx_interpkt_count = BNAD_RX_INTERPKT_COUNT;
+	bnad->rx_interpkt_timeo = BNAD_RX_INTERPKT_TIMEO;
+
+	bnad->rx_dyn_coalesce_on = true;
+
+	bnad->rx_csum = 1;
+	bnad->pause_config.tx_pause = 0;
+	bnad->pause_config.rx_pause = 0;
+
+	bnad->priv = kzalloc(bna_get_handle_size(), GFP_KERNEL);
+	if (!bnad->priv) {
+		printk(KERN_ERR "port %u failed allocating memory for bna\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_trcmod;
+	}
+	bnad->priv_stats =
+		pci_alloc_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+				     &dma_addr);
+	if (!bnad->priv_stats) {
+		printk(KERN_ERR
+		       "port %u failed allocating memory for bna stats\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_priv_mem;
+	}
+	pci_unmap_addr_set(bnad, priv_stats_dma, dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &bna_dma_addr);
+	bna_init(bnad->priv, (void *)bnad->bar0, bnad->priv_stats, bna_dma_addr,
+		 bnad->trcmod, bnad->logmod);
+	bna_all_stats_get(bnad->priv, &bnad->hw_stats);
+
+	spin_lock_init(&bnad->priv_lock);
+	mutex_init(&bnad->conf_mutex);
+	bnad->priv_cbfn.ucast_set_cb = bnad_ucast_set_cb;
+	bnad->priv_cbfn.txq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.rxq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.link_up_cb = bnad_link_up_cb;
+	bnad->priv_cbfn.link_down_cb = bnad_link_down_cb;
+	bnad->priv_cbfn.stats_get_cb = bnad_stats_get_cb;
+	bnad->priv_cbfn.hw_error_cb = bnad_hw_error_cb;
+	bnad->priv_cbfn.lldp_get_cfg_cb = bnad_lldp_get_cfg_cb;
+
+	bna_register_callback(bnad->priv, &bnad->priv_cbfn, bnad);
+
+	bna_iocll_meminfo(bnad->priv, bnad->ioc_meminfo);
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		switch (i) {
+		case BNA_KVA_MEM_T_FWTRC:
+			bnad->ioc_meminfo[i].kva =
+				vmalloc(bnad->ioc_meminfo[i].len);
+			break;
+		default:
+			bnad->ioc_meminfo[i].kva =
+				pci_alloc_consistent(bnad->pcidev,
+						     bnad->ioc_meminfo[i].len,
+						     (dma_addr_t *)
+						     &bnad->ioc_meminfo[i].dma);
+			break;
+		}
+		if (!bnad->ioc_meminfo[i].kva) {
+			printk(KERN_ERR
+			       "port %u failed allocating %u "
+			       "bytes memory for IOC\n",
+			       bnad->bna_id, bnad->ioc_meminfo[i].len);
+			err = -ENOMEM;
+			goto free_ioc_mem;
+		}
+		memset(bnad->ioc_meminfo[i].kva, 0,
+		       bnad->ioc_meminfo[i].len);
+	}
+
+	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+	pcidev_info.device_id = bnad->pcidev->device;
+	pcidev_info.pci_bar_kva = bnad->bar0;
+	bna_iocll_attach(bnad->priv, bnad, bnad->ioc_meminfo, &pcidev_info,
+			 bnad->trcmod, NULL, bnad->logmod);
+
+	err = bnad_cee_attach(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u cee_attach failed: %d\n", bnad->bna_id,
+		       err);
+		goto iocll_detach;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bnad_enable_msix(bnad);
+	else
+		dev_info(&bnad->pcidev->dev, "Working in INTx mode, no RSS\n");
+	bna_intx_disable(bnad->priv, &intr_mask);
+	err = bnad_request_mbox_irq(bnad);
+	if (err)
+		goto disable_msix;
+
+	mutex_lock(&bnad->conf_mutex);
+	bnad->state = BNAD_S_START;
+
+	init_completion(&bnad->ioc_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_enable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	wait_for_completion(&bnad->ioc_comp);
+
+	if (!bnad->ioc_comp_status) {
+		bnad->state = BNAD_S_INIT;
+		bna_port_mac_get(bnad->priv,
+		(struct mac *)bnad->netdev->perm_addr);
+	} else {
+		bnad->state = BNAD_S_INIT_DOWN;
+	}
+	mutex_unlock(&bnad->conf_mutex);
+
+	return 0;
+
+disable_msix:
+	bnad_disable_msix(bnad);
+	bnad_cee_detach(bnad);
+iocll_detach:
+	bna_iocll_detach(bnad->priv);
+free_ioc_mem:
+	bnad_free_ioc_mem(bnad);
+	pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE, bnad->priv_stats,
+			    pci_unmap_addr(bnad, priv_stats_dma));
+	bnad->priv_stats = NULL;
+free_priv_mem:
+	kfree(bnad->priv);
+	bnad->priv = NULL;
+free_trcmod:
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+
+	return err;
+}
+
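+/*
+ * Undo bnad_priv_init(): disable the IOC (retrying if the firmware does
+ * not respond), stop its timers and release all per-port resources.
+ */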
+static void bnad_priv_uninit(struct bnad *bnad)
+{
+	int i;
+	enum bna_status err;
+
+	if (bnad->priv) {
+
+		init_completion(&bnad->ioc_comp);
+
+		for (i = 0; i < 10; i++) {
+			spin_lock_irq(&bnad->priv_lock);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			if (!err)
+				break;
+			msleep(1000);
+		}
+		if (err) {
+			/* Probably firmware crashed. */
+			pr_info("bna_iocll_disable failed, "
+				"clean up and try again");
+			spin_lock_irq(&bnad->priv_lock);
+			bna_cleanup(bnad->priv);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+		}
+		wait_for_completion(&bnad->ioc_comp);
+
+		pr_info("port %u IOC is disabled", bnad->bna_id);
+
+		bnad->state = BNAD_S_UNLOADING;
+
+		del_timer_sync(&bnad->priv->ioc.ioc_timer);
+		del_timer_sync(&bnad->priv->ioc.hb_timer);
+		del_timer_sync(&bnad->priv->ioc.sem_timer);
+
+		bnad_free_ioc_mem(bnad);
+		bna_iocll_detach(bnad->priv);
+
+		mutex_destroy(&bnad->conf_mutex);
+
+		flush_scheduled_work();
+		bnad_free_mbox_irq(bnad);
+
+		bnad_disable_msix(bnad);
+
+		if (bnad->priv_stats) {
+			pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+					    bnad->priv_stats,
+					    pci_unmap_addr(bnad,
+							   priv_stats_dma));
+			bnad->priv_stats = NULL;
+		}
+		kfree(bnad->priv);
+		bnad->priv = NULL;
+	}
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+}
+
+DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
+			   PCI_DEVICE_ID_BROCADE_CATAPULT),
+		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
+		.class_mask = 0xffff00
+	}, {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
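+/*
+ * PCI probe: map BAR0, set the DMA mask, allocate and configure the
+ * net_device, initialize the per-port private data and register the
+ * netdev with the stack.
+ */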
+static int __devinit bnad_pci_probe(struct pci_dev *pdev,
+	const struct pci_device_id *pcidev_id)
+{
+	int err, using_dac;
+	struct net_device *netdev;
+	struct bnad *bnad;
+	unsigned long mmio_start, mmio_len;
+	static u32 bna_id;
+
+	printk(KERN_INFO "bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+	mutex_lock(&bnad_fwimg_mutex);
+	if (!bfad_get_firmware_buf(pdev)) { /* Returns image */
+		mutex_unlock(&bnad_fwimg_mutex);
+		printk(KERN_WARNING "Failed to load Firmware Image!\n");
+		return -ENODEV;
+	}
+	mutex_unlock(&bnad_fwimg_mutex);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, BNAD_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed: %d\n", err);
+		goto disable_device;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (!err)
+			err = pci_set_consistent_dma_mask(pdev,
+							  DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"set 32bit DMA mask failed: %d\n", err);
+			goto release_regions;
+		}
+		using_dac = 0;
+	}
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct bnad));
+	if (!netdev) {
+		dev_err(&pdev->dev, "alloc_etherdev failed\n");
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	pci_set_drvdata(pdev, netdev);
+
+	bnad = netdev_priv(netdev);
+
+	bnad->netdev = netdev;
+	bnad->pcidev = pdev;
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bnad->bar0 = ioremap_nocache(mmio_start, mmio_len);
+	if (!bnad->bar0) {
+		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+		err = -ENOMEM;
+		goto free_devices;
+	}
+	printk(KERN_INFO "bar0 mapped to %p, len %lu\n", bnad->bar0, mmio_len);
+
+	netdev->netdev_ops = &bnad_netdev_ops;
+
+	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_IPV6_CSUM;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+
+	netdev->vlan_features = netdev->features;
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |=
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len - 1;
+
+	bnad_set_ethtool_ops(netdev);
+
+	bnad->bna_id = bna_id;
+	err = bnad_priv_init(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
+		goto unmap_bar0;
+	}
+
+	memcpy(netdev->dev_addr, netdev->perm_addr, netdev->addr_len);
+
+	netif_carrier_off(netdev);
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR "port %u register_netdev failed: %d\n",
+		       bnad->bna_id, err);
+		goto bnad_device_uninit;
+	}
+	bna_id++;
+	return 0;
+
+bnad_device_uninit:
+	bnad_priv_uninit(bnad);
+unmap_bar0:
+	iounmap(bnad->bar0);
+free_devices:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+release_regions:
+	pci_release_regions(pdev);
+disable_device:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void __devexit bnad_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct bnad *bnad;
+
+	if (!netdev)
+		return;
+
+	printk(KERN_INFO "%s bnad_pci_remove\n", netdev->name);
+	bnad = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	bnad_priv_uninit(bnad);
+	iounmap(bnad->bar0);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver bnad_pci_driver = {
+	.name = BNAD_NAME,
+	.id_table = bnad_pci_id_table,
+	.probe = bnad_pci_probe,
+	.remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init bnad_module_init(void)
+{
+	printk(KERN_INFO "Brocade 10G Ethernet driver\n");
+
+	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+	return pci_register_driver(&bnad_pci_driver);
+}
+
+static void __exit bnad_module_exit(void)
+{
+	pci_unregister_driver(&bnad_pci_driver);
+
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
diff -ruP net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.h net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.h
--- net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.h	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.h	2010-02-19 13:42:27.224250000 -0800
@@ -0,0 +1,341 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef _BNAD_H_
+#define _BNAD_H_
+
+#include "cee/bfa_cee.h"
+#include "bna.h"
+
+#define BNAD_MAX_Q_DEPTH	0x10000
+#define BNAD_MIN_Q_DEPTH	0x200
+
+#define BNAD_TXQ_NUM		1
+#define BNAD_TX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_TXQ	2048
+
+#define BNAD_MAX_RXQS		64
+#define BNAD_MAX_RXQSETS_USED	16
+#define BNAD_RX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_RXQ	2048
+
+#define BNAD_MAX_CQS		64
+#define BNAD_MAX_RXQS_PER_CQ	2
+
+#define BNAD_MSIX_ERR_MAILBOX_NUM	1
+
+#define BNAD_INTX_MAX_IB_NUM	16
+#define BNAD_INTX_IB_NUM	2	/* 1 for Tx, 1 for Rx */
+#define BNAD_INTX_TX_IB_ID	0
+#define BNAD_INTX_RX_IB_ID	1
+
+#define BNAD_QUEUE_NAME_SIZE	16
+
+#define BNAD_JUMBO_MTU		9000
+
+#define BNAD_COALESCING_TIMER_UNIT	5	/* 5us */
+#define BNAD_MAX_COALESCING_TIMEO	0xFF	/* in 5us units */
+#define BNAD_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
+
+#define BNAD_TX_COALESCING_TIMEO	20	/* 20 * 5 = 100us */
+#define BNAD_TX_INTERPKT_COUNT		32
+
+#define BNAD_RX_COALESCING_TIMEO	12	/* 12 * 5 = 60us */
+#define BNAD_RX_INTERPKT_COUNT		6
+#define BNAD_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
+
+#define BNAD_SMALL_RXBUF_SIZE	128
+
+#define BNAD_RIT_OFFSET		0
+#define BNAD_MULTICAST_RXQ_ID	0
+
+#define BNAD_NETIF_WAKE_THRESHOLD	8
+
+#define BNAD_TX_MAX_VECTORS		255
+#define BNAD_TX_MAX_VECTORS_PER_WI	4
+#define BNAD_TX_MAX_DATA_PER_WI		0xFFFFFF	/* 24 bits */
+#define BNAD_TX_MAX_DATA_PER_VECTOR	0x3FFF	/* 14 bits */
+#define BNAD_TX_MAX_WRR_QUOTA		0xFFF	/* 12 bits */
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
+
+#define BNAD_NOT_READY(_bnad)	test_bit(BNAD_F_HWERROR, &(_bnad)->flags)
+#define BNAD_ADMIN_DOWN(_bnad)	(!netif_running((_bnad)->netdev) ||	\
+	test_bit(BNAD_F_BCU_DISABLED, &(_bnad)->flags))
+
+#define BNAD_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)	\
+	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_ct;
+extern u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+
+struct bnad_skb_unmap {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+	u32 producer_index;
+	u32 consumer_index;
+	struct bnad_skb_unmap *unmap_array;
+	u32 q_depth;
+};
+
+struct bnad_ib_entry {
+	struct bna_ib *ib;
+	void *ib_seg_addr;
+	struct bna_ib_config ib_config;
+};
+
+struct bnad_txq_info {
+	unsigned long flags;
+#define BNAD_TXQ_FREE_SENT	0
+	struct bna_txq txq;
+	struct bna_ib ib;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct bnad *bnad;
+	volatile u32 *hw_consumer_index;
+	struct bna_txq_config txq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_rxq_info {
+	unsigned long flags;
+#define BNAD_RXQ_REFILL		0
+	struct bna_rxq rxq;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_packets_with_error;
+	u64 rxbuf_alloc_failed;
+	struct bnad *bnad;
+	u32 rxq_id;
+	struct bna_rxq_config rxq_config;
+} ____cacheline_aligned;
+
+struct bnad_cq_info {
+	struct bna_cq cq;
+	struct bna_ib ib;
+	struct bnad *bnad;
+	struct bna_pkt_rate pkt_rate;
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	volatile u32 *hw_producer_index;
+	struct napi_struct napi;
+	u32 cq_id;
+	struct bna_cq_config cq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_txf_info {
+	u32 txf_id;
+	struct bna_txf_config txf_config;
+};
+
+struct bnad_rxf_info {
+	u32 rxf_id;
+	struct bna_rxf_config rxf_config;
+};
+
+enum bnad_ucast_cmd {
+	BNAD_UCAST_MAC_SET,
+	BNAD_UCAST_MAC_ADD,
+	BNAD_UCAST_MAC_DEL
+};
+
+enum bnad_state {
+	BNAD_S_START = 0,
+	BNAD_S_INIT = 1,
+	BNAD_S_INIT_DOWN = 2,
+	BNAD_S_INIT_DISABLING = 3,
+	BNAD_S_INIT_DISABLED = 4,
+	BNAD_S_OPENING = 5,
+	BNAD_S_OPEN = 6,
+	BNAD_S_OPEN_DOWN = 7,
+	BNAD_S_OPEN_DISABLING = 8,
+	BNAD_S_OPEN_DISABLED = 9,
+	BNAD_S_CLOSING = 10,
+	BNAD_S_UNLOADING = 11
+};
+
+enum bnad_link_state {
+	BNAD_LS_DOWN = 0,
+	BNAD_LS_UP = 1
+};
+struct bnad {
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	struct bna_dev *priv;
+
+	enum bnad_state state;
+	unsigned long flags;
+#define BNAD_F_BCU_DISABLED		0
+#define BNAD_F_HWERROR			1
+#define BNAD_F_MBOX_IRQ_DISABLED	2
+#define BNAD_F_CEE_RUNNING		3
+
+	unsigned int config;
+#define BNAD_CF_MSIX		0x01
+#define BNAD_CF_PROMISC		0x02
+#define BNAD_CF_ALLMULTI		0x04
+#define BNAD_CF_TXQ_DEPTH	0x10
+#define BNAD_CF_RXQ_DEPTH	0x20
+
+	unsigned int priority;
+	unsigned int curr_priority;	/* currently applied priority */
+
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+
+	uint txq_num;
+	uint txq_depth;
+	struct bnad_txq_info *txq_table;
+
+	struct tasklet_struct tx_free_tasklet;	/* For Tx cleanup */
+
+	uint rxq_num;
+	uint rxq_depth;
+	struct bnad_rxq_info *rxq_table;
+	uint cq_num;
+	struct bnad_cq_info *cq_table;
+
+	struct vlan_group *vlangrp;
+
+	u32 rx_csum;
+
+	uint msix_num;
+	struct msix_entry *msix_table;
+
+	uint ib_num;
+	struct bnad_ib_entry *ib_table;
+
+	struct bna_rit_entry *rit;	/* RxQ Indirection Table */
+
+	spinlock_t priv_lock ____cacheline_aligned;
+
+	uint txf_num;
+	struct bnad_txf_info *txf_table;
+	uint rxf_num;
+	struct bnad_rxf_info *rxf_table;
+
+	struct timer_list stats_timer;
+	struct net_device_stats net_stats;
+
+	u8 tx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 tx_interpkt_count;
+
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 rx_interpkt_count;
+	u8 rx_interpkt_timeo;	/* 4 bits, unit is 0.5usec. */
+
+	u8 rx_dyn_coalesce_on;	/* Rx Dynamic Intr Moderation Flag */
+
+	u8 ref_count;
+
+	u8 lldp_comp_status;
+	u8 cee_stats_comp_status;
+	u8 cee_reset_stats_status;
+	u8 ucast_comp_status;
+	u8 qstop_comp_status;
+
+	int ioc_comp_status;
+
+	struct bna_pause_config pause_config;
+
+	struct bna_stats *hw_stats;
+	struct bnad_drv_stats stats;
+
+	struct work_struct work;
+	unsigned int work_flags;
+#define BNAD_WF_ERROR		0x1
+#define BNAD_WF_RESETDONE	0x2
+#define BNAD_WF_CEE_PRIO	0x4
+#define BNAD_WF_LS_NOTIFY	0x8
+
+	struct completion lldp_comp;
+	struct completion cee_stats_comp;
+	struct completion cee_reset_stats_comp;
+	struct completion ucast_comp;
+	struct completion qstop_comp;
+	struct completion ioc_comp;
+
+	u32 bna_id;
+	u8 __iomem *bar0;	/* registers */
+
+	void *priv_stats;
+	DECLARE_PCI_UNMAP_ADDR(priv_stats_dma)
+
+	struct bfa_trc_mod *trcmod;
+	struct bfa_log_mod *logmod;
+	struct bna_meminfo ioc_meminfo[BNA_MEM_T_MAX];
+	struct timer_list ioc_timer;
+	struct mutex conf_mutex;
+
+	struct bna_mbox_cbfn priv_cbfn;
+
+	char adapter_name[64];
+	char port_name[64];
+
+	/* CEE Stuff */
+	struct bfa_cee_cbfn cee_cbfn;
+	struct bfa_cee cee;
+
+	struct list_head list_entry;
+};
+
+extern uint bnad_rxqs_per_cq;
+
+extern struct semaphore bnad_list_sem;
+extern struct list_head bnad_list;
+
+int bnad_open(struct net_device *netdev);
+int bnad_stop(struct net_device *netdev);
+int bnad_stop_locked(struct net_device *netdev);
+int bnad_open_locked(struct net_device *netdev);
+int bnad_sw_reset_locked(struct net_device *netdev);
+int bnad_ioc_disabling_locked(struct bnad *bnad);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+struct net_device_stats *bnad_get_stats(struct net_device *netdev);
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 * mac_ptr,
+		   unsigned int cmd);
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss);
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id);
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask);
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_cq_init(struct bnad *bnad, uint cq_id);
+void bnad_setup_cq(struct bnad *bnad, uint cq_id);
+void bnad_setup_ib(struct bnad *bnad, uint ib_id);
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id);
+void bnad_free_ib(struct bnad *bnad, uint ib_id);
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id);
+u32 bnad_get_msglevel(struct net_device *netdev);
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel);
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth);
+void bnad_free_cq(struct bnad *bnad, uint cq_id);
+void bnad_add_to_list(struct bnad *bnad);
+void bnad_remove_from_list(struct bnad *bnad);
+struct bnad *get_bnadev(int bna_id);
+int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _BNAD_H_ */


* Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
@ 2010-02-12 14:00 Rasesh Mody
  0 siblings, 0 replies; 30+ messages in thread
From: Rasesh Mody @ 2010-02-12 14:00 UTC (permalink / raw)
  To: netdev; +Cc: adapter_linux_open_src_team

From: Rasesh Mody <rmody@brocade.com>

This is patch 1/6 which contains linux driver source for
Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
Source is based against net-next-2.6.

We wish this patch to be considered for inclusion in net-next-2.6

Signed-off-by: Rasesh Mody <rmody@brocade.com>
---
 bnad.c | 3503 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 bnad.h |  343 ++++++
 2 files changed, 3846 insertions(+)

diff -ruP net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.c net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.c
--- net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.c	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.c	2010-02-12 01:39:39.940983000 -0800
@@ -0,0 +1,3503 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ *  bnad.c  Brocade 10G PCIe Ethernet driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/workqueue.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pm.h>
+#include <linux/random.h>
+
+#include <net/checksum.h>
+
+#include "bnad.h"
+#include "cna.h"
+#include "bna_iocll.h"
+#include "bna_intr.h"
+#include "bnad_defs.h"
+
+#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
+static const bool bnad_msix = true;
+static const bool bnad_small_large_rxbufs = true;
+static uint bnad_rxqsets_used;
+static const bool bnad_ipid_mode;
+static const bool bnad_vlan_strip = true;
+static const uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
+static const uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
+static uint bnad_log_level;
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");
+
+uint bnad_rxqs_per_cq = 2;
+
+const char *bnad_states[] = {
+	"START",
+	"INIT",
+	"INIT_DOWN",
+	"INIT_DISABLING",
+	"INIT_DISABLED",
+	"OPENING",
+	"OPEN",
+	"OPEN_DOWN",
+	"OPEN_DISABING",
+	"OPEN_DISABLED",
+	"CLOSING",
+	"UNLOADING"
+};
+
+DEFINE_MUTEX(bnad_fwimg_mutex);
+
+static void bnad_disable_msix(struct bnad *bnad);
+static void bnad_free_ibs(struct bnad *bnad);
+static void bnad_set_rx_mode(struct net_device *netdev);
+static void bnad_set_rx_mode_locked(struct net_device *netdev);
+static void bnad_reconfig_vlans(struct bnad *bnad);
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets);
+static int bnad_set_mac_address(struct net_device *netdev, void *addr);
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr);
+static int bnad_disable_locked(struct bnad *bnad);
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu);
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid);
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid);
+static void bnad_netpoll(struct net_device *netdev);
+
+static const struct net_device_ops bnad_netdev_ops = {
+	.ndo_open			= bnad_open,
+	.ndo_stop			= bnad_stop,
+	.ndo_start_xmit			= bnad_start_xmit,
+	.ndo_get_stats			= bnad_get_stats,
+	.ndo_set_rx_mode		= bnad_set_rx_mode,
+	.ndo_set_multicast_list		= bnad_set_rx_mode,
+	.ndo_set_mac_address		= bnad_set_mac_address,
+	.ndo_change_mtu			= bnad_change_mtu,
+
+	.ndo_vlan_rx_register		= bnad_vlan_rx_register,
+	.ndo_vlan_rx_add_vid		= bnad_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid		= bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller		= bnad_netpoll,
+#endif
+};
+
+void bnad_ioc_timeout(unsigned long ioc_arg)
+{
+	struct bnad *bnad = (struct bnad *)(((struct bfa_ioc *)ioc_arg)->bfa);
+
+	spin_lock_irq(&bnad->priv_lock);
+	/* Assumption: bfa_ioc_timeout() is the IOC state machine timer
+	 * handler, mirroring the sem_timeout/hb_check handlers below. */
+	bfa_ioc_timeout(ioc_arg);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_ioc_sem_timeout(unsigned long ioc_arg)
+{
+	struct bnad *bnad = (struct bnad *)(((struct bfa_ioc *)ioc_arg)->bfa);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bfa_ioc_sem_timeout(ioc_arg);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_ioc_hb_check(unsigned long ioc_arg)
+{
+	struct bnad *bnad = (struct bnad *)(((struct bfa_ioc *)ioc_arg)->bfa);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bfa_ioc_hb_check(ioc_arg);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+u32
+bnad_get_msglevel(struct net_device *netdev)
+{
+	return bnad_log_level;
+}
+
+void
+bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	mutex_lock(&bnad->conf_mutex);
+	bnad_log_level = msglevel;
+	mutex_unlock(&bnad->conf_mutex);
+}
+
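+/*
+ * Walk the TxQ up to the updated consumer index, unmap and free the
+ * transmitted skbs and return the number of packets reclaimed.
+ */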
+static unsigned int bnad_free_txbufs(struct bnad_txq_info *txqinfo,
+	u16 updated_txq_cons)
+{
+	struct bnad *bnad = txqinfo->bnad;
+	unsigned int sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	wis = BNAD_Q_INDEX_CHANGE(txqinfo->txq.q.consumer_index,
+				  updated_txq_cons, txqinfo->txq.q.q_depth);
+	BUG_ON(!(wis <=
+		   BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth)));
+	unmap_array = txqinfo->skb_unmap_q.unmap_array;
+	unmap_cons = txqinfo->skb_unmap_q.consumer_index;
+	prefetch(&unmap_array[unmap_cons + 1]);
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+		unmap_array[unmap_cons].skb = NULL;
+		BUG_ON(!(wis >=
+			   BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags)));
+		BUG_ON(!(((txqinfo->skb_unmap_q.producer_index - unmap_cons) &
+			  (txqinfo->skb_unmap_q.q_depth - 1)) >=
+			 1 + skb_shinfo(skb)->nr_frags));
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr), skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, txqinfo->skb_unmap_q.q_depth);
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+				       pci_unmap_addr(&unmap_array[unmap_cons],
+						      dma_addr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					   0);
+			BNA_QE_INDX_ADD(unmap_cons, 1,
+					txqinfo->skb_unmap_q.q_depth);
+			prefetch(&unmap_array[unmap_cons + 1]);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	txqinfo->txq.q.consumer_index = updated_txq_cons;
+	txqinfo->skb_unmap_q.consumer_index = unmap_cons;
+	txqinfo->tx_packets += sent_packets;
+	txqinfo->tx_bytes += sent_bytes;
+	return sent_packets;
+}
+
+static inline void bnad_disable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+}
+
+static inline void bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    bnad->tx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    bnad->cq_table[i].
+					    rx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static inline void bnad_disable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib, 0);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+}
+static inline void bnad_enable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib,
+				    cqinfo->rx_coalescing_timeo);
+
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static unsigned int bnad_tx(struct bnad *bnad, struct bnad_txq_info *txqinfo)
+{
+	struct net_device *netdev = bnad->netdev;
+	unsigned int sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags))
+		return 0;
+
+	sent = bnad_free_txbufs(txqinfo,
+				(u16) (*txqinfo->hw_consumer_index));
+	if (sent) {
+		if (netif_queue_stopped(netdev) &&
+		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
+		    BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);
+	} else {
+		bna_ib_ack(bnad->priv, &txqinfo->ib, 0);
+	}
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+
+	return sent;
+}
+
+static irqreturn_t bnad_msix_tx(int irq, void *data)
+{
+	struct bnad_txq_info *txqinfo = (struct bnad_txq_info *)data;
+	struct bnad *bnad = txqinfo->bnad;
+
+	bnad_tx(bnad, txqinfo);
+
+	return IRQ_HANDLED;
+}
+
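+/*
+ * Refill an RxQ with freshly allocated, DMA-mapped skbs and ring the
+ * producer doorbell for the entries that were posted.
+ */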
+static void bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc =
+		BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				rxqinfo->skb_unmap_q.q_depth);
+
+	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
+	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
+	BUG_ON(!(wi_range && wi_range <= rxqinfo->rxq.q.q_depth));
+
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent,
+					     wi_range);
+			BUG_ON(!(wi_range &&
+				   wi_range <= rxqinfo->rxq.q.q_depth));
+		}
+		skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			rxqinfo->rxbuf_alloc_failed++;
+			goto finishing;
+		}
+		skb->dev = rxqinfo->bnad->netdev;
+		skb_reserve(skb, NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr =
+			pci_map_single(rxqinfo->bnad->pcidev, skb->data,
+				       rxqinfo->rxq_config.buffer_size,
+				       PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, rxqinfo->skb_unmap_q.q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		rxqinfo->skb_unmap_q.producer_index = unmap_prod;
+		rxqinfo->rxq.q.producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(&rxqinfo->rxq);
+	}
+}
+
+static inline void bnad_refill_rxq(struct bnad_rxq_info *rxqinfo)
+{
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags)) {
+		if (BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				    rxqinfo->skb_unmap_q.q_depth) >>
+		    BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_rxbufs(rxqinfo);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+	}
+}
+
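+/*
+ * Process up to 'budget' completions from a CQ: unmap the receive
+ * buffers, set the checksum status, hand the packets to the stack (with
+ * VLAN acceleration if enabled) and trigger RxQ refills.
+ */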
+static unsigned int bnad_poll_cq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_rxq_info *rxqinfo = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	struct bna_pkt_rate *pkt_rt = &cqinfo->pkt_rate;
+
+	prefetch(bnad->netdev);
+	cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+	BUG_ON(!(wi_range && wi_range <= cqinfo->cq.q.q_depth));
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+		rxqinfo = &bnad->rxq_table[cmpl->rxq_id];
+		unmap_q = &rxqinfo->skb_unmap_q;
+
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		prefetch(skb->data - NET_IP_ALIGN);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		/* XXX May be bad for performance. */
+		/* CATAPULT_BRINGUP : Should we add all the packets ? */
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+
+		wis++;
+		if (likely(--wi_range))
+			next_cmpl = cmpl + 1;
+		else {
+			BNA_Q_PI_ADD(&cqinfo->cq, wis);
+			wis = 0;
+			next_cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+			BUG_ON(!(wi_range &&
+				   wi_range <= cqinfo->cq.q.q_depth));
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+				      BNA_CQ_EF_FCS_ERROR |
+				      BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb(skb);
+			rxqinfo->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely(bnad->rx_csum &&
+			   (((flags & BNA_CQ_EF_IPV4) &&
+			     (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+			    (flags & BNA_CQ_EF_IPV6)) &&
+			   (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+			   (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		rxqinfo->rx_packets++;
+		rxqinfo->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) &&
+		    bnad_vlan_strip) {
+			vlan_hwaccel_receive_skb(skb, bnad->vlangrp,
+						 ntohs(cmpl->vlan_tag));
+		} else {
+			netif_receive_skb(skb);
+		}
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	BNA_Q_PI_ADD(&cqinfo->cq, wis);
+
+	if (likely(rxqinfo)) {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, packets);
+		/* Check the current queue first. */
+		bnad_refill_rxq(rxqinfo);
+
+		/* XXX counters per queue for refill? */
+		if (likely(bnad_small_large_rxbufs)) {
+			/* There are 2 RxQs - small and large buffer queues */
+			unsigned int rxq_id = (rxqinfo->rxq_id ^ 1);
+			bnad_refill_rxq(&bnad->rxq_table[rxq_id]);
+		}
+	} else {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	}
+
+	return packets;
+}
+
+static irqreturn_t bnad_msix_rx(int irq, void *data)
+{
+	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
+	struct bnad *bnad = cqinfo->bnad;
+
+	if (likely(napi_schedule_prep(&cqinfo->napi))) {
+		bnad_disable_rx_irq(bnad, cqinfo);
+		__napi_schedule(&cqinfo->napi);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_msix_err_mbox(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+
+	bna_intr_status_get(bnad->priv, &intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+		bna_mbox_err_handler(bnad->priv, intr_status);
+
+	spin_unlock(&bnad->priv_lock);
+
+	return IRQ_HANDLED;
+}
+
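+/*
+ * Shared INTx interrupt handler: handles mailbox/error interrupts inline,
+ * defers data-path processing to NAPI and returns IRQ_NONE when the
+ * device has no interrupt pending.
+ */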
+static irqreturn_t bnad_isr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+
+	if (!intr_status) {
+		spin_unlock(&bnad->priv_lock);
+		return IRQ_NONE;
+	}
+
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		bna_mbox_err_handler(bnad->priv, intr_status);
+		spin_unlock(&bnad->priv_lock);
+		if (BNA_IS_ERR_INTR(intr_status) ||
+		    !BNA_IS_INTX_DATA_INTR(intr_status))
+			goto exit_isr;
+	} else {
+		spin_unlock(&bnad->priv_lock);
+	}
+
+	if (likely(napi_schedule_prep(&bnad->cq_table[0].napi))) {
+		bnad_disable_txrx_irqs(bnad);
+		__napi_schedule(&bnad->cq_table[0].napi);
+	}
+
+exit_isr:
+	return IRQ_HANDLED;
+}
+
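+/*
+ * Install the mailbox/error interrupt handler: the last MSI-X vector is
+ * dedicated to the mailbox in MSI-X mode, otherwise the shared INTx
+ * handler is used.
+ */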
+static int bnad_request_mbox_irq(struct bnad *bnad)
+{
+	int err;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		err = request_irq(bnad->msix_table[bnad->msix_num - 1].vector,
+				  &bnad_msix_err_mbox, 0,
+				  bnad->netdev->name, bnad->netdev);
+	} else {
+		err = request_irq(bnad->pcidev->irq, &bnad_isr,
+				  IRQF_SHARED, bnad->netdev->name,
+				  bnad->netdev);
+	}
+
+	if (err) {
+		dev_err(&bnad->pcidev->dev,
+			"Request irq for mailbox failed: %d\n", err);
+		return err;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bna_mbox_msix_idx_set(bnad->priv, bnad->msix_num - 1);
+
+	bna_mbox_intr_enable(bnad->priv);
+	return 0;
+}
+
+static void bnad_sync_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+	synchronize_irq(irq);
+}
+
+static void bnad_free_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_mbox_intr_disable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	free_irq(irq, bnad->netdev);
+}
+
+static int bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
+{
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[txq_id].vector,
+			   &bnad_msix_tx, 0,
+			   bnad->txq_table[txq_id].name,
+			   &bnad->txq_table[txq_id]);
+}
+
+int
+bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
+{
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
+			   &bnad_msix_rx, 0,
+			   bnad->cq_table[cq_id].name, &bnad->cq_table[cq_id]);
+}
+
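+/*
+ * In MSI-X mode request one vector per TxQ and per CQ, unwinding on
+ * failure; in INTx mode only the interrupt mask and the interrupt blocks
+ * need to be set up.
+ */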
+static int bnad_request_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	int i;
+	int err;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		u32 mask;
+		bna_intx_disable(bnad->priv, &mask);
+		mask &= ~0xffff;
+		bna_intx_enable(bnad->priv, mask);
+		for (i = 0; i < bnad->ib_num; i++)
+			bna_ib_ack(bnad->priv, bnad->ib_table[i].ib, 0);
+		return 0;
+	}
+
+	entries = bnad->msix_table;
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_request_txq_irq(bnad, i);
+		if (err) {
+			pr_info("%s request irq for TxQ %d failed %d",
+				bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[i].vector,
+					 &bnad->txq_table[i]);
+			}
+			return err;
+		}
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_request_cq_irq(bnad, i);
+		if (err) {
+			pr_info("%s request irq for CQ %u failed %d",
+				bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[bnad->txq_num + i].vector,
+					 &bnad->cq_table[i]);
+			}
+			goto free_txq_irqs;
+		}
+	}
+
+	return 0;
+
+free_txq_irqs:
+	for (i = 0; i < bnad->txq_num; i++)
+		free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+	bnad_disable_msix(bnad);
+
+	return err;
+}
+
+static void bnad_free_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	uint i;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		entries = bnad->msix_table;
+		for (i = 0; i < bnad->txq_num; i++)
+			free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+		for (i = 0; i < bnad->cq_num; i++) {
+			free_irq(entries[bnad->txq_num + i].vector,
+				 &bnad->cq_table[i]);
+		}
+	} else {
+		synchronize_irq(bnad->pcidev->irq);
+	}
+}
+
+void
+bnad_setup_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+
+	ib_entry = &bnad->ib_table[ib_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_ib_config_set(bnad->priv, ib_entry->ib, ib_id,
+			  &ib_entry->ib_config);
+	/* Start the IB */
+	bna_ib_ack(bnad->priv, ib_entry->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_ibs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_ib(bnad, bnad->txq_table[i].txq_config.ib_id);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_ib(bnad, bnad->cq_table[i].cq_config.ib_id);
+}
+
+/* These functions are called back with priv_lock held. */
+
+static void bnad_lldp_get_cfg_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_attr_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_stats_comp_status = status;
+	complete(&bnad->cee_stats_comp);
+}
+
+static void bnad_cee_reset_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_reset_stats_status = status;
+	complete(&bnad->cee_reset_stats_comp);
+}
+
+static void bnad_ucast_set_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->ucast_comp_status = status;
+	complete(&bnad->ucast_comp);
+}
+
+static void bnad_q_stop_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+
+	bnad->qstop_comp_status = status;
+	complete(&bnad->qstop_comp);
+}
+
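+/* Return the lowest set priority in the CEE priority map (0 if empty). */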
+static unsigned int bnad_get_priority(struct bnad *bnad, u8 prio_map)
+{
+	unsigned int i;
+
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		return i;
+	}
+	return 0;
+}
+
+static void bnad_link_up_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct bfi_ll_aen *up_aen = (struct bfi_ll_aen *)
+		(&bnad->priv->mb_msg);
+
+	bnad->cee_linkup = up_aen->cee_linkup;
+	bnad->priority = bnad_get_priority(bnad, up_aen->prio_map);
+
+	bnad->link_state = BNAD_LS_UP;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_link_down_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->link_state = BNAD_LS_DOWN;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_stats_get_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->stats.hw_stats_updates++;
+	if (bnad->state == BNAD_S_OPEN)
+		mod_timer(&bnad->stats_timer, jiffies + HZ);
+}
+
+/* Called with bnad priv_lock held. */
+static void bnad_hw_error(struct bnad *bnad, u8 status)
+{
+	unsigned int irq;
+
+	set_bit(BNAD_F_HWERROR, &bnad->flags);
+
+	bna_mbox_intr_disable(bnad->priv);
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (!test_and_set_bit(BNAD_F_MBOX_IRQ_DISABLED, &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			pr_info("Disabling Mbox IRQ %d for port %d",
+				irq, bnad->bna_id);
+			disable_irq_nosync(irq);
+		}
+	}
+
+	bna_cleanup(bnad->priv);
+
+	bnad->work_flags = BNAD_WF_ERROR;
+	if (bnad->state != BNAD_S_UNLOADING)
+		schedule_work(&bnad->work);
+}
+
+static void bnad_hw_error_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad_hw_error(bnad, status);
+}
+
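+/* Allocate the array used to track skbs and DMA mappings for one queue. */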
+int
+bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth)
+{
+	/* Q_depth must be power of 2 for macros to work. */
+	unmap_q->q_depth = q_depth;
+	unmap_q->unmap_array = vmalloc(q_depth * sizeof(struct bnad_skb_unmap));
+	if (!unmap_q->unmap_array)
+		return -ENOMEM;
+	memset(unmap_q->unmap_array, 0,
+	       q_depth * sizeof(struct bnad_skb_unmap));
+	return 0;
+}
+
+static int bnad_alloc_unmap_queues(struct bnad *bnad)
+{
+	int i, err = 0;
+	struct bnad_txq_info *txqinfo;
+	struct bnad_rxq_info *rxqinfo;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		err = bnad_alloc_unmap_q(&txqinfo->skb_unmap_q,
+					 txqinfo->txq.q.q_depth * 4);
+		if (err) {
+			pr_info("%s allocating Tx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		err = bnad_alloc_unmap_q(&rxqinfo->skb_unmap_q,
+					 rxqinfo->rxq.q.q_depth);
+		if (err) {
+			pr_info("%s allocating Rx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void bnad_reset_q(struct bnad *bnad, struct bna_q *q,
+	struct bnad_unmap_q *unmap_q)
+{
+	q->producer_index = 0;
+	q->consumer_index = 0;
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+}
+
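+/* Unmap and free all posted Rx buffers, then reset the queue indices. */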
+static void bnad_flush_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	struct bnad *bnad = rxqinfo->bnad;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 cq_id;
+
+	unmap_q = &rxqinfo->skb_unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		struct bnad_skb_unmap *unmap_ce =
+			&unmap_q->unmap_array[unmap_q->consumer_index];
+
+		skb = unmap_ce->skb;
+		unmap_ce->skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(unmap_ce, dma_addr),
+				 rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+	}
+
+	bnad_reset_q(bnad, &rxqinfo->rxq.q, &rxqinfo->skb_unmap_q);
+	cq_id = rxqinfo->rxq_id / bnad_rxqs_per_cq;
+	*bnad->cq_table[cq_id].hw_producer_index = 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_disable_txq(struct bnad *bnad, u32 txq_id)
+{
+	int err;
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_txq_stop(bnad->priv, txq_id);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto txq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto txq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+txq_stop_exit:
+	if (err) {
+		pr_info("%s stop TxQ %u failed %d", bnad->netdev->name,
+			txq_id, err);
+	}
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask)
+{
+	int err;
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_multi_rxq_stop(bnad->priv, rxq_id_mask);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto rxq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto rxq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+rxq_stop_exit:
+	if (err) {
+		pr_info("%s stop RxQs(0x%llx) failed %d",
+			bnad->netdev->name, rxq_id_mask, err);
+	}
+
+	return err;
+}
+
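+/* NAPI poll handler used in MSI-X mode: services Rx completions only. */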
+static int bnad_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_rx_irq(bnad, cqinfo);
+	return rcvd;
+}
+
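+/* NAPI poll handler used in INTx mode: reaps Tx, then polls the CQ. */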
+static int bnad_poll_txrx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	bnad_tx(bnad, &bnad->txq_table[0]);
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_txrx_irqs(bnad);
+	return rcvd;
+}
+
+static void bnad_napi_init(struct bnad *bnad)
+{
+	int (*napi_poll) (struct napi_struct *, int);
+	int i;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		napi_poll = bnad_poll_rx;
+	else
+		napi_poll = bnad_poll_txrx;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_add(bnad->netdev, &bnad->cq_table[i].napi, napi_poll,
+			       64);
+}
+
+static void bnad_napi_enable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_enable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_disable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_disable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_del(&bnad->cq_table[i].napi);
+}
+
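+/*
+ * Quiesce the data path: disable the Tx/Rx functions and IBs (unless we
+ * are already in an error state), free the data-path IRQs, tear down
+ * NAPI, and stop the stats timer and Tx-free tasklet.
+ */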
+static void bnad_stop_data_path(struct bnad *bnad, int on_error)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!on_error && !BNAD_NOT_READY(bnad)) {
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Wait to make sure Tx and Rx are stopped. */
+	msleep(1000);
+	bnad_free_txrx_irqs(bnad);
+	bnad_sync_mbox_irq(bnad);
+
+	bnad_napi_disable(bnad);
+	bnad_napi_uninit(bnad);
+	/* Delete the stats timer after synchronize with mbox irq. */
+	del_timer_sync(&bnad->stats_timer);
+
+	netif_tx_disable(bnad->netdev);
+	netif_carrier_off(bnad->netdev);
+
+	/*
+	 * Remove tasklets if scheduled
+	 */
+	tasklet_kill(&bnad->tx_free_tasklet);
+}
+
+static void bnad_port_admin_locked(struct bnad *bnad, u8 up)
+{
+	spin_lock_irq(&bnad->priv_lock);
+	if (!BNAD_NOT_READY(bnad)) {
+		bna_port_admin(bnad->priv, up);
+		if (up)
+			mod_timer(&bnad->stats_timer, jiffies + HZ);
+		else
+			bnad->link_state = BNAD_LS_DOWN;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held */
+static int bnad_stop_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	switch (bnad->state) {
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_CLOSING;
+		bnad_disable_locked(bnad);
+		bnad->state = BNAD_S_INIT;
+		pr_info("%s is stopped", bnad->netdev->name);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN_DISABLED:
+		bnad->state = BNAD_S_INIT_DISABLED;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held */
+int
+bnad_ioc_disabling_locked(struct bnad *bnad)
+{
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT_DISABLING;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+		bnad_disable_locked(bnad);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int bnad_alloc_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	ib_entry = &bnad->ib_table[ib_id];
+	ib_entry->ib_seg_addr =
+		pci_alloc_consistent(bnad->pcidev, L1_CACHE_BYTES, &dma_addr);
+	if (!ib_entry->ib_seg_addr)
+		return -ENOMEM;
+
+	BNA_SET_DMA_ADDR(dma_addr, &ib_entry->ib_config.ib_seg_addr);
+	return 0;
+}
+
+static int bnad_alloc_ibs(struct bnad *bnad)
+{
+	uint i;
+	int err;
+
+	bnad->ib_num = bnad->txq_num + bnad->cq_num;
+	bnad->ib_table =
+		kcalloc(bnad->ib_num, sizeof(struct bnad_ib_entry),
+			GFP_KERNEL);
+	if (!bnad->ib_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->ib_num; i++) {
+		err = bnad_alloc_ib(bnad, i);
+		if (err)
+			goto free_ibs;
+	}
+	return 0;
+
+free_ibs:
+	bnad_free_ibs(bnad);
+	return err;
+}
+
+void
+bnad_free_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	ib_entry = &bnad->ib_table[ib_id];
+	if (ib_entry->ib_seg_addr) {
+		BNA_GET_DMA_ADDR(&ib_entry->ib_config.ib_seg_addr, dma_addr);
+		pci_free_consistent(bnad->pcidev, L1_CACHE_BYTES,
+				    ib_entry->ib_seg_addr, dma_addr);
+		ib_entry->ib_seg_addr = NULL;
+	}
+}
+
+static void bnad_free_ibs(struct bnad *bnad)
+{
+	uint i;
+
+	if (!bnad->ib_table)
+		return;
+	for (i = 0; i < bnad->ib_num; i++)
+		bnad_free_ib(bnad, i);
+	kfree(bnad->ib_table);
+	bnad->ib_table = NULL;
+}
+
+/* Let the caller deal with error - free memory. */
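+/*
+ * A queue is built as a page table (QPT): a DMA-able array of page
+ * addresses plus the data pages themselves.
+ */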
+static int bnad_alloc_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q, size_t qsize)
+{
+	size_t i;
+	dma_addr_t dma_addr;
+
+	qsize = ALIGN(qsize, PAGE_SIZE);
+	qpt->page_count = qsize >> PAGE_SHIFT;
+	qpt->page_size = PAGE_SIZE;
+
+	qpt->kv_qpt_ptr =
+		pci_alloc_consistent(bnad->pcidev,
+				     qpt->page_count *
+				     sizeof(struct bna_dma_addr), &dma_addr);
+	if (!qpt->kv_qpt_ptr)
+		return -ENOMEM;
+	BNA_SET_DMA_ADDR(dma_addr, &qpt->hw_qpt_ptr);
+
+	q->qpt_ptr = kcalloc(qpt->page_count, sizeof(void *), GFP_KERNEL);
+	if (!q->qpt_ptr)
+		return -ENOMEM;
+	qpt->qpt_ptr = q->qpt_ptr;
+	for (i = 0; i < qpt->page_count; i++) {
+		q->qpt_ptr[i] =
+			pci_alloc_consistent(bnad->pcidev, PAGE_SIZE,
+					     &dma_addr);
+		if (!q->qpt_ptr[i])
+			return -ENOMEM;
+		BNA_SET_DMA_ADDR(dma_addr,
+				 &((struct bna_dma_addr *)qpt->kv_qpt_ptr)[i]);
+
+	}
+
+	return 0;
+}
+
+static void bnad_free_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q)
+{
+	int i;
+	dma_addr_t dma_addr;
+
+	if (qpt->kv_qpt_ptr && q->qpt_ptr) {
+		for (i = 0; i < qpt->page_count; i++) {
+			if (q->qpt_ptr[i]) {
+				BNA_GET_DMA_ADDR(&
+						 ((struct bna_dma_addr *)qpt->
+						  kv_qpt_ptr)[i], dma_addr);
+				pci_free_consistent(bnad->pcidev, PAGE_SIZE,
+						    q->qpt_ptr[i], dma_addr);
+			}
+		}
+	}
+
+	kfree(q->qpt_ptr);
+	qpt->qpt_ptr = q->qpt_ptr = NULL;
+
+	if (qpt->kv_qpt_ptr) {
+		BNA_GET_DMA_ADDR(&qpt->hw_qpt_ptr, dma_addr);
+		pci_free_consistent(bnad->pcidev,
+				    qpt->page_count *
+				    sizeof(struct bna_dma_addr),
+				    qpt->kv_qpt_ptr, dma_addr);
+		qpt->kv_qpt_ptr = NULL;
+	}
+}
+
+static void bnad_free_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	txqinfo = &bnad->txq_table[txq_id];
+	bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+	if (txqinfo->skb_unmap_q.unmap_array) {
+		bnad_free_txbufs(txqinfo, txqinfo->txq.q.producer_index);
+		vfree(txqinfo->skb_unmap_q.unmap_array);
+		txqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+	if (rxqinfo->skb_unmap_q.unmap_array) {
+		bnad_flush_rxbufs(rxqinfo);
+		vfree(rxqinfo->skb_unmap_q.unmap_array);
+		rxqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo = &bnad->cq_table[cq_id];
+
+	bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+}
+
+static void bnad_free_queues(struct bnad *bnad)
+{
+	uint i;
+
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++)
+			bnad_free_txq(bnad, i);
+		kfree(bnad->txq_table);
+		bnad->txq_table = NULL;
+	}
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++)
+			bnad_free_rxq(bnad, i);
+		kfree(bnad->rxq_table);
+		bnad->rxq_table = NULL;
+	}
+
+	if (bnad->cq_table) {
+		for (i = 0; i < bnad->cq_num; i++)
+			bnad_free_cq(bnad, i);
+		kfree(bnad->cq_table);
+		bnad->cq_table = NULL;
+	}
+}
+
+static int bnad_txq_init(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	txqinfo = &bnad->txq_table[txq_id];
+	err = bnad_alloc_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q,
+			   bnad->txq_depth * sizeof(struct bna_txq_entry));
+	if (err) {
+		bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+		return err;
+	}
+	txqinfo->txq.q.q_depth = bnad->txq_depth;
+	txqinfo->bnad = bnad;
+	txqinfo->txq_config.txf_id = BNAD_TX_FUNC_ID;
+	snprintf(txqinfo->name, sizeof(txqinfo->name), "%s TxQ %d",
+		 bnad->netdev->name, txq_id);
+	return 0;
+}
+
+static int bnad_txqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->txq_table =
+		kcalloc(bnad->txq_num, sizeof(struct bnad_txq_info),
+			GFP_KERNEL);
+	if (!bnad->txq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_txq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_rxq_init(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	int err;
+
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	err = bnad_alloc_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q,
+			   bnad->rxq_depth * sizeof(struct bna_rxq_entry));
+	if (err) {
+		bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+		return err;
+	}
+	rxqinfo->rxq.q.q_depth = bnad->rxq_depth;
+	rxqinfo->bnad = bnad;
+	rxqinfo->rxq_id = rxq_id;
+	rxqinfo->rxq_config.cq_id = rxq_id / bnad_rxqs_per_cq;
+
+	return 0;
+}
+
+static int
+bnad_rxqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->rxq_table =
+		kcalloc(bnad->rxq_num, sizeof(struct bnad_rxq_info),
+			GFP_KERNEL);
+	if (!bnad->rxq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		err = bnad_rxq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_cq_init(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+	int err;
+
+	cqinfo = &bnad->cq_table[cq_id];
+	err = bnad_alloc_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q,
+			   bnad->rxq_depth * bnad_rxqs_per_cq *
+			   sizeof(struct bna_cq_entry));
+	if (err) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return err;
+	}
+
+	cqinfo->cq.q.q_depth = bnad->rxq_depth * bnad_rxqs_per_cq;
+	cqinfo->bnad = bnad;
+
+	cqinfo->rx_coalescing_timeo = bnad->rx_coalescing_timeo;
+
+	cqinfo->cq_id = cq_id;
+	snprintf(cqinfo->name, sizeof(cqinfo->name), "%s CQ %d",
+		 bnad->netdev->name, cq_id);
+
+	return 0;
+}
+
+static int bnad_cqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->cq_table =
+		kcalloc(bnad->cq_num, sizeof(struct bnad_cq_info), GFP_KERNEL);
+	if (!bnad->cq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_cq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+static uint bnad_get_qsize(uint qsize_conf, uint mtu)
+{
+	uint qsize;
+
+	if (mtu >= ETH_DATA_LEN) {
+		qsize = qsize_conf / (mtu / ETH_DATA_LEN);
+		if (!BNA_POWER_OF_2(qsize))
+			BNA_TO_POWER_OF_2_HIGH(qsize);
+		if (qsize < BNAD_MIN_Q_DEPTH)
+			qsize = BNAD_MIN_Q_DEPTH;
+	} else {
+		qsize = qsize_conf;
+	}
+
+	return qsize;
+}
+
+static int bnad_init_queues(struct bnad *bnad)
+{
+	int err;
+
+	if (!(bnad->config & BNAD_CF_TXQ_DEPTH))
+		bnad->txq_depth =
+			bnad_get_qsize(bnad_txq_depth, bnad->netdev->mtu);
+	if (!(bnad->config & BNAD_CF_RXQ_DEPTH))
+		bnad->rxq_depth =
+			bnad_get_qsize(bnad_rxq_depth, bnad->netdev->mtu);
+
+	err = bnad_txqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_rxqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_cqs_init(bnad);
+
+	return err;
+}
+
+void
+bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
+{
+	struct bnad_cq_info *cqinfo;
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+
+	cqinfo = &bnad->cq_table[cq_id];
+	ib_entry = &bnad->ib_table[ib_id];
+
+	cqinfo->hw_producer_index = (u32 *) (ib_entry->ib_seg_addr);
+	cqinfo->cq_config.ib_id = ib_id;
+	cqinfo->cq_config.ib_seg_index = 0;
+
+	ib_entry->ib = &cqinfo->ib;
+	ib_config = &ib_entry->ib_config;
+	ib_config->coalescing_timer = bnad->rx_coalescing_timeo;
+	ib_config->control_flags =
+		BNA_IB_CF_INT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
+	if (bnad->config & BNAD_CF_MSIX) {
+		ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+		ib_config->msix_vector = ib_id;
+	} else
+		ib_config->msix_vector = 1 << ib_id;
+
+	/* Every CQ has its own IB. */
+	ib_config->seg_size = 1;
+	ib_config->index_table_offset = ib_id;
+}
+
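+/*
+ * Attach an interrupt block (IB) to every TxQ and CQ and set up its
+ * coalescing and MSI-X/INTx parameters.
+ */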
+static void bnad_ibs_init(struct bnad *bnad)
+{
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+	struct bnad_txq_info *txqinfo;
+
+	int ib_id, i;
+
+	ib_id = 0;
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		ib_entry = &bnad->ib_table[ib_id];
+
+		txqinfo->hw_consumer_index = ib_entry->ib_seg_addr;
+		txqinfo->txq_config.ib_id = ib_id;
+		txqinfo->txq_config.ib_seg_index = 0;
+
+		ib_entry->ib = &txqinfo->ib;
+		ib_config = &ib_entry->ib_config;
+		ib_config->coalescing_timer = bnad->tx_coalescing_timeo;
+		ib_config->control_flags =
+			BNA_IB_CF_INTER_PKT_DMA | BNA_IB_CF_INT_ENABLE |
+			BNA_IB_CF_COALESCING_MODE | BNA_IB_CF_MASTER_ENABLE;
+		if (bnad->config & BNAD_CF_MSIX) {
+			ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+			ib_config->msix_vector = ib_id;
+		} else
+			ib_config->msix_vector = 1 << ib_id;
+		ib_config->interpkt_count = bnad->tx_interpkt_count;
+
+		/* Every TxQ has its own IB. */
+		ib_config->seg_size = 1;
+		ib_config->index_table_offset = ib_id;
+		ib_id++;
+	}
+
+	for (i = 0; i < bnad->cq_num; i++, ib_id++)
+		bnad_rxib_init(bnad, i, ib_id);
+}
+
+static void
+bnad_txf_init(struct bnad *bnad, uint txf_id)
+{
+	struct bnad_txf_info *txf_info;
+
+	txf_info = &bnad->txf_table[txf_id];
+	txf_info->txf_id = txf_id;
+	txf_info->txf_config.flags =
+		BNA_TXF_CF_VLAN_WI_BASED | BNA_TXF_CF_ENABLE;
+}
+
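+/*
+ * Configure a receive function: RIT offset, small/large buffer split,
+ * VLAN stripping and optional RSS with a random Toeplitz hash key.
+ */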
+void
+bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss)
+{
+	struct bnad_rxf_info *rxf_info;
+
+	rxf_info = &bnad->rxf_table[rxf_id];
+	rxf_info->rxf_id = rxf_id;
+	rxf_info->rxf_config.rit_offset = rit_offset;
+	rxf_info->rxf_config.mcast_rxq_id = BNAD_MULTICAST_RXQ_ID;
+	if (bnad_small_large_rxbufs)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_SM_LG_RXQ;
+	if (bnad_vlan_strip)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_VLAN_STRIP;
+	if (rss) {
+		struct bna_rxf_rss *rxf_rss;
+
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_RSS_ENABLE;
+		rxf_rss = &rxf_info->rxf_config.rss;
+		rxf_rss->type =
+			BNA_RSS_V4_TCP | BNA_RSS_V4_IP | BNA_RSS_V6_TCP |
+			BNA_RSS_V6_IP;
+		rxf_rss->hash_mask = bnad->cq_num - 1;
+		get_random_bytes(rxf_rss->toeplitz_hash_key,
+				 sizeof(rxf_rss->toeplitz_hash_key));
+	}
+}
+
+static int bnad_init_funcs(struct bnad *bnad)
+{
+	bnad->txf_table =
+		kcalloc(bnad->txf_num, sizeof(struct bnad_txf_info),
+			GFP_KERNEL);
+	if (!bnad->txf_table)
+		return -ENOMEM;
+	bnad_txf_init(bnad, BNAD_TX_FUNC_ID);
+
+	bnad->rxf_table =
+		kcalloc(bnad->rxf_num, sizeof(struct bnad_rxf_info),
+			GFP_KERNEL);
+	if (!bnad->rxf_table)
+		return -ENOMEM;
+	bnad_rxf_init(bnad, BNAD_RX_FUNC_ID, BNAD_RIT_OFFSET,
+		      (bnad->cq_num > 1) ? 1 : 0);
+	return 0;
+}
+
+static void bnad_setup_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	txqinfo = &bnad->txq_table[txq_id];
+
+	/* CEE state should not change while we do this */
+	spin_lock_irq(&bnad->priv_lock);
+	if (!bnad->cee_linkup) {
+		txqinfo->txq_config.priority = bnad->curr_priority = txq_id;
+		clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	} else {
+		txqinfo->txq_config.priority = bnad->curr_priority =
+			bnad->priority;
+		set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	}
+	/*  Set wrr_quota properly if multiple priorities/TxQs are enabled. */
+	txqinfo->txq_config.wrr_quota = BNAD_TX_MAX_WRR_QUOTA;
+	bna_txq_config(bnad->priv, &txqinfo->txq, txq_id, &txqinfo->txq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	rxqinfo = &bnad->rxq_table[rxq_id];
+
+	/*
+	 * Every RxQ set has 2 RxQs: the first is large buffer RxQ,
+	 * the second is small buffer RxQ.
+	 */
+	if ((rxq_id % bnad_rxqs_per_cq) == 0)
+		rxqinfo->rxq_config.buffer_size =
+			(bnad_vlan_strip ? VLAN_ETH_HLEN : ETH_HLEN) +
+			bnad->netdev->mtu + ETH_FCS_LEN;
+	else
+		rxqinfo->rxq_config.buffer_size = BNAD_SMALL_RXBUF_SIZE;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxq_config(bnad->priv, &rxqinfo->rxq, rxq_id, &rxqinfo->rxq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	cqinfo = &bnad->cq_table[cq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_cq_config(bnad->priv, &cqinfo->cq, cq_id, &cqinfo->cq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_queues(struct bnad *bnad)
+{
+	uint i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_txq(bnad, i);
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_setup_rxq(bnad, i);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_cq(bnad, i);
+}
+
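+/*
+ * Program the RSS indirection table: entry i points at the large (and,
+ * if enabled, small) RxQ pair behind CQ i.
+ */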
+static void bnad_setup_rit(struct bnad *bnad)
+{
+	int i, size;
+
+	size = bnad->cq_num;
+	for (i = 0; i < size; i++) {
+		if (bnad_small_large_rxbufs) {
+			bnad->rit[i].large_rxq_id = (i << 1);
+			bnad->rit[i].small_rxq_id = (i << 1) + 1;
+		} else {
+			bnad->rit[i].large_rxq_id = i;
+		}
+	}
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rit_config_set(bnad->priv, BNAD_RIT_OFFSET, bnad->rit, size);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo = &bnad->rxq_table[rxq_id];
+
+	bnad_alloc_rxbufs(rxqinfo);
+}
+
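+/*
+ * Program the hardware: stop any active queues, write the queue, RIT and
+ * Tx/Rx function configurations, then restore the MAC address, MTU,
+ * pause, multicast and VLAN state.
+ */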
+static int bnad_config_hw(struct bnad *bnad)
+{
+	int i, err = 0;
+	u64 rxq_id_mask = 0;
+	struct sockaddr sa;
+	struct net_device *netdev = bnad->netdev;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Disable the RxF until later bringing port up. */
+	bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+	for (i = 0; i < bnad->txq_num; i++) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_txq(bnad, i);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_queues(bnad);
+
+	bnad_setup_rit(bnad);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txf_config_set(bnad->priv, BNAD_TX_FUNC_ID,
+			   &bnad->txf_table->txf_config);
+	for (i = 0; i < bnad->rxf_num; i++) {
+		bna_rxf_config_set(bnad->priv, i,
+				   &bnad->rxf_table[i].rxf_config);
+		bna_rxf_vlan_filter(bnad->priv, i, BNA_ENABLE);
+	}
+
+	spin_unlock_irq(&bnad->priv_lock);
+	/* Mailbox should be enabled before this! */
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	err = bnad_set_mac_address_locked(netdev, &sa);
+	spin_lock_irq(&bnad->priv_lock);
+	if (err || BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Receive broadcasts */
+	bna_rxf_broadcast(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+
+	bna_mtu_info(bnad->priv, netdev->mtu, bnad);
+	bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+
+	bna_rxf_mcast_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	bna_mcast_mac_reset_list(bnad->priv);
+
+	bnad_set_rx_mode_locked(bnad->netdev);
+
+	bnad_reconfig_vlans(bnad);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_ibs(bnad);
+	return 0;
+
+unlock_and_return:
+	if (BNAD_NOT_READY(bnad))
+		err = BNA_FAIL;
+	spin_unlock_irq(&bnad->priv_lock);
+	return err;
+}
+
+/* Note: bnad_cleanup doesn't free irqs */
+static void bnad_cleanup(struct bnad *bnad)
+{
+	kfree(bnad->rit);
+	bnad->rit = NULL;
+
+	kfree(bnad->txf_table);
+	bnad->txf_table = NULL;
+
+	kfree(bnad->rxf_table);
+	bnad->rxf_table = NULL;
+
+	bnad_free_ibs(bnad);
+	bnad_free_queues(bnad);
+}
+
+/* Should be called with rtnl_lock held. */
+static int bnad_init(struct bnad *bnad)
+{
+	int err;
+
+	err = bnad_alloc_ibs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_init_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad_ibs_init(bnad);
+
+	err = bnad_init_funcs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_alloc_unmap_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad->rit =
+		kcalloc(bnad->cq_num, sizeof(struct bna_rit_entry),
+			GFP_KERNEL);
+	if (!bnad->rit) {
+		err = -ENOMEM;
+		goto finished;
+	}
+
+	return 0;
+
+finished:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+static int bnad_enable_locked(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int err = 0;
+	uint i;
+
+	bnad->state = BNAD_S_OPENING;
+
+	err = bnad_init(bnad);
+	if (err) {
+		pr_info("%s init failed %d", netdev->name, err);
+		bnad->state = BNAD_S_INIT;
+		return err;
+	}
+
+	err = bnad_config_hw(bnad);
+	if (err) {
+		pr_info("%s config HW failed %d", netdev->name, err);
+		goto init_failed;
+	}
+
+	err = bnad_request_txrx_irqs(bnad);
+	if (err) {
+		pr_info("%s requests Tx/Rx irqs failed: %d",
+			bnad->netdev->name, err);
+		goto init_failed;
+	}
+	bnad_napi_init(bnad);
+	bnad_napi_enable(bnad);
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_alloc_for_rxq(bnad, i);
+
+	bnad->state = BNAD_S_OPEN;
+	pr_info("%s is opened", bnad->netdev->name);
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad)) {
+		/* Let bnad_error take care of the error. */
+		spin_unlock_irq(&bnad->priv_lock);
+		return 0;
+	}
+
+	/* RxF was disabled earlier. */
+	bna_rxf_enable(bnad->priv, BNAD_RX_FUNC_ID);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	return 0;
+
+init_failed:
+	bnad_cleanup(bnad);
+	bnad->state = BNAD_S_INIT;
+	return err;
+}
+
+/* Should be called with conf_lock held */
+static int
+bnad_open_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		err = bnad_enable_locked(bnad);
+		break;
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		pr_info("%s is not ready yet: IOC down", netdev->name);
+		break;
+	case BNAD_S_INIT_DISABLED:
+		bnad->state = BNAD_S_OPEN_DISABLED;
+		pr_info("%s is not ready yet: IOC disabled",
+			netdev->name);
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
+	return err;
+}
+
+int
+bnad_open_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (!err && (bnad->state == BNAD_S_OPEN))
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return err;
+}
+
+int
+bnad_open(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+
+	pr_info("%s open", netdev->name);
+
+	mutex_lock(&bnad->conf_mutex);
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags))
+		pr_info("%s is disabled", netdev->name);
+	else
+		err = bnad_open_locked(netdev);
+
+	mutex_unlock(&bnad->conf_mutex);
+
+	return err;
+}
+
+static int bnad_disable_locked(struct bnad *bnad)
+{
+	int err = 0, i;
+	u64 rxq_id_mask = 0;
+
+	bnad_stop_data_path(bnad, 0);
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			goto cleanup;
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			goto cleanup;
+	}
+
+cleanup:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+int
+bnad_stop_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	return bnad_stop_locked_internal(netdev);
+}
+
+int
+bnad_stop(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+
+	pr_info("%s stop", netdev->name);
+
+	mutex_lock(&bnad->conf_mutex);
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags))
+		pr_info("%s port is disabled", netdev->name);
+	else
+		err = bnad_stop_locked(netdev);
+
+	mutex_unlock(&bnad->conf_mutex);
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	err = bnad_stop_locked_internal(netdev);
+	if (err) {
+		pr_info("%s sw reset internal: stop failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (err) {
+		pr_info("%s sw reset internal: open failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+	return 0;
+done:
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (bnad->state != BNAD_S_OPEN)
+		return 0;
+
+	bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	err = bnad_sw_reset_locked_internal(netdev);
+
+	if (err) {
+		pr_info("%s sw reset: failed %d", bnad->netdev->name,
+			err);
+		return err;
+	}
+
+	/* After the reset, make sure we are in the OPEN state. */
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return 0;
+}
+
+static int bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+	int err;
+
+	BUG_ON(skb_shinfo(skb)->gso_type != SKB_GSO_TCPV4 &&
+	       skb_shinfo(skb)->gso_type != SKB_GSO_TCPV6);
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err) {
+			bnad->stats.tso_err++;
+			return err;
+		}
+	}
+
+	/*
+	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
+	 * excluding the length field.
+	 */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		/* Do we really need these? */
+		iph->tot_len = 0;
+		iph->check = 0;
+
+		tcp_hdr(skb)->check =
+			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+					   IPPROTO_TCP, 0);
+		bnad->stats.tso4++;
+	} else {
+		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+		ipv6h->payload_len = 0;
+		tcp_hdr(skb)->check =
+			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+					 IPPROTO_TCP, 0);
+		bnad->stats.tso6++;
+	}
+
+	return 0;
+}
+
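+/*
+ * Transmit path: reclaim completed buffers when the queue runs low, then
+ * build one or more work items (up to 4 vectors each) for the skb header
+ * and fragments and ring the TxQ doorbell.
+ */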
+netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	struct bnad_unmap_q *unmap_q;
+	u16 txq_prod, vlan_tag = 0;
+	unsigned int unmap_prod, wis, wis_used, wi_range;
+	unsigned int vectors, vect_id, i, acked;
+	int err;
+	dma_addr_t dma_addr;
+	struct bna_txq_entry *txqent;
+	bna_txq_wi_ctrl_flag_t flags;
+
+	if (unlikely
+	    (skb->len <= ETH_HLEN || skb->len > BNAD_TX_MAX_DATA_PER_WI)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+	unmap_q = &txqinfo->skb_unmap_q;
+
+	vectors = 1 + skb_shinfo(skb)->nr_frags;
+	if (vectors > BNAD_TX_MAX_VECTORS) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	wis = BNAD_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
+	acked = 0;
+	if (unlikely
+	    (wis > BNA_Q_FREE_COUNT(txq) ||
+	     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+		if ((u16) (*txqinfo->hw_consumer_index) !=
+		    txq->q.consumer_index &&
+		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+			acked = bnad_free_txbufs(txqinfo,
+					(u16)(*txqinfo->hw_consumer_index));
+			bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+		} else
+			netif_stop_queue(netdev);
+
+		smp_mb();
+		/*
+		 * Check again to deal with race condition between
+		 * netif_stop_queue here, and netif_wake_queue in
+		 * interrupt handler which is not inside netif tx lock.
+		 */
+		if (likely
+		    (wis > BNA_Q_FREE_COUNT(txq) ||
+		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+			bnad->stats.netif_queue_stop++;
+			return NETDEV_TX_BUSY;
+		} else
+			netif_wake_queue(netdev);
+	}
+
+	unmap_prod = unmap_q->producer_index;
+	wis_used = 1;
+	vect_id = 0;
+	flags = 0;
+
+	txq_prod = txq->q.producer_index;
+	BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent, wi_range);
+	BUG_ON(!(wi_range && wi_range <= txq->q.q_depth));
+	txqent->hdr.wi.reserved = 0;
+	txqent->hdr.wi.num_vectors = vectors;
+	txqent->hdr.wi.opcode =
+		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+		       BNA_TXQ_WI_SEND));
+
+	if (bnad_ipid_mode)
+		flags |= BNA_TXQ_WI_CF_IPID_MODE;
+
+	if (bnad->vlangrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = (u16) vlan_tx_tag_get(skb);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+	if (test_bit(BNAD_F_CEE_RUNNING, &bnad->flags)) {
+		vlan_tag =
+			(bnad->curr_priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+
+	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+	if (skb_is_gso(skb)) {
+		err = bnad_tso_prepare(bnad, skb);
+		if (err) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
+		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+		txqent->hdr.wi.l4_hdr_size_n_offset =
+			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+			      (tcp_hdrlen(skb) >> 2,
+			       skb_transport_offset(skb)));
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = 0;
+
+		txqent->hdr.wi.lso_mss = 0;
+
+		if (skb->protocol == htons(ETH_P_IP))
+			proto = ip_hdr(skb)->protocol;
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* XXX the nexthdr may not be TCP immediately. */
+			proto = ipv6_hdr(skb)->nexthdr;
+		}
+		if (proto == IPPROTO_TCP) {
+			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.tcpcsum_offload++;
+			BUG_ON(skb_headlen(skb) <
+			       skb_transport_offset(skb) + tcp_hdrlen(skb));
+		} else if (proto == IPPROTO_UDP) {
+			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.udpcsum_offload++;
+			BUG_ON(skb_headlen(skb) <
+			       skb_transport_offset(skb) +
+			       sizeof(struct udphdr));
+		} else {
+			err = skb_checksum_help(skb);
+			bnad->stats.csum_help++;
+			if (err) {
+				dev_kfree_skb(skb);
+				bnad->stats.csum_help_err++;
+				return NETDEV_TX_OK;
+			}
+		}
+	} else {
+		txqent->hdr.wi.lso_mss = 0;
+		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+	}
+
+	txqent->hdr.wi.flags = htons(flags);
+
+	txqent->hdr.wi.frame_length = htonl(skb->len);
+
+	unmap_q->unmap_array[unmap_prod].skb = skb;
+	BUG_ON(!(skb_headlen(skb) <= BNAD_TX_MAX_DATA_PER_VECTOR));
+	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	dma_addr =
+		pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+			       PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+			   dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
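+	/*
+	 * Map each page fragment.  A work item holds at most
+	 * BNAD_TX_MAX_VECTORS_PER_WI vectors, so open an extension work
+	 * item whenever the current one fills up.
+	 */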
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		if (++vect_id == BNAD_TX_MAX_VECTORS_PER_WI) {
+			vect_id = 0;
+			if (--wi_range)
+				txqent++;
+			else {
+				BNA_QE_INDX_ADD(txq_prod, wis_used,
+						txq->q.q_depth);
+				wis_used = 0;
+				BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent,
+						     wi_range);
+				BUG_ON(!(wi_range &&
+					   wi_range <= txq->q.q_depth));
+			}
+			wis_used++;
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+		}
+
+		txqent->vector[vect_id].length = htons(frag->size);
+		dma_addr =
+			pci_map_page(bnad->pcidev, frag->page,
+				     frag->page_offset, frag->size,
+				     PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+	}
+
+	unmap_q->producer_index = unmap_prod;
+	BNA_QE_INDX_ADD(txq_prod, wis_used, txq->q.q_depth);
+	txq->q.producer_index = txq_prod;
+
+	smp_mb();
+	bna_txq_prod_indx_doorbell(txq);
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index)
+		tasklet_schedule(&bnad->tx_free_tasklet);
+
+	return NETDEV_TX_OK;
+}
+
+struct net_device_stats *
+bnad_get_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &bnad->net_stats;
+	struct cna_stats_mac_rx *rxstats = &bnad->hw_stats->mac_rx_stats;
+	struct cna_stats_mac_tx *txstats = &bnad->hw_stats->mac_tx_stats;
+	int i;
+
+	memset(net_stats, 0, sizeof(*net_stats));
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			net_stats->rx_packets += bnad->rxq_table[i].rx_packets;
+			net_stats->rx_bytes += bnad->rxq_table[i].rx_bytes;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			net_stats->tx_packets += bnad->txq_table[i].tx_packets;
+			net_stats->tx_bytes += bnad->txq_table[i].tx_bytes;
+		}
+	}
+	net_stats->rx_errors =
+		rxstats->rx_fcs_error + rxstats->rx_alignment_error +
+		rxstats->rx_frame_length_error + rxstats->rx_code_error +
+		rxstats->rx_undersize;
+	net_stats->tx_errors = txstats->tx_fcs_error + txstats->tx_undersize;
+	net_stats->rx_dropped = rxstats->rx_drop;
+	net_stats->tx_dropped = txstats->tx_drop;
+	net_stats->multicast = rxstats->rx_multicast;
+	net_stats->collisions = txstats->tx_total_collision;
+
+	net_stats->rx_length_errors = rxstats->rx_frame_length_error;
+	net_stats->rx_crc_errors = rxstats->rx_fcs_error;
+	net_stats->rx_frame_errors = rxstats->rx_alignment_error;
+	/* recv'r fifo overrun */
+	net_stats->rx_fifo_errors = bnad->hw_stats->rxf_stats[0].frame_drops;
+
+	return net_stats;
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_set_rx_mode_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (netdev->flags & IFF_PROMISC) {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+		bnad->config |= BNAD_CF_PROMISC;
+	} else {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_DISABLE);
+		bnad->config &= ~BNAD_CF_PROMISC;
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		if (!(bnad->config & BNAD_CF_ALLMULTI)) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_DISABLE);
+			bnad->config |= BNAD_CF_ALLMULTI;
+		}
+	} else {
+		if (bnad->config & BNAD_CF_ALLMULTI) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_ENABLE);
+			bnad->config &= ~BNAD_CF_ALLMULTI;
+		}
+	}
+
+	if (netdev->mc_count) {
+		struct mac *mcaddr_list;
+		struct dev_mc_list *mc;
+		int i;
+
+		mcaddr_list =
+			kcalloc((netdev->mc_count + 1), sizeof(struct mac),
+				GFP_ATOMIC);
+		if (!mcaddr_list)
+			return;
+
+		mcaddr_list[0] = bna_bcast_addr;
+
+		mc = netdev->mc_list;
+		for (i = 1; mc && i < netdev->mc_count + 1; i++, mc = mc->next)
+			memcpy(&mcaddr_list[i], mc->dmi_addr,
+				sizeof(struct mac));
+
+		err = bna_rxf_mcast_mac_set_list(bnad->priv, BNAD_RX_FUNC_ID,
+			(const struct mac *)mcaddr_list,
+				 netdev->mc_count + 1);
+
+		/* XXX Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+
+		kfree(mcaddr_list);
+	}
+}
+
+static void bnad_set_rx_mode(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bnad_set_rx_mode_locked(netdev);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held. */
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 * mac_ptr,
+	unsigned int cmd)
+{
+	int err = 0;
+	enum bna_status(*ucast_mac_func) (struct bna_dev *bna_dev,
+		unsigned int rxf_id, const struct mac *mac_addr_ptr) = NULL;
+
+	WARN_ON(in_interrupt());
+	if (!is_valid_ether_addr(mac_ptr))
+		return -EINVAL;
+
+	switch (cmd) {
+	case BNAD_UCAST_MAC_SET:
+		ucast_mac_func = bna_rxf_ucast_mac_set;
+		break;
+	case BNAD_UCAST_MAC_ADD:
+		ucast_mac_func = bna_rxf_ucast_mac_add;
+		break;
+	case BNAD_UCAST_MAC_DEL:
+		ucast_mac_func = bna_rxf_ucast_mac_del;
+		break;
+	}
+
+	init_completion(&bnad->ucast_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = ucast_mac_func(bnad->priv, rxf_id, (const struct mac *)mac_ptr);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto ucast_mac_exit;
+	}
+	wait_for_completion(&bnad->ucast_comp);
+	err = bnad->ucast_comp_status;
+	if (err == BFI_LL_CMD_NOT_EXEC)
+		err = 0;
+
+ucast_mac_exit:
+	if (err) {
+		pr_info("%s unicast MAC address command %d failed: %d",
+			bnad->netdev->name, cmd, err);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int err;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	err = bnad_ucast_mac(bnad, BNAD_RX_FUNC_ID, (u8 *) sa->sa_data,
+			     BNAD_UCAST_MAC_SET);
+	if (err)
+		return err;
+
+	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+	return 0;
+}
+
+static int bnad_set_mac_address(struct net_device *netdev, void *addr)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	mutex_lock(&bnad->conf_mutex);
+	err = bnad_set_mac_address_locked(netdev, addr);
+	mutex_unlock(&bnad->conf_mutex);
+	return err;
+}
+
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	WARN_ON(in_interrupt());
+
+	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+		return -EINVAL;
+
+	mutex_lock(&bnad->conf_mutex);
+	netdev->mtu = new_mtu;
+	err = bnad_sw_reset_locked(netdev);
+	mutex_unlock(&bnad->conf_mutex);
+
+	return err;
+}
+
+static void bnad_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *grp)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	mutex_lock(&bnad->conf_mutex);
+	bnad->vlangrp = grp;
+	mutex_unlock(&bnad->conf_mutex);
+}
+
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	mutex_lock(&bnad->conf_mutex);
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	mutex_unlock(&bnad->conf_mutex);
+}
+
+static void bnad_vlan_rx_kill_vid(struct net_device *netdev,
+	unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	mutex_lock(&bnad->conf_mutex);
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_del(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	mutex_unlock(&bnad->conf_mutex);
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_reconfig_vlans(struct bnad *bnad)
+{
+	u16 vlan_id;
+
+	bna_rxf_vlan_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	if (bnad->vlangrp) {
+		for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
+			if (vlan_group_get_device(bnad->vlangrp, vlan_id))
+				bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+						 (unsigned int)vlan_id);
+		}
+	}
+}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnad_netpoll(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_cq_info *cqinfo;
+	int i;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		disable_irq(bnad->pcidev->irq);
+		bnad_isr(bnad->pcidev->irq, netdev);
+		enable_irq(bnad->pcidev->irq);
+	} else {
+		for (i = 0; i < bnad->cq_num; i++) {
+			cqinfo = &bnad->cq_table[i];
+			if (likely(napi_schedule_prep(&cqinfo->napi))) {
+				bnad_disable_rx_irq(bnad, cqinfo);
+				__napi_schedule(&cqinfo->napi);
+			}
+		}
+	}
+}
+#endif
+
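+/*
+ * Derive the TxQ/RxQ/CQ counts and the number of MSI-X vectors: one per
+ * TxQ, one per CQ, plus one for the error/mailbox interrupt.
+ */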
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets)
+{
+	bnad->txq_num = BNAD_TXQ_NUM;
+	bnad->txf_num = 1;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (rxqsets) {
+			bnad->cq_num = rxqsets;
+			if (bnad->cq_num > BNAD_MAX_CQS)
+				bnad->cq_num = BNAD_MAX_CQS;
+		} else
+			bnad->cq_num =
+				min((uint) num_online_cpus(),
+				    (uint) BNAD_MAX_RXQSETS_USED);
+		/* RSS hashing needs a power-of-2 number of CQs. */
+		if (!BNA_POWER_OF_2(bnad->cq_num))
+			BNA_TO_POWER_OF_2(bnad->cq_num);
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+
+		bnad->rxf_num = 1;
+		bnad->msix_num =
+			bnad->txq_num + bnad->cq_num +
+			BNAD_MSIX_ERR_MAILBOX_NUM;
+	} else {
+		bnad->cq_num = 1;
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+		bnad->rxf_num = 1;
+		bnad->msix_num = 0;
+	}
+}
+
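+/*
+ * Try to enable MSI-X.  If fewer vectors are available than requested,
+ * shrink the RxQ set configuration and retry; on failure fall back to
+ * INTx mode without RSS.
+ */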
+static void bnad_enable_msix(struct bnad *bnad)
+{
+	int i, ret;
+
+	if (!(bnad->config & BNAD_CF_MSIX) || bnad->msix_table)
+		return;
+
+	bnad->msix_table =
+		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+	if (!bnad->msix_table)
+		goto intx_mode;
+
+	for (i = 0; i < bnad->msix_num; i++)
+		bnad->msix_table[i].entry = i;
+
+	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+	if (ret > 0) {
+		/* Not enough MSI-X vectors. */
+		int rxqsets = ret;
+
+		dev_err(&bnad->pcidev->dev,
+			"Tried to get %d MSI-X vectors, only got %d\n",
+			bnad->msix_num, ret);
+		BNA_TO_POWER_OF_2(rxqsets);
+		while (bnad->msix_num > ret && rxqsets) {
+			bnad_q_num_init(bnad, rxqsets);
+			rxqsets >>= 1;
+		}
+		if (bnad->msix_num <= ret) {
+			ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+					      bnad->msix_num);
+			if (ret) {
+				dev_err(&bnad->pcidev->dev,
+					"Enabling MSI-X failed: %d\n", ret);
+				goto intx_mode;
+			}
+		} else {
+			dev_err(&bnad->pcidev->dev,
+				"Enabling MSI-X failed: limited (%d) vectors\n",
+				ret);
+			goto intx_mode;
+		}
+	} else if (ret < 0) {
+		dev_err(&bnad->pcidev->dev, "Enabling MSI-X failed: %d\n", ret);
+		goto intx_mode;
+	}
+
+	dev_info(&bnad->pcidev->dev,
+		 "Enabling MSI-X succeeded with %d vectors, %s\n",
+		 bnad->msix_num,
+		 (bnad->cq_num > 1) ? "RSS is enabled" : "RSS is not enabled");
+	return;
+
+intx_mode:
+	dev_warn(&bnad->pcidev->dev, "Switching to INTx mode with no RSS\n");
+
+	kfree(bnad->msix_table);
+	bnad->msix_table = NULL;
+
+	bnad->config &= ~BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, 0);
+}
+
+static void bnad_disable_msix(struct bnad *bnad)
+{
+	if (bnad->config & BNAD_CF_MSIX) {
+		pci_disable_msix(bnad->pcidev);
+		kfree(bnad->msix_table);
+		bnad->msix_table = NULL;
+		bnad->config &= ~BNAD_CF_MSIX;
+	}
+}
+
+static void bnad_error(struct bnad *bnad)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	if (!test_and_clear_bit(BNAD_F_HWERROR, &bnad->flags)) {
+		spin_unlock_irq(&bnad->priv_lock);
+		return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		bnad_stop_data_path(bnad, 1);
+		bnad_cleanup(bnad);
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BUG_ON(1);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
+static void bnad_resume_after_reset(struct bnad *bnad)
+{
+	int err;
+	struct net_device *netdev = bnad->netdev;
+
+	switch (bnad->state) {
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT;
+
+		bna_port_mac_get(bnad->priv, (struct mac *)netdev->perm_addr);
+		if (is_zero_ether_addr(netdev->dev_addr))
+			memcpy(netdev->dev_addr, netdev->perm_addr,
+			       netdev->addr_len);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		err = bnad_enable_locked(bnad);
+		if (err) {
+			pr_info("%s bnad_enable failed after reset: %d",
+				bnad->netdev->name, err);
+		} else {
+			bnad_port_admin_locked(bnad, BNA_ENABLE);
+		}
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BUG_ON(1);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
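+/* Tasklet that reclaims completed Tx buffers outside the xmit path. */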
+static void bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+	struct bnad *bnad = (struct bnad *)bnad_ptr;
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	unsigned int acked;
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index &&
+	    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+		acked = bnad_free_txbufs(txqinfo,
+					 (u16)(*txqinfo->hw_consumer_index));
+		bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+	}
+}
+
+static void bnad_cee_reconfig_prio(struct bnad *bnad, u8 cee_linkup,
+	unsigned int prio)
+{
+	if (prio != bnad->curr_priority)
+		bnad_sw_reset_locked_internal(bnad->netdev);
+	else {
+		spin_lock_irq(&bnad->priv_lock);
+		if (!cee_linkup)
+			clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		else
+			set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		spin_unlock_irq(&bnad->priv_lock);
+	}
+}
+
+static void bnad_link_state_notify(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+	unsigned int prio = 0;
+
+	if (bnad->state != BNAD_S_OPEN) {
+		pr_info("%s link state change ignored in state %d",
+			netdev->name, bnad->state);
+		return;
+	}
+
+	spin_lock_irq(&bnad->priv_lock);
+	link_state = bnad->link_state;
+	cee_linkup = bnad->cee_linkup;
+	if (cee_linkup)
+		prio = bnad->priority;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (link_state == BNAD_LS_UP) {
+		bnad_cee_reconfig_prio(bnad, cee_linkup, prio);
+		if (!netif_carrier_ok(netdev)) {
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			netif_carrier_off(netdev);
+			bnad->stats.netif_queue_stop++;
+		}
+	}
+}
+
+static void bnad_work(struct work_struct *work)
+{
+	struct bnad *bnad = container_of(work, struct bnad, work);
+	unsigned long work_flags;
+
+	mutex_lock(&bnad->conf_mutex);
+
+	spin_lock_irq(&bnad->priv_lock);
+	work_flags = bnad->work_flags;
+	bnad->work_flags = 0;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (work_flags & BNAD_WF_ERROR)
+		bnad_error(bnad);
+	if (work_flags & BNAD_WF_RESETDONE)
+		bnad_resume_after_reset(bnad);
+
+	if (work_flags & BNAD_WF_LS_NOTIFY)
+		bnad_link_state_notify(bnad);
+
+	mutex_unlock(&bnad->conf_mutex);
+}
+
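+/*
+ * Periodic stats timer: kick off a hardware stats fetch, adapt the Rx
+ * interrupt coalescing timers, and refill any RxQ that has drained below
+ * the refill threshold.
+ */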
+static void bnad_stats_timeo(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	int i;
+	struct bnad_rxq_info *rxqinfo;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_stats_get(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->rx_dyn_coalesce_on) {
+		u8 cls_timer;
+		struct bnad_cq_info *cq;
+		for (i = 0; i < bnad->cq_num; i++) {
+			cq = &bnad->cq_table[i];
+
+			if ((cq->pkt_rate.small_pkt_cnt == 0) &&
+			    (cq->pkt_rate.large_pkt_cnt == 0))
+				continue;
+
+			cls_timer =
+				bna_calc_coalescing_timer(bnad->priv,
+							  &cq->pkt_rate);
+
+			/* For NAPI, the coalescing timer needs to be stored */
+			cq->rx_coalescing_timeo = cls_timer;
+
+			bna_ib_coalescing_timer_set(bnad->priv, &cq->ib,
+						    cls_timer);
+		}
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		if (!(BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+					rxqinfo->skb_unmap_q.q_depth) >>
+		      BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) {
+			if (test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags))
+				continue;
+			bnad_alloc_rxbufs(rxqinfo);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+		}
+	}
+}
+
+static void
+bnad_free_ioc_mem(struct bnad *bnad)
+{
+	enum bna_dma_mem_type i;
+
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		if (bnad->ioc_meminfo[i].kva && bnad->ioc_meminfo[i].dma)
+			pci_free_consistent(bnad->pcidev,
+					    bnad->ioc_meminfo[i].len,
+					    bnad->ioc_meminfo[i].kva,
+					    *(dma_addr_t *) &bnad->
+					    ioc_meminfo[i].dma);
+		else if (bnad->ioc_meminfo[i].kva)
+			vfree(bnad->ioc_meminfo[i].kva);
+		bnad->ioc_meminfo[i].kva = NULL;
+	}
+}
+
+/* The following IOC callback functions are called with priv_lock held. */
+
+void
+bna_iocll_enable_cbfn(void *arg, enum bfa_status error)
+{
+	struct bnad *bnad = arg;
+
+	if (!error) {
+		bnad->work_flags &= ~BNAD_WF_LS_NOTIFY;
+		bnad->work_flags |= BNAD_WF_RESETDONE;
+
+		if (bnad->state != BNAD_S_UNLOADING)
+			schedule_work(&bnad->work);
+	}
+
+	bnad->ioc_comp_status = error;
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_disable_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_hbfail_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	bnad_hw_error(bnad, BFA_STATUS_IOC_FAILURE);
+}
+
+void
+bna_iocll_reset_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+	u32 int_status, int_mask;
+	unsigned int irq;
+
+	/* Clear the status */
+	bna_intr_status_get(bnad->priv, &int_status);
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (test_and_clear_bit(BNAD_F_MBOX_IRQ_DISABLED,
+		    &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			enable_irq(irq);
+		}
+	}
+
+	int_mask = ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS);
+	bna_intx_enable(bnad->priv, int_mask);
+}
+
+s32
+bnad_cee_attach(struct bnad *bnad)
+{
+	u8 *dma_kva;
+	dma_addr_t dma_pa;
+	struct bfa_cee *cee = &bnad->cee;
+
+	memset(cee, 0, sizeof(struct bfa_cee));
+
+	/* Allocate memory for dma */
+	dma_kva =
+		pci_alloc_consistent(bnad->pcidev, bfa_cee_meminfo(), &dma_pa);
+	if (dma_kva == NULL)
+		return -ENOMEM;
+
+	/* Ugly... need to remove once CAL is fixed. */
+	((struct bna_dev *) bnad->priv)->cee = cee;
+
+	bnad->cee_cbfn.get_attr_cbfn = bnad_cee_get_attr_cb;
+	bnad->cee_cbfn.get_stats_cbfn = bnad_cee_get_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = bnad_cee_reset_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = NULL;
+
+	/* Invoke cee attach function */
+	bfa_cee_attach(cee, &bnad->priv->ioc, bnad, bnad->trcmod, bnad->logmod);
+	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
+	return 0;
+}
+
+static void bnad_cee_detach(struct bnad *bnad)
+{
+	struct bfa_cee *cee = &bnad->cee;
+
+	if (cee->attr_dma.kva) {
+		pci_free_consistent(bnad->pcidev, bfa_cee_meminfo(),
+				    cee->attr_dma.kva, cee->attr_dma.pa);
+	}
+}
+
+static int bnad_priv_init(struct bnad *bnad)
+{
+	dma_addr_t dma_addr;
+	struct bna_dma_addr bna_dma_addr;
+	int err = 0, i;
+	struct bfa_pcidev pcidev_info;
+	u32 intr_mask;
+
+	if (bnad_msix)
+		bnad->config |= BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, bnad_rxqsets_used);
+
+	bnad->work_flags = 0;
+	INIT_WORK(&bnad->work, bnad_work);
+
+	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+		     (unsigned long)bnad);
+
+	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
+		    (unsigned long)bnad);
+
+	bnad->tx_coalescing_timeo = BNAD_TX_COALESCING_TIMEO;
+	bnad->tx_interpkt_count = BNAD_TX_INTERPKT_COUNT;
+
+	bnad->rx_coalescing_timeo = BNAD_RX_COALESCING_TIMEO;
+	bnad->rx_interpkt_count = BNAD_RX_INTERPKT_COUNT;
+	bnad->rx_interpkt_timeo = BNAD_RX_INTERPKT_TIMEO;
+
+	bnad->rx_dyn_coalesce_on = true;
+
+	bnad->rx_csum = 1;
+	bnad->pause_config.tx_pause = 0;
+	bnad->pause_config.rx_pause = 0;
+
+	/* XXX could be vmalloc? */
+	bnad->trcmod = kzalloc(sizeof(struct bfa_trc_mod), GFP_KERNEL);
+	if (!bnad->trcmod) {
+		printk(KERN_ERR "port %u failed allocating trace buffer!\n",
+		       bnad->bna_id);
+		return -ENOMEM;
+	}
+
+	bfa_trc_init(bnad->trcmod);
+
+	bnad->logmod = NULL;
+
+	bnad->priv = kzalloc(bna_get_handle_size(), GFP_KERNEL);
+	if (!bnad->priv) {
+		printk(KERN_ERR "port %u failed allocating memory for bna\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_trcmod;
+	}
+	bnad->priv_stats =
+		pci_alloc_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+				     &dma_addr);
+	if (!bnad->priv_stats) {
+		printk(KERN_ERR
+		       "port %u failed allocating memory for bna stats\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_priv_mem;
+	}
+	pci_unmap_addr_set(bnad, priv_stats_dma, dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &bna_dma_addr);
+	bna_init(bnad->priv, (void *)bnad->bar0, bnad->priv_stats, bna_dma_addr,
+		 bnad->trcmod, bnad->logmod);
+	bna_all_stats_get(bnad->priv, &bnad->hw_stats);
+
+	spin_lock_init(&bnad->priv_lock);
+	mutex_init(&bnad->conf_mutex);
+	bnad->priv_cbfn.ucast_set_cb = bnad_ucast_set_cb;
+	bnad->priv_cbfn.txq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.rxq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.link_up_cb = bnad_link_up_cb;
+	bnad->priv_cbfn.link_down_cb = bnad_link_down_cb;
+	bnad->priv_cbfn.stats_get_cb = bnad_stats_get_cb;
+	bnad->priv_cbfn.hw_error_cb = bnad_hw_error_cb;
+	bnad->priv_cbfn.lldp_get_cfg_cb = bnad_lldp_get_cfg_cb;
+
+	bna_register_callback(bnad->priv, &bnad->priv_cbfn, bnad);
+
+	bna_iocll_meminfo(bnad->priv, bnad->ioc_meminfo);
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		switch (i) {
+		case BNA_KVA_MEM_T_FWTRC:
+			bnad->ioc_meminfo[i].kva =
+				vmalloc(bnad->ioc_meminfo[i].len);
+			break;
+		default:
+			bnad->ioc_meminfo[i].kva =
+				pci_alloc_consistent(bnad->pcidev,
+						     bnad->ioc_meminfo[i].len,
+						     (dma_addr_t *)
+						     &bnad->ioc_meminfo[i].dma);
+			break;
+		}
+		if (!bnad->ioc_meminfo[i].kva) {
+			printk(KERN_ERR
+			       "port %u failed allocating %u "
+			       "bytes memory for IOC\n",
+			       bnad->bna_id, bnad->ioc_meminfo[i].len);
+			err = -ENOMEM;
+			goto free_ioc_mem;
+		} else
+			memset(bnad->ioc_meminfo[i].kva, 0,
+			       bnad->ioc_meminfo[i].len);
+	}
+
+	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+	pcidev_info.device_id = bnad->pcidev->device;
+	pcidev_info.pci_bar_kva = bnad->bar0;
+	bna_iocll_attach(bnad->priv, bnad, bnad->ioc_meminfo, &pcidev_info,
+			 bnad->trcmod, NULL, bnad->logmod);
+
+	err = bnad_cee_attach(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u cee_attach failed: %d\n", bnad->bna_id,
+		       err);
+		goto iocll_detach;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bnad_enable_msix(bnad);
+	else
+		dev_info(&bnad->pcidev->dev, "Working in INTx mode, no RSS\n");
+	bna_intx_disable(bnad->priv, &intr_mask);
+	err = bnad_request_mbox_irq(bnad);
+	if (err)
+		goto disable_msix;
+
+	mutex_lock(&bnad->conf_mutex);
+	bnad->state = BNAD_S_START;
+
+	init_completion(&bnad->ioc_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_enable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	wait_for_completion(&bnad->ioc_comp);
+
+	if (!bnad->ioc_comp_status) {
+		bnad->state = BNAD_S_INIT;
+		bna_port_mac_get(bnad->priv,
+				 (struct mac *)bnad->netdev->perm_addr);
+	} else {
+		bnad->state = BNAD_S_INIT_DOWN;
+	}
+	mutex_unlock(&bnad->conf_mutex);
+
+	return 0;
+
+disable_msix:
+	bnad_disable_msix(bnad);
+	bnad_cee_detach(bnad);
+iocll_detach:
+	bna_iocll_detach(bnad->priv);
+free_ioc_mem:
+	bnad_free_ioc_mem(bnad);
+	pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE, bnad->priv_stats,
+			    pci_unmap_addr(bnad, priv_stats_dma));
+	bnad->priv_stats = NULL;
+free_priv_mem:
+	kfree(bnad->priv);
+	bnad->priv = NULL;
+free_trcmod:
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+
+	return err;
+}
+
+static void bnad_priv_uninit(struct bnad *bnad)
+{
+	int i;
+	enum bna_status err;
+
+	if (bnad->priv) {
+
+		init_completion(&bnad->ioc_comp);
+
+		for (i = 0; i < 10; i++) {
+			spin_lock_irq(&bnad->priv_lock);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			if (!err)
+				break;
+			msleep(1000);
+		}
+		if (err) {
+			/* Probably firmware crashed. */
+			pr_info("bna_iocll_disable failed, clean up and try again");
+			spin_lock_irq(&bnad->priv_lock);
+			bna_cleanup(bnad->priv);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+		}
+		wait_for_completion(&bnad->ioc_comp);
+
+		pr_info("port %u IOC is disabled", bnad->bna_id);
+
+		bnad->state = BNAD_S_UNLOADING;
+
+		del_timer_sync(&bnad->priv->ioc.ioc_timer);
+		del_timer_sync(&bnad->priv->ioc.hb_timer);
+		del_timer_sync(&bnad->priv->ioc.sem_timer);
+
+		bnad_free_ioc_mem(bnad);
+		bna_iocll_detach(bnad->priv);
+
+		mutex_destroy(&bnad->conf_mutex);
+
+		flush_scheduled_work();
+		bnad_free_mbox_irq(bnad);
+
+		bnad_disable_msix(bnad);
+
+		if (bnad->priv_stats) {
+			pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+					    bnad->priv_stats,
+					    pci_unmap_addr(bnad,
+							   priv_stats_dma));
+			bnad->priv_stats = NULL;
+		}
+		kfree(bnad->priv);
+		bnad->priv = NULL;
+	}
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+}
+
+DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
+			   PCI_DEVICE_ID_BROCADE_CATAPULT),
+		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
+		.class_mask = 0xffff00
+	},
+	{0, }
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static int __devinit bnad_pci_probe(struct pci_dev *pdev,
+	const struct pci_device_id *pcidev_id)
+{
+	int err, using_dac;
+	struct net_device *netdev;
+	struct bnad *bnad;
+	unsigned long mmio_start, mmio_len;
+	static u32 bna_id;
+
+	printk(KERN_INFO "bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+	mutex_lock(&bnad_fwimg_mutex);
+	if (!bfad_get_firmware_buf(pdev)) { /* Returns image */
+		mutex_unlock(&bnad_fwimg_mutex);
+		printk(KERN_WARNING "Failed to load Firmware Image!\n");
+		return -ENODEV;
+	}
+	mutex_unlock(&bnad_fwimg_mutex);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, BNAD_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed: %d\n", err);
+		goto disable_device;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (!err)
+			err = pci_set_consistent_dma_mask(pdev,
+							  DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"set 32bit DMA mask failed: %d\n", err);
+			goto release_regions;
+		}
+		using_dac = 0;
+	}
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct bnad));
+	if (!netdev) {
+		dev_err(&pdev->dev, "alloc_etherdev failed\n");
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	pci_set_drvdata(pdev, netdev);
+
+	bnad = netdev_priv(netdev);
+
+	bnad->netdev = netdev;
+	bnad->pcidev = pdev;
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bnad->bar0 = ioremap_nocache(mmio_start, mmio_len);
+	if (!bnad->bar0) {
+		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+		err = -ENOMEM;
+		goto free_devices;
+	}
+	printk(KERN_INFO "bar0 mapped to %p, len %lu\n", bnad->bar0, mmio_len);
+
+	netdev->netdev_ops = &bnad_netdev_ops;
+
+	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_IPV6_CSUM;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+
+	netdev->vlan_features = netdev->features;
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |=
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len - 1;
+
+	bnad_set_ethtool_ops(netdev);
+
+	bnad->bna_id = bna_id;
+	err = bnad_priv_init(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
+		goto unmap_bar0;
+	}
+
+	memcpy(netdev->dev_addr, netdev->perm_addr, netdev->addr_len);
+
+	netif_carrier_off(netdev);
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR "port %u register_netdev failed: %d\n",
+		       bnad->bna_id, err);
+		goto bnad_device_uninit;
+	}
+	bna_id++;
+	return 0;
+
+bnad_device_uninit:
+	bnad_priv_uninit(bnad);
+unmap_bar0:
+	iounmap(bnad->bar0);
+free_devices:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+release_regions:
+	pci_release_regions(pdev);
+disable_device:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void __devexit bnad_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct bnad *bnad;
+
+	if (!netdev)
+		return;
+
+	printk(KERN_INFO "%s bnad_pci_remove\n", netdev->name);
+	bnad = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	bnad_priv_uninit(bnad);
+	iounmap(bnad->bar0);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver bnad_pci_driver = {
+	.name = BNAD_NAME,
+	.id_table = bnad_pci_id_table,
+	.probe = bnad_pci_probe,
+	.remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init bnad_module_init(void)
+{
+
+	printk(KERN_INFO "Brocade 10G Ethernet driver\n");
+
+	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+	return pci_register_driver(&bnad_pci_driver);
+}
+
+static void __exit bnad_module_exit(void)
+{
+	pci_unregister_driver(&bnad_pci_driver);
+
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
diff -ruP net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.h net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.h
--- net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.h	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.h	2010-02-12 01:39:40.261979000 -0800
@@ -0,0 +1,343 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef _BNAD_H_
+#define _BNAD_H_
+
+#include "cee/bfa_cee.h"
+#include "bna.h"
+
+#define BNAD_MAX_Q_DEPTH	0x10000
+#define BNAD_MIN_Q_DEPTH	0x200
+
+#define BNAD_TXQ_NUM		1
+#define BNAD_TX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_TXQ	2048
+
+#define BNAD_MAX_RXQS		64
+#define BNAD_MAX_RXQSETS_USED	16
+#define BNAD_RX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_RXQ	2048
+
+#define BNAD_MAX_CQS		64
+#define BNAD_MAX_RXQS_PER_CQ	2
+
+#define BNAD_MSIX_ERR_MAILBOX_NUM	1
+
+#define BNAD_INTX_MAX_IB_NUM	16
+#define BNAD_INTX_IB_NUM	2	/* 1 for Tx, 1 for Rx */
+#define BNAD_INTX_TX_IB_ID	0
+#define BNAD_INTX_RX_IB_ID	1
+
+#define BNAD_QUEUE_NAME_SIZE	16
+
+#define BNAD_JUMBO_MTU		9000
+
+#define BNAD_COALESCING_TIMER_UNIT	5	/* 5us */
+#define BNAD_MAX_COALESCING_TIMEO	0xFF	/* in 5us units */
+#define BNAD_MAX_INTERPKT_COUNT		0xFF
+#define BNAD_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
+
+#define BNAD_TX_COALESCING_TIMEO	20	/* 20 * 5 = 100us */
+#define BNAD_TX_INTERPKT_COUNT		32
+
+#define BNAD_RX_COALESCING_TIMEO	12	/* 12 * 5 = 60us */
+#define BNAD_RX_INTERPKT_COUNT		6
+#define BNAD_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
+
+#define BNAD_SMALL_RXBUF_SIZE	128
+
+#define BNAD_RIT_OFFSET		0
+#define BNAD_MULTICAST_RXQ_ID	0
+
+#define BNAD_NETIF_WAKE_THRESHOLD	8
+
+#define BNAD_TX_MAX_VECTORS		255
+#define BNAD_TX_MAX_VECTORS_PER_WI	4
+#define BNAD_TX_MAX_DATA_PER_WI		0xFFFFFF	/* 24 bits */
+#define BNAD_TX_MAX_DATA_PER_VECTOR	0x3FFF	/* 14 bits */
+#define BNAD_TX_MAX_WRR_QUOTA		0xFFF	/* 12 bits */
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
+
+#define BNAD_NOT_READY(_bnad)	test_bit(BNAD_F_HWERROR, &(_bnad)->flags)
+#define BNAD_ADMIN_DOWN(_bnad)	(!netif_running((_bnad)->netdev) ||	\
+	test_bit(BNAD_F_BCU_DISABLED, &(_bnad)->flags))
+
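+/*
+ * Number of ring entries from _old_idx to _updated_idx;
+ * _q_depth must be a power of 2.
+ */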
+#define BNAD_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)	\
+	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_ct;
+extern u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+
+struct bnad_skb_unmap {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+	u32 producer_index;
+	u32 consumer_index;
+	struct bnad_skb_unmap *unmap_array;
+	u32 q_depth;
+};
+
+struct bnad_ib_entry {
+	struct bna_ib *ib;
+	void *ib_seg_addr;
+	struct bna_ib_config ib_config;
+};
+
+struct bnad_txq_info {
+	unsigned long flags;
+#define BNAD_TXQ_FREE_SENT	0
+	struct bna_txq txq;
+	struct bna_ib ib;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct bnad *bnad;
+	volatile u32 *hw_consumer_index;
+	struct bna_txq_config txq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_rxq_info {
+	unsigned long flags;
+#define BNAD_RXQ_REFILL		0
+	struct bna_rxq rxq;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_packets_with_error;
+	u64 rxbuf_alloc_failed;
+	struct bnad *bnad;
+	u32 rxq_id;
+	struct bna_rxq_config rxq_config;
+} ____cacheline_aligned;
+
+struct bnad_cq_info {
+	struct bna_cq cq;
+	struct bna_ib ib;
+	struct bnad *bnad;
+	struct bna_pkt_rate pkt_rate;
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	volatile u32 *hw_producer_index;
+	struct napi_struct napi;
+	u32 cq_id;
+	struct bna_cq_config cq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_txf_info {
+	u32 txf_id;
+	struct bna_txf_config txf_config;
+};
+
+struct bnad_rxf_info {
+	u32 rxf_id;
+	struct bna_rxf_config rxf_config;
+};
+
+enum bnad_ucast_cmd {
+	BNAD_UCAST_MAC_SET,
+	BNAD_UCAST_MAC_ADD,
+	BNAD_UCAST_MAC_DEL
+};
+
+enum bnad_state {
+	BNAD_S_START = 0,
+	BNAD_S_INIT = 1,
+	BNAD_S_INIT_DOWN = 2,
+	BNAD_S_INIT_DISABLING = 3,
+	BNAD_S_INIT_DISABLED = 4,
+	BNAD_S_OPENING = 5,
+	BNAD_S_OPEN = 6,
+	BNAD_S_OPEN_DOWN = 7,
+	BNAD_S_OPEN_DISABLING = 8,
+	BNAD_S_OPEN_DISABLED = 9,
+	BNAD_S_CLOSING = 10,
+	BNAD_S_UNLOADING = 11
+};
+
+enum bnad_link_state {
+	BNAD_LS_DOWN = 0,
+	BNAD_LS_UP = 1
+};
+struct bnad {
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	struct bna_dev *priv;
+
+	enum bnad_state state;
+	unsigned long flags;
+#define BNAD_F_BCU_DISABLED		0
+#define BNAD_F_HWERROR			1
+#define BNAD_F_MBOX_IRQ_DISABLED	2
+#define BNAD_F_CEE_RUNNING		3
+
+	unsigned int config;
+#define BNAD_CF_MSIX		0x01
+#define BNAD_CF_PROMISC		0x02
+#define BNAD_CF_ALLMULTI		0x04
+#define BNAD_CF_TXQ_DEPTH	0x10
+#define BNAD_CF_RXQ_DEPTH	0x20
+
+	unsigned int priority;
+	unsigned int curr_priority;	/* currently applied priority */
+
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+
+	uint txq_num;
+	uint txq_depth;
+	struct bnad_txq_info *txq_table;
+
+	struct tasklet_struct tx_free_tasklet;	/* For Tx cleanup */
+
+	uint rxq_num;
+	uint rxq_depth;
+	struct bnad_rxq_info *rxq_table;
+	uint cq_num;
+	struct bnad_cq_info *cq_table;
+
+	struct vlan_group *vlangrp;
+
+	u32 rx_csum;
+
+	uint msix_num;
+	struct msix_entry *msix_table;
+
+	uint ib_num;
+	struct bnad_ib_entry *ib_table;
+
+	struct bna_rit_entry *rit;	/* RxQ Indirection Table */
+
+	spinlock_t priv_lock ____cacheline_aligned;
+
+	uint txf_num;
+	struct bnad_txf_info *txf_table;
+	uint rxf_num;
+	struct bnad_rxf_info *rxf_table;
+
+	struct timer_list stats_timer;
+	struct net_device_stats net_stats;
+
+	u8 tx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 tx_interpkt_count;
+
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 rx_interpkt_count;
+	u8 rx_interpkt_timeo;	/* 4 bits, unit is 0.5usec. */
+
+	u8 rx_dyn_coalesce_on;	/* Rx Dynamic Intr Moderation Flag */
+
+	u8 ref_count;
+
+	u8 lldp_comp_status;
+	u8 cee_stats_comp_status;
+	u8 cee_reset_stats_status;
+	u8 ucast_comp_status;
+	u8 qstop_comp_status;
+
+	int ioc_comp_status;
+
+	struct bna_pause_config pause_config;
+
+	struct bna_stats *hw_stats;
+	struct bnad_drv_stats stats;
+
+	struct work_struct work;
+	unsigned int work_flags;
+#define BNAD_WF_ERROR		0x1
+#define BNAD_WF_RESETDONE	0x2
+#define BNAD_WF_CEE_PRIO	0x4
+#define BNAD_WF_LS_NOTIFY	0x8
+
+	struct completion lldp_comp;
+	struct completion cee_stats_comp;
+	struct completion cee_reset_stats_comp;
+	struct completion ucast_comp;
+	struct completion qstop_comp;
+	struct completion ioc_comp;
+
+	u32 bna_id;
+	u8 __iomem *bar0;	/* registers */
+
+	void *priv_stats;
+	DECLARE_PCI_UNMAP_ADDR(priv_stats_dma)
+
+	struct bfa_trc_mod *trcmod;
+	struct bfa_log_mod *logmod;
+	struct bna_meminfo ioc_meminfo[BNA_MEM_T_MAX];
+	struct timer_list ioc_timer;
+	struct mutex conf_mutex;
+
+	struct bna_mbox_cbfn priv_cbfn;
+
+	char adapter_name[64];
+	char port_name[64];
+
+	/* CEE Stuff */
+	struct bfa_cee_cbfn cee_cbfn;
+	struct bfa_cee cee;
+
+	struct list_head list_entry;
+};
+
+extern uint bnad_rxqs_per_cq;
+
+extern struct semaphore bnad_list_sem;
+extern struct list_head bnad_list;
+
+int bnad_open(struct net_device *netdev);
+int bnad_stop(struct net_device *netdev);
+int bnad_stop_locked(struct net_device *netdev);
+int bnad_open_locked(struct net_device *netdev);
+int bnad_sw_reset_locked(struct net_device *netdev);
+int bnad_ioc_disabling_locked(struct bnad *bnad);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_ioctl_init(void);
+void bnad_ioctl_exit(void);
+struct net_device_stats *bnad_get_stats(struct net_device *netdev);
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 *mac_ptr,
+		   unsigned int cmd);
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss);
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id);
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask);
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_cq_init(struct bnad *bnad, uint cq_id);
+void bnad_setup_cq(struct bnad *bnad, uint cq_id);
+void bnad_setup_ib(struct bnad *bnad, uint ib_id);
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id);
+void bnad_free_ib(struct bnad *bnad, uint ib_id);
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id);
+u32 bnad_get_msglevel(struct net_device *netdev);
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel);
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth);
+void bnad_free_cq(struct bnad *bnad, uint cq_id);
+void bnad_add_to_list(struct bnad *bnad);
+void bnad_remove_from_list(struct bnad *bnad);
+struct bnad *get_bnadev(int bna_id);
+int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _BNAD_H_ */


* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2010-02-10  6:29 Rasesh Mody
                   ` (2 preceding siblings ...)
  2010-02-10 17:10 ` Stephen Hemminger
@ 2010-02-10 17:15 ` Stephen Hemminger
  3 siblings, 0 replies; 30+ messages in thread
From: Stephen Hemminger @ 2010-02-10 17:15 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, adapter_linux_open_src_team

On Tue, 9 Feb 2010 22:29:15 -0800
Rasesh Mody <rmody@brocade.com> wrote:

> +static struct pci_device_id bnad_pci_id_table[] = {
> +	{
> +	 .vendor = PCI_VENDOR_ID_BROCADE,
> +	 .device = PCI_DEVICE_ID_BROCADE_CATAPULT,
> +	 .subvendor = PCI_ANY_ID,
> +	 .subdevice = PCI_ANY_ID,
> +	 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
> +	 .class_mask = 0xffff00},
> +	{0, 0}
> +};
> +
> +MODULE_DEVICE_TABLE(pci, bnad_pci_id_table

Use:

static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {

Not sure if you require class/class_mask, or whether you can just use
	PCI_VDEVICE(BROCADE, PCI_DEVICE_ID_BROCADE_CATAPULT)
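
For illustration only, a minimal sketch of how the table could look if the
class match is kept (only fields already present in the posted patch):

static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CATAPULT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00,
	},
	{ 0, }
};

If class/class_mask turn out to be unnecessary, the first entry collapses
to { PCI_VDEVICE(BROCADE, PCI_DEVICE_ID_BROCADE_CATAPULT) }.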

-- 


* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2010-02-10  6:29 Rasesh Mody
  2010-02-10 17:08 ` Stephen Hemminger
  2010-02-10 17:09 ` Stephen Hemminger
@ 2010-02-10 17:10 ` Stephen Hemminger
  2010-02-10 17:15 ` Stephen Hemminger
  3 siblings, 0 replies; 30+ messages in thread
From: Stephen Hemminger @ 2010-02-10 17:10 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, adapter_linux_open_src_team

On Tue, 9 Feb 2010 22:29:15 -0800
Rasesh Mody <rmody@brocade.com> wrote:

> +	netdev = alloc_etherdev(sizeof(struct bnad));
> +	if (!netdev) {
> +		dev_err(&pdev->dev, "alloc_etherdev failed\n");
> +		err = -ENOMEM;
> +		goto release_regions;
> +	}
> +	SET_NETDEV_DEV(netdev, &pdev->dev);
> +	pci_set_drvdata(pdev, netdev);
> +
> +	bnad = netdev_priv(netdev);
> +
> +	memset(bnad, 0, sizeof(struct bnad));

The private area provided by alloc_etherdev is already
guaranteed to be zeroed.
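
So the probe path can simply be (minimal sketch, using the names from the
posted patch):

	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev)
		return -ENOMEM;
	bnad = netdev_priv(netdev);
	/* no memset(bnad, 0, ...) needed: alloc_etherdev() already
	 * hands back a zeroed private area */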

-- 


* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2010-02-10  6:29 Rasesh Mody
  2010-02-10 17:08 ` Stephen Hemminger
@ 2010-02-10 17:09 ` Stephen Hemminger
  2010-02-10 17:10 ` Stephen Hemminger
  2010-02-10 17:15 ` Stephen Hemminger
  3 siblings, 0 replies; 30+ messages in thread
From: Stephen Hemminger @ 2010-02-10 17:09 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, adapter_linux_open_src_team

On Tue, 9 Feb 2010 22:29:15 -0800
Rasesh Mody <rmody@brocade.com> wrote:

> +#define bnad_conf_lock()	down(&bnad->conf_sem)
> +#define bnad_conf_unlock()	up(&bnad->conf_sem)

Don't wrap locking in macros.
Don't use semaphores as locks; use a mutex instead.
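
A minimal sketch of the open-coded equivalent (the conf_mutex field name is
taken from the other posting of this driver in the thread):

	struct bnad {
		...
		struct mutex conf_mutex;
	};

	/* at init time */
	mutex_init(&bnad->conf_mutex);

	/* instead of bnad_conf_lock()/bnad_conf_unlock() */
	mutex_lock(&bnad->conf_mutex);
	/* ... configuration work ... */
	mutex_unlock(&bnad->conf_mutex);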

-- 


* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2010-02-10  6:29 Rasesh Mody
@ 2010-02-10 17:08 ` Stephen Hemminger
  2010-02-10 17:09 ` Stephen Hemminger
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 30+ messages in thread
From: Stephen Hemminger @ 2010-02-10 17:08 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, adapter_linux_open_src_team

On Tue, 9 Feb 2010 22:29:15 -0800
Rasesh Mody <rmody@brocade.com> wrote:

> +		BUG_ON(!(skb));

This driver seems to have lots of extra BUG_ON assertions,
which implies either paranoia or code that is still in development.

Putting in a check for NULL in places like this is not really
helpful. The check adds overhead and provides no additional
help to the user. If the BUG_ON() is there, the user sees a
backtrace and that CPU is stuck; if the BUG_ON() were omitted
and the skb really were NULL, the user would still see a
backtrace and the CPU would still be stuck.
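
In other words, something like this (a sketch based on the skb handling in
bnad_free_txbufs() in the posted patch) behaves the same for the user
without the extra check:

	skb = unmap_array[unmap_cons].skb;
	/* if skb were NULL, the dereference below already oopses
	 * with a backtrace, so BUG_ON(!skb) adds nothing */
	unmap_array[unmap_cons].skb = NULL;
	sent_bytes += skb->len;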

-- 


* Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
@ 2010-02-10  6:29 Rasesh Mody
  2010-02-10 17:08 ` Stephen Hemminger
                   ` (3 more replies)
  0 siblings, 4 replies; 30+ messages in thread
From: Rasesh Mody @ 2010-02-10  6:29 UTC (permalink / raw)
  To: netdev; +Cc: adapter_linux_open_src_team

From: Rasesh Mody <rmody@brocade.com>

This is patch 1/6 which contains linux driver source for
Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
Source is based against net-next-2.6.

We wish this patch to be considered for inclusion in net-next-2.6

Signed-off-by: Rasesh Mody <rmody@brocade.com>
---
 bnad.c | 3542 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 bnad.h |  346 ++++++
 2 files changed, 3888 insertions(+)

diff -ruP net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.c net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.c
--- net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.c	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.c	2010-02-09 22:08:04.688555000 -0800
@@ -0,0 +1,3542 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ *  bnad.c  Brocade 10G PCIe Ethernet driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/workqueue.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pm.h>
+#include <linux/random.h>
+
+#include <net/checksum.h>
+
+#include "bnad.h"
+#include "cna.h"
+#include "bna_iocll.h"
+#include "bna_intr.h"
+#include "bnad_defs.h"
+
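+/* Number of Tx work items needed for _vectors buffers (4 vectors per WI) */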
+#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
+static const bool bnad_msix = 1;
+static const bool bnad_small_large_rxbufs = 1;
+static uint bnad_rxqsets_used;
+static const bool bnad_ipid_mode;
+static const bool bnad_vlan_strip = 1;
+static const uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
+static const uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
+static uint bnad_log_level;
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");
+
+uint bnad_rxqs_per_cq = 2;
+
+const char *bnad_states[] = {
+	"START",
+	"INIT",
+	"INIT_DOWN",
+	"INIT_DISABLING",
+	"INIT_DISABLED",
+	"OPENING",
+	"OPEN",
+	"OPEN_DOWN",
+	"OPEN_DISABING",
+	"OPEN_DISABLED",
+	"CLOSING",
+	"UNLOADING"
+};
+
+DECLARE_MUTEX(bnad_fwimg_sem);
+
+static void bnad_disable_msix(struct bnad *bnad);
+static void bnad_free_ibs(struct bnad *bnad);
+static void bnad_set_rx_mode(struct net_device *netdev);
+static void bnad_set_rx_mode_locked(struct net_device *netdev);
+static void bnad_reconfig_vlans(struct bnad *bnad);
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets);
+static int bnad_set_mac_address(struct net_device *netdev, void *addr);
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr);
+static int bnad_disable_locked(struct bnad *bnad);
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu);
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid);
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid);
+static void bnad_netpoll(struct net_device *netdev);
+
+static const struct net_device_ops bnad_netdev_ops = {
+	.ndo_open			= bnad_open,
+	.ndo_stop			= bnad_stop,
+	.ndo_start_xmit			= bnad_start_xmit,
+	.ndo_get_stats			= bnad_get_stats,
+	.ndo_set_rx_mode		= bnad_set_rx_mode,
+	.ndo_set_multicast_list		= bnad_set_rx_mode,
+	.ndo_set_mac_address		= bnad_set_mac_address,
+	.ndo_change_mtu			= bnad_change_mtu,
+
+	.ndo_vlan_rx_register		= bnad_vlan_rx_register,
+	.ndo_vlan_rx_add_vid		= bnad_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid		= bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller		= bnad_netpoll,
+#endif
+};
+
+void bnad_ioc_timeout(unsigned long ioc_arg)
+{
+	struct bnad *bnad = (struct bnad *)(((struct bfa_ioc *)ioc_arg)->bfa);
+
+	spin_lock_irq(&bnad->priv_lock);
+	/* assumed: dispatch to the IOC layer's timeout handler, mirroring
+	 * the bfa_ioc_* calls in the other timer callbacks below */
+	bfa_ioc_timeout(ioc_arg);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_ioc_sem_timeout(unsigned long ioc_arg)
+{
+	struct bnad *bnad = (struct bnad *)(((struct bfa_ioc*)ioc_arg)->bfa);
+	spin_lock_irq(&bnad->priv_lock);
+	bfa_ioc_sem_timeout(ioc_arg);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_ioc_hb_check(unsigned long ioc_arg)
+{
+	struct bnad *bnad = (struct bnad *)(((struct bfa_ioc*)ioc_arg)->bfa);
+	spin_lock_irq(&bnad->priv_lock);
+	bfa_ioc_hb_check(ioc_arg);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+u32
+bnad_get_msglevel(struct net_device *netdev)
+{
+	return bnad_log_level;
+}
+
+void
+bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	bnad_conf_lock();
+	bnad_log_level = msglevel;
+	bnad_conf_unlock();
+}
+
+static unsigned int bnad_free_txbufs(struct bnad_txq_info *txqinfo,
+	u16 updated_txq_cons)
+{
+	struct bnad *bnad = txqinfo->bnad;
+	unsigned int sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	wis = BNAD_Q_INDEX_CHANGE(txqinfo->txq.q.consumer_index,
+				  updated_txq_cons, txqinfo->txq.q.q_depth);
+	BUG_ON(!(wis <=
+		   BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth)));
+	unmap_array = txqinfo->skb_unmap_q.unmap_array;
+	unmap_cons = txqinfo->skb_unmap_q.consumer_index;
+	prefetch(&unmap_array[unmap_cons + 1]);
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+		BUG_ON(!(skb));
+		unmap_array[unmap_cons].skb = NULL;
+		BUG_ON(!(wis >=
+			   BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags)));
+		BUG_ON(!(((txqinfo->skb_unmap_q.producer_index - unmap_cons) &
+			  (txqinfo->skb_unmap_q.q_depth - 1)) >=
+			 1 + skb_shinfo(skb)->nr_frags));
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr), skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, txqinfo->skb_unmap_q.q_depth);
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+				       pci_unmap_addr(&unmap_array[unmap_cons],
+						      dma_addr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					   0);
+			BNA_QE_INDX_ADD(unmap_cons, 1,
+					txqinfo->skb_unmap_q.q_depth);
+			prefetch(&unmap_array[unmap_cons + 1]);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	txqinfo->txq.q.consumer_index = updated_txq_cons;
+	txqinfo->skb_unmap_q.consumer_index = unmap_cons;
+	txqinfo->tx_packets += sent_packets;
+	txqinfo->tx_bytes += sent_bytes;
+	return sent_packets;
+}
+
+static inline void bnad_disable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+}
+
+static inline void bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    bnad->tx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    bnad->cq_table[i].
+					    rx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static inline void bnad_disable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib, 0);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+}
+static inline void bnad_enable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib,
+				    cqinfo->rx_coalescing_timeo);
+
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static unsigned int bnad_tx(struct bnad *bnad, struct bnad_txq_info *txqinfo)
+{
+	struct net_device *netdev = bnad->netdev;
+	unsigned int sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags))
+		return 0;
+
+	sent = bnad_free_txbufs(txqinfo,
+				(u16) (*txqinfo->hw_consumer_index));
+	if (sent) {
+		if (netif_queue_stopped(netdev) &&
+		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
+		    BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);
+	} else {
+		bna_ib_ack(bnad->priv, &txqinfo->ib, 0);
+	}
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+
+	return sent;
+}
+
+static irqreturn_t bnad_msix_tx(int irq, void *data)
+{
+	struct bnad_txq_info *txqinfo = (struct bnad_txq_info *)data;
+	struct bnad *bnad = txqinfo->bnad;
+
+	bnad_tx(bnad, txqinfo);
+
+	return IRQ_HANDLED;
+}
+
+static void bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc =
+		BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				rxqinfo->skb_unmap_q.q_depth);
+
+	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
+	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
+	BUG_ON(!(wi_range && wi_range <= rxqinfo->rxq.q.q_depth));
+
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent,
+					     wi_range);
+			BUG_ON(!(wi_range &&
+				   wi_range <= rxqinfo->rxq.q.q_depth));
+		}
+		skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			rxqinfo->rxbuf_alloc_failed++;
+			goto finishing;
+		}
+		skb->dev = rxqinfo->bnad->netdev;
+		skb_reserve(skb, NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr =
+			pci_map_single(rxqinfo->bnad->pcidev, skb->data,
+				       rxqinfo->rxq_config.buffer_size,
+				       PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, rxqinfo->skb_unmap_q.q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		rxqinfo->skb_unmap_q.producer_index = unmap_prod;
+		rxqinfo->rxq.q.producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(&rxqinfo->rxq);
+	}
+}
+
+static inline void bnad_refill_rxq(struct bnad_rxq_info *rxqinfo)
+{
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags)) {
+		if (BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				    rxqinfo->skb_unmap_q.q_depth) >>
+		    BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_rxbufs(rxqinfo);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+	}
+}
+
+static unsigned int bnad_poll_cq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_rxq_info *rxqinfo = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	struct bna_pkt_rate *pkt_rt = &cqinfo->pkt_rate;
+
+	prefetch(bnad->netdev);
+	cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+	BUG_ON(!(wi_range && wi_range <= cqinfo->cq.q.q_depth));
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+		rxqinfo = &bnad->rxq_table[cmpl->rxq_id];
+		unmap_q = &rxqinfo->skb_unmap_q;
+
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BUG_ON(!(skb));
+		prefetch(skb->data - NET_IP_ALIGN);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->unmap_array
+						[unmap_q->consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		/* XXX May be bad for performance. */
+		/* CATAPULT_BRINGUP : Should we add all the packets ? */
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+
+		wis++;
+		if (likely(--wi_range))
+			next_cmpl = cmpl + 1;
+		else {
+			BNA_Q_PI_ADD(&cqinfo->cq, wis);
+			wis = 0;
+			next_cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+			BUG_ON(!(wi_range &&
+				   wi_range <= cqinfo->cq.q.q_depth));
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely
+		    (flags &
+		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
+		      BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb(skb);
+			rxqinfo->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely
+		    (bnad->rx_csum &&
+		     (((flags & BNA_CQ_EF_IPV4) &&
+		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+		      (flags & BNA_CQ_EF_IPV6)) &&
+		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		rxqinfo->rx_packets++;
+		rxqinfo->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) &&
+		    bnad_vlan_strip) {
+			BUG_ON(!(cmpl->vlan_tag));
+			vlan_hwaccel_receive_skb(skb, bnad->vlangrp,
+						 ntohs(cmpl->vlan_tag));
+		} else
+			netif_receive_skb(skb);
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	BNA_Q_PI_ADD(&cqinfo->cq, wis);
+
+	if (likely(rxqinfo)) {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, packets);
+		/* Check the current queue first. */
+		bnad_refill_rxq(rxqinfo);
+
+		/* XXX counters per queue for refill? */
+		if (likely(bnad_small_large_rxbufs)) {
+			/* There are 2 RxQs - small and large buffer queues */
+			unsigned int rxq_id = (rxqinfo->rxq_id ^ 1);
+			bnad_refill_rxq(&bnad->rxq_table[rxq_id]);
+		}
+	} else {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	}
+
+	return packets;
+}
+
+static irqreturn_t bnad_msix_rx(int irq, void *data)
+{
+	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
+	struct bnad *bnad = cqinfo->bnad;
+
+	if (likely(napi_schedule_prep(&cqinfo->napi))) {
+		bnad_disable_rx_irq(bnad, cqinfo);
+		__napi_schedule(&cqinfo->napi);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_msix_err_mbox(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+
+	bna_intr_status_get(bnad->priv, &intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+		bna_mbox_err_handler(bnad->priv, intr_status);
+
+	spin_unlock(&bnad->priv_lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_isr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+
+	if (!intr_status) {
+		spin_unlock(&bnad->priv_lock);
+		return IRQ_NONE;
+	}
+
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		bna_mbox_err_handler(bnad->priv, intr_status);
+		spin_unlock(&bnad->priv_lock);
+		if (BNA_IS_ERR_INTR(intr_status) ||
+		    !BNA_IS_INTX_DATA_INTR(intr_status))
+			goto exit_isr;
+	} else
+		spin_unlock(&bnad->priv_lock);
+
+	if (likely(napi_schedule_prep(&bnad->cq_table[0].napi))) {
+		bnad_disable_txrx_irqs(bnad);
+		__napi_schedule(&bnad->cq_table[0].napi);
+	}
+
+exit_isr:
+	return IRQ_HANDLED;
+}
+
+static int bnad_request_mbox_irq(struct bnad *bnad)
+{
+	int err;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		err = request_irq(bnad->msix_table[bnad->msix_num - 1].vector,
+				  &bnad_msix_err_mbox, 0,
+				  bnad->netdev->name, bnad->netdev);
+	} else {
+		err = request_irq(bnad->pcidev->irq, &bnad_isr,
+				  IRQF_SHARED, bnad->netdev->name,
+				  bnad->netdev);
+	}
+
+	if (err) {
+		dev_err(&bnad->pcidev->dev,
+			"Request irq for mailbox failed: %d\n", err);
+		return err;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bna_mbox_msix_idx_set(bnad->priv, bnad->msix_num - 1);
+
+	bna_mbox_intr_enable(bnad->priv);
+	return 0;
+}
+
+static void bnad_sync_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+	synchronize_irq(irq);
+}
+
+static void bnad_free_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_mbox_intr_disable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	free_irq(irq, bnad->netdev);
+}
+
+static int bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
+{
+	BUG_ON(!(txq_id < bnad->txq_num));
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[txq_id].vector,
+			   &bnad_msix_tx, 0,
+			   bnad->txq_table[txq_id].name,
+			   &bnad->txq_table[txq_id]);
+}
+
+int
+bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
+{
+	BUG_ON(!(cq_id < bnad->cq_num));
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
+			   &bnad_msix_rx, 0,
+			   bnad->cq_table[cq_id].name, &bnad->cq_table[cq_id]);
+}
+
+static int bnad_request_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	int i;
+	int err;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		u32 mask;
+		bna_intx_disable(bnad->priv, &mask);
+		mask &= ~0xffff;
+		bna_intx_enable(bnad->priv, mask);
+		for (i = 0; i < bnad->ib_num; i++)
+			bna_ib_ack(bnad->priv, bnad->ib_table[i].ib, 0);
+		return 0;
+	}
+
+	entries = bnad->msix_table;
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_request_txq_irq(bnad, i);
+		if (err) {
+			pr_info("%s request irq for TxQ %d failed %d",
+				bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[i].vector,
+					 &bnad->txq_table[i]);
+			}
+			return err;
+		}
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_request_cq_irq(bnad, i);
+		if (err) {
+			pr_info("%s request irq for CQ %u failed %d",
+				bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[bnad->txq_num + i].vector,
+					 &bnad->cq_table[i]);
+			}
+			goto free_txq_irqs;
+		}
+	}
+
+	return 0;
+
+free_txq_irqs:
+	for (i = 0; i < bnad->txq_num; i++)
+		free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+	bnad_disable_msix(bnad);
+
+	return err;
+}
+
+static void bnad_free_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	uint i;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		entries = bnad->msix_table;
+		for (i = 0; i < bnad->txq_num; i++)
+			free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+		for (i = 0; i < bnad->cq_num; i++) {
+			free_irq(entries[bnad->txq_num + i].vector,
+				 &bnad->cq_table[i]);
+		}
+	} else
+		synchronize_irq(bnad->pcidev->irq);
+}
+
+void
+bnad_setup_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+
+	BUG_ON(!(ib_id < bnad->ib_num));
+	ib_entry = &bnad->ib_table[ib_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_ib_config_set(bnad->priv, ib_entry->ib, ib_id,
+			  &ib_entry->ib_config);
+	/* Start the IB */
+	bna_ib_ack(bnad->priv, ib_entry->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_ibs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_ib(bnad, bnad->txq_table[i].txq_config.ib_id);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_ib(bnad, bnad->cq_table[i].cq_config.ib_id);
+}
+
+/* These functions are called back with priv_lock held. */
+
+static void bnad_lldp_get_cfg_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_attr_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_stats_comp_status = status;
+	complete(&bnad->cee_stats_comp);
+}
+
+static void bnad_cee_reset_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_reset_stats_status = status;
+	complete(&bnad->cee_reset_stats_comp);
+}
+
+static void bnad_ucast_set_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->ucast_comp_status = status;
+	complete(&bnad->ucast_comp);
+}
+
+static void bnad_q_stop_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+
+	bnad->qstop_comp_status = status;
+	complete(&bnad->qstop_comp);
+}
+
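+/* Return the index of the lowest bit set in prio_map (0 if none is set). */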
+static unsigned int bnad_get_priority(struct bnad *bnad, u8 prio_map)
+{
+	unsigned int i;
+
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		return i;
+	}
+	return 0;
+}
+
+static void bnad_link_up_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct bfi_ll_aen *up_aen = (struct bfi_ll_aen *)
+		(&bnad->priv->mb_msg);
+
+	bnad->cee_linkup = up_aen->cee_linkup;
+	bnad->priority = bnad_get_priority(bnad, up_aen->prio_map);
+
+	bnad->link_state = BNAD_LS_UP;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_link_down_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->link_state = BNAD_LS_DOWN;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_stats_get_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->stats.hw_stats_updates++;
+	if (bnad->state == BNAD_S_OPEN)
+		mod_timer(&bnad->stats_timer, jiffies + HZ);
+}
+
+/* Called with bnad priv_lock held. */
+static void bnad_hw_error(struct bnad *bnad, u8 status)
+{
+	unsigned int irq;
+
+	set_bit(BNAD_F_HWERROR, &bnad->flags);
+
+	bna_mbox_intr_disable(bnad->priv);
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (!test_and_set_bit(BNAD_F_MBOX_IRQ_DISABLED, &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			pr_info("Disabling Mbox IRQ %d for port %d",
+				irq, bnad->bna_id);
+			disable_irq_nosync(irq);
+		}
+	}
+
+	bna_cleanup(bnad->priv);
+
+	bnad->work_flags = BNAD_WF_ERROR;
+	if (bnad->state != BNAD_S_UNLOADING)
+		schedule_work(&bnad->work);
+}
+
+static void bnad_hw_error_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad_hw_error(bnad, status);
+}
+
+int
+bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth)
+{
+	/* Q_depth must be power of 2 for macros to work. */
+	BUG_ON(!(BNA_POWER_OF_2(q_depth)));
+	unmap_q->q_depth = q_depth;
+	unmap_q->unmap_array = vmalloc(q_depth * sizeof(struct bnad_skb_unmap));
+	if (!unmap_q->unmap_array)
+		return -ENOMEM;
+	memset(unmap_q->unmap_array, 0,
+	       q_depth * sizeof(struct bnad_skb_unmap));
+	return 0;
+}
+
+static int bnad_alloc_unmap_queues(struct bnad *bnad)
+{
+	int i, err = 0;
+	struct bnad_txq_info *txqinfo;
+	struct bnad_rxq_info *rxqinfo;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		err = bnad_alloc_unmap_q(&txqinfo->skb_unmap_q,
+					 txqinfo->txq.q.q_depth * 4);
+		if (err) {
+			pr_info("%s allocating Tx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		err = bnad_alloc_unmap_q(&rxqinfo->skb_unmap_q,
+					 rxqinfo->rxq.q.q_depth);
+		if (err) {
+			pr_info("%s allocating Rx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void bnad_reset_q(struct bnad *bnad, struct bna_q *q,
+	struct bnad_unmap_q *unmap_q)
+{
+	u32 _ui;
+
+	BUG_ON(!(q->producer_index == q->consumer_index));
+	BUG_ON(!(unmap_q->producer_index == unmap_q->consumer_index));
+
+	q->producer_index = 0;
+	q->consumer_index = 0;
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	for (_ui = 0; _ui < unmap_q->q_depth; _ui++)
+		BUG_ON(unmap_q->unmap_array[_ui].skb);
+}
+
+static void bnad_flush_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	struct bnad *bnad = rxqinfo->bnad;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 cq_id;
+
+	unmap_q = &rxqinfo->skb_unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BUG_ON(!(skb));
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->unmap_array
+						[unmap_q->consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+	}
+
+	bnad_reset_q(bnad, &rxqinfo->rxq.q, &rxqinfo->skb_unmap_q);
+	cq_id = rxqinfo->rxq_id / bnad_rxqs_per_cq;
+	*bnad->cq_table[cq_id].hw_producer_index = 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_disable_txq(struct bnad *bnad, u32 txq_id)
+{
+	int err;
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_txq_stop(bnad->priv, txq_id);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto txq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto txq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+txq_stop_exit:
+	if (err) {
+		pr_info("%s stop TxQ %u failed %d", bnad->netdev->name,
+			txq_id, err);
+	}
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask)
+{
+	int err;
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_multi_rxq_stop(bnad->priv, rxq_id_mask);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto rxq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto rxq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+rxq_stop_exit:
+	if (err) {
+		pr_info("%s stop RxQs(0x%llx) failed %d",
+			bnad->netdev->name, rxq_id_mask, err);
+	}
+
+	return err;
+}
+
+static int bnad_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_rx_irq(bnad, cqinfo);
+	return rcvd;
+}
+
+static int bnad_poll_txrx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	bnad_tx(bnad, &bnad->txq_table[0]);
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_txrx_irqs(bnad);
+	return rcvd;
+}
+
+static void bnad_napi_init(struct bnad *bnad)
+{
+	int (*napi_poll) (struct napi_struct *, int);
+	int i;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		napi_poll = bnad_poll_rx;
+	else
+		napi_poll = bnad_poll_txrx;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_add(bnad->netdev, &bnad->cq_table[i].napi, napi_poll,
+			       64);
+}
+
+static void bnad_napi_enable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_enable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_disable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_disable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_del(&bnad->cq_table[i].napi);
+}
+
+static void bnad_stop_data_path(struct bnad *bnad, int on_error)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!on_error && !BNAD_NOT_READY(bnad)) {
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Wait to make sure Tx and Rx are stopped. */
+	msleep(1000);
+	bnad_free_txrx_irqs(bnad);
+	bnad_sync_mbox_irq(bnad);
+
+	bnad_napi_disable(bnad);
+	bnad_napi_uninit(bnad);
+	/* Delete the stats timer after synchronize with mbox irq. */
+	del_timer_sync(&bnad->stats_timer);
+
+	netif_tx_disable(bnad->netdev);
+	netif_carrier_off(bnad->netdev);
+
+	/*
+	 * Remove tasklets if scheduled
+	 */
+	tasklet_kill(&bnad->tx_free_tasklet);
+}
+
+static void bnad_port_admin_locked(struct bnad *bnad, u8 up)
+{
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!BNAD_NOT_READY(bnad)) {
+		bna_port_admin(bnad->priv, up);
+		if (up)
+			mod_timer(&bnad->stats_timer, jiffies + HZ);
+		else
+			bnad->link_state = BNAD_LS_DOWN;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held */
+static int bnad_stop_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	switch (bnad->state) {
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_CLOSING;
+		bnad_disable_locked(bnad);
+		bnad->state = BNAD_S_INIT;
+		pr_info("%s is stopped", bnad->netdev->name);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN_DISABLED:
+		bnad->state = BNAD_S_INIT_DISABLED;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held */
+int
+bnad_ioc_disabling_locked(struct bnad *bnad)
+{
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT_DISABLING;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+		bnad_disable_locked(bnad);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int bnad_alloc_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BUG_ON(!(bnad->ib_table && ib_id < bnad->ib_num));
+	ib_entry = &bnad->ib_table[ib_id];
+	ib_entry->ib_seg_addr =
+		pci_alloc_consistent(bnad->pcidev, L1_CACHE_BYTES, &dma_addr);
+	if (!ib_entry->ib_seg_addr)
+		return -ENOMEM;
+
+	BNA_SET_DMA_ADDR(dma_addr, &ib_entry->ib_config.ib_seg_addr);
+	return 0;
+}
+
+static int bnad_alloc_ibs(struct bnad *bnad)
+{
+	uint i;
+	int err;
+
+	bnad->ib_num = bnad->txq_num + bnad->cq_num;
+	bnad->ib_table =
+		kcalloc(bnad->ib_num, sizeof(struct bnad_ib_entry),
+			GFP_KERNEL);
+	if (!bnad->ib_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->ib_num; i++) {
+		err = bnad_alloc_ib(bnad, i);
+		if (err)
+			goto free_ibs;
+	}
+	return 0;
+
+free_ibs:
+	bnad_free_ibs(bnad);
+	return err;
+}
+
+void
+bnad_free_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BUG_ON(!(bnad->ib_table && ib_id < bnad->ib_num));
+	ib_entry = &bnad->ib_table[ib_id];
+	if (ib_entry->ib_seg_addr) {
+		BNA_GET_DMA_ADDR(&ib_entry->ib_config.ib_seg_addr, dma_addr);
+		pci_free_consistent(bnad->pcidev, L1_CACHE_BYTES,
+				    ib_entry->ib_seg_addr, dma_addr);
+		ib_entry->ib_seg_addr = NULL;
+	}
+}
+
+static void bnad_free_ibs(struct bnad *bnad)
+{
+	uint i;
+
+	if (!bnad->ib_table)
+		return;
+	for (i = 0; i < bnad->ib_num; i++)
+		bnad_free_ib(bnad, i);
+	kfree(bnad->ib_table);
+	bnad->ib_table = NULL;
+}
+
+/* Let the caller deal with error - free memory. */
+static int bnad_alloc_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q, size_t qsize)
+{
+	size_t i;
+	dma_addr_t dma_addr;
+
+	qsize = ALIGN(qsize, PAGE_SIZE);
+	qpt->page_count = qsize >> PAGE_SHIFT;
+	qpt->page_size = PAGE_SIZE;
+
+	qpt->kv_qpt_ptr =
+		pci_alloc_consistent(bnad->pcidev,
+				     qpt->page_count *
+				     sizeof(struct bna_dma_addr), &dma_addr);
+	if (!qpt->kv_qpt_ptr)
+		return -ENOMEM;
+	BNA_SET_DMA_ADDR(dma_addr, &qpt->hw_qpt_ptr);
+
+	q->qpt_ptr = kcalloc(qpt->page_count, sizeof(void *), GFP_KERNEL);
+	if (!q->qpt_ptr)
+		return -ENOMEM;
+	qpt->qpt_ptr = q->qpt_ptr;
+	for (i = 0; i < qpt->page_count; i++) {
+		q->qpt_ptr[i] =
+			pci_alloc_consistent(bnad->pcidev, PAGE_SIZE,
+					     &dma_addr);
+		if (!q->qpt_ptr[i])
+			return -ENOMEM;
+		BNA_SET_DMA_ADDR(dma_addr,
+				 &((struct bna_dma_addr *)qpt->kv_qpt_ptr)[i]);
+	}
+
+	return 0;
+}
+
+static void bnad_free_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q)
+{
+	int i;
+	dma_addr_t dma_addr;
+
+	if (qpt->kv_qpt_ptr && q->qpt_ptr) {
+		for (i = 0; i < qpt->page_count; i++) {
+			if (q->qpt_ptr[i]) {
+				BNA_GET_DMA_ADDR(&((struct bna_dma_addr *)
+						   qpt->kv_qpt_ptr)[i],
+						 dma_addr);
+				pci_free_consistent(bnad->pcidev, PAGE_SIZE,
+						    q->qpt_ptr[i], dma_addr);
+			}
+		}
+	}
+
+	kfree(q->qpt_ptr);
+	qpt->qpt_ptr = q->qpt_ptr = NULL;
+
+	if (qpt->kv_qpt_ptr) {
+		BNA_GET_DMA_ADDR(&qpt->hw_qpt_ptr, dma_addr);
+		pci_free_consistent(bnad->pcidev,
+				    qpt->page_count *
+				    sizeof(struct bna_dma_addr),
+				    qpt->kv_qpt_ptr, dma_addr);
+		qpt->kv_qpt_ptr = NULL;
+	}
+}
+
+static void bnad_free_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BUG_ON(!(bnad->txq_table && txq_id < bnad->txq_num));
+	txqinfo = &bnad->txq_table[txq_id];
+	bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+	if (txqinfo->skb_unmap_q.unmap_array) {
+		bnad_free_txbufs(txqinfo, txqinfo->txq.q.producer_index);
+		vfree(txqinfo->skb_unmap_q.unmap_array);
+		txqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BUG_ON(!(bnad->rxq_table && rxq_id < bnad->rxq_num));
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+	if (rxqinfo->skb_unmap_q.unmap_array) {
+		bnad_flush_rxbufs(rxqinfo);
+		vfree(rxqinfo->skb_unmap_q.unmap_array);
+		rxqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BUG_ON(!(bnad->cq_table && cq_id < bnad->cq_num));
+	cqinfo = &bnad->cq_table[cq_id];
+	bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+}
+
+static void bnad_free_queues(struct bnad *bnad)
+{
+	uint i;
+
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++)
+			bnad_free_txq(bnad, i);
+		kfree(bnad->txq_table);
+		bnad->txq_table = NULL;
+	}
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++)
+			bnad_free_rxq(bnad, i);
+		kfree(bnad->rxq_table);
+		bnad->rxq_table = NULL;
+	}
+
+	if (bnad->cq_table) {
+		for (i = 0; i < bnad->cq_num; i++)
+			bnad_free_cq(bnad, i);
+		kfree(bnad->cq_table);
+		bnad->cq_table = NULL;
+	}
+}
+
+static int bnad_txq_init(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	BUG_ON(!(bnad->txq_table && txq_id < bnad->txq_num));
+	txqinfo = &bnad->txq_table[txq_id];
+	err = bnad_alloc_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q,
+			   bnad->txq_depth * sizeof(struct bna_txq_entry));
+	if (err) {
+		bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+		return err;
+	}
+	txqinfo->txq.q.q_depth = bnad->txq_depth;
+	txqinfo->bnad = bnad;
+	txqinfo->txq_config.txf_id = BNAD_TX_FUNC_ID;
+	snprintf(txqinfo->name, sizeof(txqinfo->name), "%s TxQ %d",
+		 bnad->netdev->name, txq_id);
+	return 0;
+}
+
+static int bnad_txqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->txq_table =
+		kcalloc(bnad->txq_num, sizeof(struct bnad_txq_info),
+			GFP_KERNEL);
+	if (!bnad->txq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_txq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_rxq_init(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	int err;
+
+	BUG_ON(!(bnad->rxq_table && rxq_id < bnad->rxq_num));
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	err = bnad_alloc_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q,
+			   bnad->rxq_depth * sizeof(struct bna_rxq_entry));
+	if (err) {
+		bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+		return err;
+	}
+	rxqinfo->rxq.q.q_depth = bnad->rxq_depth;
+	rxqinfo->bnad = bnad;
+	rxqinfo->rxq_id = rxq_id;
+	rxqinfo->rxq_config.cq_id = rxq_id / bnad_rxqs_per_cq;
+
+	return 0;
+}
+
+static int
+bnad_rxqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->rxq_table =
+		kcalloc(bnad->rxq_num, sizeof(struct bnad_rxq_info),
+			GFP_KERNEL);
+	if (!bnad->rxq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		err = bnad_rxq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_cq_init(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+	int err;
+
+	BUG_ON(!(bnad->cq_table && cq_id < bnad->cq_num));
+	cqinfo = &bnad->cq_table[cq_id];
+	err = bnad_alloc_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q,
+			   bnad->rxq_depth * bnad_rxqs_per_cq *
+			   sizeof(struct bna_cq_entry));
+	if (err) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return err;
+	}
+
+	cqinfo->cq.q.q_depth = bnad->rxq_depth * bnad_rxqs_per_cq;
+	cqinfo->bnad = bnad;
+
+	cqinfo->rx_coalescing_timeo = bnad->rx_coalescing_timeo;
+
+	cqinfo->cq_id = cq_id;
+	snprintf(cqinfo->name, sizeof(cqinfo->name), "%s CQ %d",
+		 bnad->netdev->name, cq_id);
+
+	return 0;
+}
+
+static int bnad_cqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->cq_table =
+		kcalloc(bnad->cq_num, sizeof(struct bnad_cq_info), GFP_KERNEL);
+	if (!bnad->cq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_cq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
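+/*
+ * Scale the configured queue depth down for MTUs larger than the default,
+ * rounding up to a power of two and enforcing the minimum depth.
+ */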
+static uint bnad_get_qsize(uint qsize_conf, uint mtu)
+{
+	uint qsize;
+
+	if (mtu >= ETH_DATA_LEN) {
+		qsize = qsize_conf / (mtu / ETH_DATA_LEN);
+		if (!BNA_POWER_OF_2(qsize))
+			BNA_TO_POWER_OF_2_HIGH(qsize);
+		if (qsize < BNAD_MIN_Q_DEPTH)
+			qsize = BNAD_MIN_Q_DEPTH;
+	} else
+		qsize = qsize_conf;
+
+	return qsize;
+}
+
+static int bnad_init_queues(struct bnad *bnad)
+{
+	int err;
+
+	if (!(bnad->config & BNAD_CF_TXQ_DEPTH))
+		bnad->txq_depth =
+			bnad_get_qsize(bnad_txq_depth, bnad->netdev->mtu);
+	if (!(bnad->config & BNAD_CF_RXQ_DEPTH))
+		bnad->rxq_depth =
+			bnad_get_qsize(bnad_rxq_depth, bnad->netdev->mtu);
+
+	err = bnad_txqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_rxqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_cqs_init(bnad);
+
+	return err;
+}
+
+void
+bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
+{
+	struct bnad_cq_info *cqinfo;
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+
+	BUG_ON(!(cq_id < bnad->cq_num && ib_id < bnad->ib_num));
+	cqinfo = &bnad->cq_table[cq_id];
+	ib_entry = &bnad->ib_table[ib_id];
+
+	cqinfo->hw_producer_index = (u32 *) (ib_entry->ib_seg_addr);
+	cqinfo->cq_config.ib_id = ib_id;
+	cqinfo->cq_config.ib_seg_index = 0;
+
+	ib_entry->ib = &cqinfo->ib;
+	ib_config = &ib_entry->ib_config;
+	ib_config->coalescing_timer = bnad->rx_coalescing_timeo;
+	ib_config->control_flags =
+		BNA_IB_CF_INT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
+	if (bnad->config & BNAD_CF_MSIX) {
+		ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+		ib_config->msix_vector = ib_id;
+	} else
+		ib_config->msix_vector = 1 << ib_id;
+
+	/* Every CQ has its own IB. */
+	ib_config->seg_size = 1;
+	ib_config->index_table_offset = ib_id;
+}
+
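+/*
+ * Bind one interrupt block (IB) to every TxQ and every CQ. TxQ IBs add
+ * inter-packet coalescing on top of the Tx coalescing timer; CQ IBs use
+ * the Rx coalescing timer.
+ */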
+static void bnad_ibs_init(struct bnad *bnad)
+{
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+	struct bnad_txq_info *txqinfo;
+
+	int ib_id, i;
+
+	ib_id = 0;
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		ib_entry = &bnad->ib_table[ib_id];
+
+		txqinfo->hw_consumer_index = ib_entry->ib_seg_addr;
+		txqinfo->txq_config.ib_id = ib_id;
+		txqinfo->txq_config.ib_seg_index = 0;
+
+		ib_entry->ib = &txqinfo->ib;
+		ib_config = &ib_entry->ib_config;
+		ib_config->coalescing_timer = bnad->tx_coalescing_timeo;
+		ib_config->control_flags =
+			BNA_IB_CF_INTER_PKT_DMA | BNA_IB_CF_INT_ENABLE |
+			BNA_IB_CF_COALESCING_MODE | BNA_IB_CF_MASTER_ENABLE;
+		if (bnad->config & BNAD_CF_MSIX) {
+			ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+			ib_config->msix_vector = ib_id;
+		} else
+			ib_config->msix_vector = 1 << ib_id;
+		ib_config->interpkt_count = bnad->tx_interpkt_count;
+
+		/* Every TxQ has its own IB. */
+		ib_config->seg_size = 1;
+		ib_config->index_table_offset = ib_id;
+		ib_id++;
+	}
+
+	for (i = 0; i < bnad->cq_num; i++, ib_id++)
+		bnad_rxib_init(bnad, i, ib_id);
+}
+
+static void
+bnad_txf_init(struct bnad *bnad, uint txf_id)
+{
+	struct bnad_txf_info *txf_info;
+
+	BUG_ON(!(bnad->txf_table && txf_id < bnad->txf_num));
+	txf_info = &bnad->txf_table[txf_id];
+	txf_info->txf_id = txf_id;
+	txf_info->txf_config.flags =
+		BNA_TXF_CF_VLAN_WI_BASED | BNA_TXF_CF_ENABLE;
+}
+
+void
+bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss)
+{
+	struct bnad_rxf_info *rxf_info;
+
+	BUG_ON(!(bnad->rxf_table && rxf_id < bnad->rxf_num));
+	rxf_info = &bnad->rxf_table[rxf_id];
+	rxf_info->rxf_id = rxf_id;
+	rxf_info->rxf_config.rit_offset = rit_offset;
+	rxf_info->rxf_config.mcast_rxq_id = BNAD_MULTICAST_RXQ_ID;
+	if (bnad_small_large_rxbufs)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_SM_LG_RXQ;
+	if (bnad_vlan_strip)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_VLAN_STRIP;
+	if (rss) {
+		struct bna_rxf_rss *rxf_rss;
+
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_RSS_ENABLE;
+		rxf_rss = &rxf_info->rxf_config.rss;
+		rxf_rss->type =
+			BNA_RSS_V4_TCP | BNA_RSS_V4_IP | BNA_RSS_V6_TCP |
+			BNA_RSS_V6_IP;
+		rxf_rss->hash_mask = bnad->cq_num - 1;
+		get_random_bytes(rxf_rss->toeplitz_hash_key,
+				 sizeof(rxf_rss->toeplitz_hash_key));
+	}
+}
+
+static int bnad_init_funcs(struct bnad *bnad)
+{
+	bnad->txf_table =
+		kcalloc(bnad->txf_num, sizeof(struct bnad_txf_info),
+			GFP_KERNEL);
+	if (!bnad->txf_table)
+		return -ENOMEM;
+	bnad_txf_init(bnad, BNAD_TX_FUNC_ID);
+
+	bnad->rxf_table =
+		kcalloc(bnad->rxf_num, sizeof(struct bnad_rxf_info),
+			GFP_KERNEL);
+	if (!bnad->rxf_table)
+		return -ENOMEM;
+	bnad_rxf_init(bnad, BNAD_RX_FUNC_ID, BNAD_RIT_OFFSET,
+		      (bnad->cq_num > 1) ? 1 : 0);
+	return 0;
+}
+
+static void bnad_setup_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BUG_ON(!(txq_id < bnad->txq_num));
+	txqinfo = &bnad->txq_table[txq_id];
+
+	/* CEE state should not change while we do this */
+	spin_lock_irq(&bnad->priv_lock);
+	if (!bnad->cee_linkup) {
+		txqinfo->txq_config.priority = bnad->curr_priority = txq_id;
+		clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	} else {
+		txqinfo->txq_config.priority = bnad->curr_priority =
+			bnad->priority;
+		set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	}
+	/*  Set wrr_quota properly if multiple priorities/TxQs are enabled. */
+	txqinfo->txq_config.wrr_quota = BNAD_TX_MAX_WRR_QUOTA;
+	bna_txq_config(bnad->priv, &txqinfo->txq, txq_id, &txqinfo->txq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BUG_ON(!(rxq_id < bnad->rxq_num));
+	rxqinfo = &bnad->rxq_table[rxq_id];
+
+	/*
+	 * Every RxQ set has 2 RxQs: the first is large buffer RxQ,
+	 * the second is small buffer RxQ.
+	 */
+	if ((rxq_id % bnad_rxqs_per_cq) == 0)
+		rxqinfo->rxq_config.buffer_size =
+			(bnad_vlan_strip ? VLAN_ETH_HLEN : ETH_HLEN) +
+			bnad->netdev->mtu + ETH_FCS_LEN;
+	else
+		rxqinfo->rxq_config.buffer_size = BNAD_SMALL_RXBUF_SIZE;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxq_config(bnad->priv, &rxqinfo->rxq, rxq_id, &rxqinfo->rxq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BUG_ON(!(cq_id < bnad->cq_num));
+	cqinfo = &bnad->cq_table[cq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_cq_config(bnad->priv, &cqinfo->cq, cq_id, &cqinfo->cq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_queues(struct bnad *bnad)
+{
+	uint i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_txq(bnad, i);
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_setup_rxq(bnad, i);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_cq(bnad, i);
+}
+
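+/*
+ * Program the RxQ indirection table: each entry maps a CQ to its large
+ * buffer RxQ and, when small/large buffers are enabled, its small
+ * buffer RxQ.
+ */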
+static void bnad_setup_rit(struct bnad *bnad)
+{
+	int i, size;
+
+	size = bnad->cq_num;
+	for (i = 0; i < size; i++) {
+		if (bnad_small_large_rxbufs) {
+			bnad->rit[i].large_rxq_id = (i << 1);
+			bnad->rit[i].small_rxq_id = (i << 1) + 1;
+		} else {
+			bnad->rit[i].large_rxq_id = i;
+		}
+	}
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rit_config_set(bnad->priv, BNAD_RIT_OFFSET, bnad->rit, size);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo = &bnad->rxq_table[rxq_id];
+	u16 rxbufs;
+
+	BUG_ON(!(bnad->rxq_table && rxq_id < bnad->rxq_num));
+	bnad_alloc_rxbufs(rxqinfo);
+	rxbufs = BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+				   rxqinfo->skb_unmap_q.q_depth);
+}
+
+static int bnad_config_hw(struct bnad *bnad)
+{
+	int i, err = 0;
+	u64 rxq_id_mask = 0;
+	struct sockaddr sa;
+	struct net_device *netdev = bnad->netdev;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Disable the RxF until later bringing port up. */
+	bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+	for (i = 0; i < bnad->txq_num; i++) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_txq(bnad, i);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_queues(bnad);
+
+	bnad_setup_rit(bnad);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txf_config_set(bnad->priv, BNAD_TX_FUNC_ID,
+			   &bnad->txf_table->txf_config);
+	for (i = 0; i < bnad->rxf_num; i++) {
+		bna_rxf_config_set(bnad->priv, i,
+				   &bnad->rxf_table[i].rxf_config);
+		bna_rxf_vlan_filter(bnad->priv, i, BNA_ENABLE);
+	}
+
+	spin_unlock_irq(&bnad->priv_lock);
+	/* Mailbox should be enabled before this! */
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	err = bnad_set_mac_address_locked(netdev, &sa);
+	spin_lock_irq(&bnad->priv_lock);
+	if (err || BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Receive broadcasts */
+	bna_rxf_broadcast(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+
+	bna_mtu_info(bnad->priv, netdev->mtu, bnad);
+	bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+
+	bna_rxf_mcast_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	bna_mcast_mac_reset_list(bnad->priv);
+
+	bnad_set_rx_mode_locked(bnad->netdev);
+
+	bnad_reconfig_vlans(bnad);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_ibs(bnad);
+	return 0;
+
+unlock_and_return:
+	if (BNAD_NOT_READY(bnad))
+		err = BNA_FAIL;
+	spin_unlock_irq(&bnad->priv_lock);
+	return err;
+}
+
+/* Note: bnad_cleanup doesn't free irqs */
+static void bnad_cleanup(struct bnad *bnad)
+{
+	kfree(bnad->rit);
+	bnad->rit = NULL;
+
+	kfree(bnad->txf_table);
+	bnad->txf_table = NULL;
+
+	kfree(bnad->rxf_table);
+	bnad->rxf_table = NULL;
+
+	bnad_free_ibs(bnad);
+	bnad_free_queues(bnad);
+}
+
+/* Should be called with rtnl_lock held. */
+static int bnad_init(struct bnad *bnad)
+{
+	int err;
+
+	err = bnad_alloc_ibs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_init_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad_ibs_init(bnad);
+
+	err = bnad_init_funcs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_alloc_unmap_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad->rit =
+		kcalloc(bnad->cq_num, sizeof(struct bna_rit_entry),
+			GFP_KERNEL);
+	if (!bnad->rit)
+		goto finished;
+
+	return 0;
+
+finished:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+static int bnad_enable_locked(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int err = 0;
+	uint i;
+
+	bnad->state = BNAD_S_OPENING;
+
+	err = bnad_init(bnad);
+	if (err) {
+		pr_info("%s init failed %d", netdev->name, err);
+		bnad->state = BNAD_S_INIT;
+		return err;
+	}
+
+	err = bnad_config_hw(bnad);
+	if (err) {
+		pr_info("%s config HW failed %d", netdev->name, err);
+		goto init_failed;
+	}
+
+	err = bnad_request_txrx_irqs(bnad);
+	if (err) {
+		pr_info("%s requests Tx/Rx irqs failed: %d",
+			bnad->netdev->name, err);
+		goto init_failed;
+	}
+	bnad_napi_init(bnad);
+	bnad_napi_enable(bnad);
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_alloc_for_rxq(bnad, i);
+
+	bnad->state = BNAD_S_OPEN;
+	pr_info("%s is opened", bnad->netdev->name);
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad)) {
+		/* Let bnad_error take care of the error. */
+		spin_unlock_irq(&bnad->priv_lock);
+		return 0;
+	}
+
+	/* RxF was disabled earlier. */
+	bna_rxf_enable(bnad->priv, BNAD_RX_FUNC_ID);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	return 0;
+
+init_failed:
+	bnad_cleanup(bnad);
+	bnad->state = BNAD_S_INIT;
+	return err;
+}
+
+/* Should be called with conf_lock held */
+static int
+bnad_open_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		err = bnad_enable_locked(bnad);
+		break;
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		pr_info("%s is not ready yet: IOC down", netdev->name);
+		break;
+	case BNAD_S_INIT_DISABLED:
+		bnad->state = BNAD_S_OPEN_DISABLED;
+		pr_info("%s is not ready yet: IOC disabled",
+			netdev->name);
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
+	return err;
+}
+
+int
+bnad_open_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (!err && (bnad->state == BNAD_S_OPEN))
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return err;
+}
+
+int
+bnad_open(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+
+	pr_info("%s open", netdev->name);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags))
+		pr_info("%s is disabled", netdev->name);
+	else
+		err = bnad_open_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static int bnad_disable_locked(struct bnad *bnad)
+{
+	int err = 0, i;
+	u64 rxq_id_mask = 0;
+
+	bnad_stop_data_path(bnad, 0);
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			goto cleanup;
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			goto cleanup;
+	}
+
+cleanup:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+int
+bnad_stop_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	return bnad_stop_locked_internal(netdev);
+}
+
+int
+bnad_stop(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+
+	pr_info("%s stop", netdev->name);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags))
+		pr_info("%s port is disabled", netdev->name);
+	else
+		err = bnad_stop_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	err = bnad_stop_locked_internal(netdev);
+	if (err) {
+		pr_info("%s sw reset internal: stop failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (err) {
+		pr_info("%s sw reset internal: open failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+	return 0;
+done:
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (bnad->state != BNAD_S_OPEN)
+		return 0;
+
+	bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	err = bnad_sw_reset_locked_internal(netdev);
+
+	if (err) {
+		pr_info("%s sw reset: failed %d", bnad->netdev->name,
+			err);
+		return err;
+	}
+
+	/* After the reset, make sure we are in the OPEN state. */
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return 0;
+}
+
+static int bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+	int err;
+
+	BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+		 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err) {
+			bnad->stats.tso_err++;
+			return err;
+		}
+	}
+
+	/*
+	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
+	 * excluding the length field.
+	 */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		/* Do we really need these? */
+		iph->tot_len = 0;
+		iph->check = 0;
+
+		tcp_hdr(skb)->check =
+			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+					   IPPROTO_TCP, 0);
+		bnad->stats.tso4++;
+	} else {
+		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+		BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
+		ipv6h->payload_len = 0;
+		tcp_hdr(skb)->check =
+			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+					 IPPROTO_TCP, 0);
+		bnad->stats.tso6++;
+	}
+
+	return 0;
+}
+
+netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	struct bnad_unmap_q *unmap_q;
+	u16 txq_prod, vlan_tag = 0;
+	unsigned int unmap_prod, wis, wis_used, wi_range;
+	unsigned int vectors, vect_id, i, acked;
+	int err;
+	dma_addr_t dma_addr;
+	struct bna_txq_entry *txqent;
+	bna_txq_wi_ctrl_flag_t flags;
+
+	if (unlikely
+	    (skb->len <= ETH_HLEN || skb->len > BNAD_TX_MAX_DATA_PER_WI)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+	unmap_q = &txqinfo->skb_unmap_q;
+
+	vectors = 1 + skb_shinfo(skb)->nr_frags;
+	if (vectors > BNAD_TX_MAX_VECTORS) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	wis = BNAD_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
+	acked = 0;
+	if (unlikely
+	    (wis > BNA_Q_FREE_COUNT(txq) ||
+	     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+		if ((u16) (*txqinfo->hw_consumer_index) !=
+		    txq->q.consumer_index &&
+		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+			acked = bnad_free_txbufs(txqinfo,
+					(u16)(*txqinfo->hw_consumer_index));
+			bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+		} else
+			netif_stop_queue(netdev);
+
+		smp_mb();
+		/*
+		 * Check again to deal with race condition between
+		 * netif_stop_queue here, and netif_wake_queue in
+		 * interrupt handler which is not inside netif tx lock.
+		 */
+		if (likely
+		    (wis > BNA_Q_FREE_COUNT(txq) ||
+		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+			bnad->stats.netif_queue_stop++;
+			return NETDEV_TX_BUSY;
+		} else
+			netif_wake_queue(netdev);
+	}
+
+	unmap_prod = unmap_q->producer_index;
+	wis_used = 1;
+	vect_id = 0;
+	flags = 0;
+
+	txq_prod = txq->q.producer_index;
+	BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent, wi_range);
+	BUG_ON(!(wi_range && wi_range <= txq->q.q_depth));
+	txqent->hdr.wi.reserved = 0;
+	txqent->hdr.wi.num_vectors = vectors;
+	txqent->hdr.wi.opcode =
+		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+		       BNA_TXQ_WI_SEND));
+
+	if (bnad_ipid_mode)
+		flags |= BNA_TXQ_WI_CF_IPID_MODE;
+
+	if (bnad->vlangrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = (u16) vlan_tx_tag_get(skb);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+	if (test_bit(BNAD_F_CEE_RUNNING, &bnad->flags)) {
+		vlan_tag =
+			(bnad->curr_priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+
+	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+	if (skb_is_gso(skb)) {
+		err = bnad_tso_prepare(bnad, skb);
+		if (err) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
+		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+		txqent->hdr.wi.l4_hdr_size_n_offset =
+			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+			      (tcp_hdrlen(skb) >> 2,
+			       skb_transport_offset(skb)));
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = 0;
+
+		txqent->hdr.wi.lso_mss = 0;
+
+		if (skb->protocol == htons(ETH_P_IP))
+			proto = ip_hdr(skb)->protocol;
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* XXX the nexthdr may not be TCP immediately. */
+			proto = ipv6_hdr(skb)->nexthdr;
+		}
+		if (proto == IPPROTO_TCP) {
+			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.tcpcsum_offload++;
+			BUG_ON(!(skb_headlen(skb) >=
+				 skb_transport_offset(skb) + tcp_hdrlen(skb)));
+		} else if (proto == IPPROTO_UDP) {
+			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.udpcsum_offload++;
+			BUG_ON(!(skb_headlen(skb) >=
+				 skb_transport_offset(skb) +
+				 sizeof(struct udphdr)));
+		} else {
+			err = skb_checksum_help(skb);
+			bnad->stats.csum_help++;
+			if (err) {
+				dev_kfree_skb(skb);
+				bnad->stats.csum_help_err++;
+				return NETDEV_TX_OK;
+			}
+		}
+	} else {
+		txqent->hdr.wi.lso_mss = 0;
+		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+	}
+
+	txqent->hdr.wi.flags = htons(flags);
+
+	txqent->hdr.wi.frame_length = htonl(skb->len);
+
+	unmap_q->unmap_array[unmap_prod].skb = skb;
+	BUG_ON(!(skb_headlen(skb) <= BNAD_TX_MAX_DATA_PER_VECTOR));
+	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	dma_addr =
+		pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+			       PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+			   dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		if (++vect_id == BNAD_TX_MAX_VECTORS_PER_WI) {
+			vect_id = 0;
+			if (--wi_range)
+				txqent++;
+			else {
+				BNA_QE_INDX_ADD(txq_prod, wis_used,
+						txq->q.q_depth);
+				wis_used = 0;
+				BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent,
+						     wi_range);
+				BUG_ON(!(wi_range &&
+					   wi_range <= txq->q.q_depth));
+			}
+			wis_used++;
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+		}
+
+		BUG_ON(!(frag->size <= BNAD_TX_MAX_DATA_PER_VECTOR));
+		txqent->vector[vect_id].length = htons(frag->size);
+		BUG_ON(!(unmap_q->unmap_array[unmap_prod].skb == NULL));
+		dma_addr =
+			pci_map_page(bnad->pcidev, frag->page,
+				     frag->page_offset, frag->size,
+				     PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+	}
+
+	unmap_q->producer_index = unmap_prod;
+	BNA_QE_INDX_ADD(txq_prod, wis_used, txq->q.q_depth);
+	txq->q.producer_index = txq_prod;
+
+	smp_mb();
+	bna_txq_prod_indx_doorbell(txq);
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index)
+		tasklet_schedule(&bnad->tx_free_tasklet);
+
+	return NETDEV_TX_OK;
+}
+
+struct net_device_stats *bnad_get_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &bnad->net_stats;
+	struct cna_stats_mac_rx *rxstats = &bnad->hw_stats->mac_rx_stats;
+	struct cna_stats_mac_tx *txstats = &bnad->hw_stats->mac_tx_stats;
+	int i;
+
+	memset(net_stats, 0, sizeof(*net_stats));
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			net_stats->rx_packets += bnad->rxq_table[i].rx_packets;
+			net_stats->rx_bytes += bnad->rxq_table[i].rx_bytes;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			net_stats->tx_packets += bnad->txq_table[i].tx_packets;
+			net_stats->tx_bytes += bnad->txq_table[i].tx_bytes;
+		}
+	}
+	net_stats->rx_errors =
+		rxstats->rx_fcs_error + rxstats->rx_alignment_error +
+		rxstats->rx_frame_length_error + rxstats->rx_code_error +
+		rxstats->rx_undersize;
+	net_stats->tx_errors = txstats->tx_fcs_error + txstats->tx_undersize;
+	net_stats->rx_dropped = rxstats->rx_drop;
+	net_stats->tx_dropped = txstats->tx_drop;
+	net_stats->multicast = rxstats->rx_multicast;
+	net_stats->collisions = txstats->tx_total_collision;
+
+	net_stats->rx_length_errors = rxstats->rx_frame_length_error;
+	net_stats->rx_crc_errors = rxstats->rx_fcs_error;
+	net_stats->rx_frame_errors = rxstats->rx_alignment_error;
+	/* recv'r fifo overrun */
+	net_stats->rx_fifo_errors = bnad->hw_stats->rxf_stats[0].frame_drops;
+
+	return net_stats;
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_set_rx_mode_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (netdev->flags & IFF_PROMISC) {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+		bnad->config |= BNAD_CF_PROMISC;
+	} else {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_DISABLE);
+		bnad->config &= ~BNAD_CF_PROMISC;
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		if (!(bnad->config & BNAD_CF_ALLMULTI)) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_DISABLE);
+			bnad->config |= BNAD_CF_ALLMULTI;
+		}
+	} else {
+		if (bnad->config & BNAD_CF_ALLMULTI) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_ENABLE);
+			bnad->config &= ~BNAD_CF_ALLMULTI;
+		}
+	}
+
+	if (netdev->mc_count) {
+		struct mac *mcaddr_list;
+		struct dev_mc_list *mc;
+		int i;
+
+		mcaddr_list =
+			kcalloc((netdev->mc_count + 1), sizeof(struct mac),
+				GFP_ATOMIC);
+		if (!mcaddr_list)
+			return;
+
+		mcaddr_list[0] = bna_bcast_addr;
+
+		mc = netdev->mc_list;
+		for (i = 1; mc && i < netdev->mc_count + 1; i++, mc = mc->next)
+			memcpy(&mcaddr_list[i], mc->dmi_addr,
+				sizeof(struct mac));
+
+		err = bna_rxf_mcast_mac_set_list(bnad->priv, BNAD_RX_FUNC_ID,
+			(const struct mac *)mcaddr_list,
+				 netdev->mc_count + 1);
+
+		/* XXX Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+
+		kfree(mcaddr_list);
+	}
+}
+
+static void bnad_set_rx_mode(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bnad_set_rx_mode_locked(netdev);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held. */
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 *mac_ptr,
+	unsigned int cmd)
+{
+	int err = 0;
+	enum bna_status(*ucast_mac_func) (struct bna_dev *bna_dev,
+		unsigned int rxf_id, const struct mac *mac_addr_ptr) = NULL;
+
+	WARN_ON(in_interrupt());
+	if (!is_valid_ether_addr(mac_ptr))
+		return -EINVAL;
+
+	switch (cmd) {
+	case BNAD_UCAST_MAC_SET:
+		ucast_mac_func = bna_rxf_ucast_mac_set;
+		break;
+	case BNAD_UCAST_MAC_ADD:
+		ucast_mac_func = bna_rxf_ucast_mac_add;
+		break;
+	case BNAD_UCAST_MAC_DEL:
+		ucast_mac_func = bna_rxf_ucast_mac_del;
+		break;
+	}
+
+	init_completion(&bnad->ucast_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = ucast_mac_func(bnad->priv, rxf_id, (const struct mac *)mac_ptr);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto ucast_mac_exit;
+	}
+	wait_for_completion(&bnad->ucast_comp);
+	err = bnad->ucast_comp_status;
+	if (err == BFI_LL_CMD_NOT_EXEC)
+		err = 0;
+
+ucast_mac_exit:
+	if (err) {
+		pr_info("%s unicast MAC address command %d failed: %d",
+			bnad->netdev->name, cmd, err);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int err;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	err = bnad_ucast_mac(bnad, BNAD_RX_FUNC_ID, (u8 *) sa->sa_data,
+			     BNAD_UCAST_MAC_SET);
+	if (err)
+		return err;
+
+	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+	return 0;
+}
+
+static int bnad_set_mac_address(struct net_device *netdev, void *addr)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	err = bnad_set_mac_address_locked(netdev, addr);
+	bnad_conf_unlock();
+	return err;
+}
+
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	WARN_ON(in_interrupt());
+
+	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+		return -EINVAL;
+
+	bnad_conf_lock();
+	netdev->mtu = new_mtu;
+	err = bnad_sw_reset_locked(netdev);
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static void bnad_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *grp)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	bnad->vlangrp = grp;
+	bnad_conf_unlock();
+}
+
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+static void bnad_vlan_rx_kill_vid(struct net_device *netdev,
+	unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_del(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_reconfig_vlans(struct bnad *bnad)
+{
+	u16 vlan_id;
+
+	bna_rxf_vlan_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	if (bnad->vlangrp) {
+		for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
+			if (vlan_group_get_device(bnad->vlangrp, vlan_id))
+				bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+						 (unsigned int)vlan_id);
+		}
+	}
+}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnad_netpoll(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_cq_info *cqinfo;
+	int i;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		disable_irq(bnad->pcidev->irq);
+		bnad_isr(bnad->pcidev->irq, netdev);
+		enable_irq(bnad->pcidev->irq);
+	} else {
+		for (i = 0; i < bnad->cq_num; i++) {
+			cqinfo = &bnad->cq_table[i];
+			if (likely(napi_schedule_prep(&cqinfo->napi))) {
+				bnad_disable_rx_irq(bnad, cqinfo);
+				__napi_schedule(&cqinfo->napi);
+			}
+		}
+	}
+}
+#endif
+
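+/*
+ * Compute the TxQ/RxQ/CQ/RxF counts and the number of MSI-X vectors.
+ * In MSI-X mode one CQ is used per online CPU (capped and rounded down
+ * to a power of two); in INTx mode a single CQ is used.
+ */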
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets)
+{
+	bnad->txq_num = BNAD_TXQ_NUM;
+	bnad->txf_num = 1;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (rxqsets) {
+			bnad->cq_num = rxqsets;
+			if (bnad->cq_num > BNAD_MAX_CQS)
+				bnad->cq_num = BNAD_MAX_CQS;
+		} else
+			bnad->cq_num =
+				min((uint) num_online_cpus(),
+				    (uint) BNAD_MAX_RXQSETS_USED);
+		/* VMware does not use RSS like Linux driver */
+		if (!BNA_POWER_OF_2(bnad->cq_num))
+			BNA_TO_POWER_OF_2(bnad->cq_num);
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+
+		bnad->rxf_num = 1;
+		bnad->msix_num =
+			bnad->txq_num + bnad->cq_num +
+			BNAD_MSIX_ERR_MAILBOX_NUM;
+	} else {
+		bnad->cq_num = 1;
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+		bnad->rxf_num = 1;
+		bnad->msix_num = 0;
+	}
+}
+
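+/*
+ * Allocate the vector table and enable MSI-X. On a partial grant, shrink
+ * the RxQ set count and retry; fall back to INTx (no RSS) on failure.
+ */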
+static void bnad_enable_msix(struct bnad *bnad)
+{
+	int i, ret;
+
+	if (!(bnad->config & BNAD_CF_MSIX) || bnad->msix_table)
+		return;
+
+	bnad->msix_table =
+		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+	if (!bnad->msix_table)
+		goto intx_mode;
+
+	for (i = 0; i < bnad->msix_num; i++)
+		bnad->msix_table[i].entry = i;
+
+	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+	if (ret > 0) {
+		/* Not enough MSI-X vectors. */
+		int rxqsets = ret;
+
+		dev_err(&bnad->pcidev->dev,
+			"Tried to get %d MSI-X vectors, only got %d\n",
+			bnad->msix_num, ret);
+		BNA_TO_POWER_OF_2(rxqsets);
+		while (bnad->msix_num > ret && rxqsets) {
+			bnad_q_num_init(bnad, rxqsets);
+			rxqsets >>= 1;
+		}
+		if (bnad->msix_num <= ret) {
+			ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+					      bnad->msix_num);
+			if (ret) {
+				dev_err(&bnad->pcidev->dev,
+					"Enabling MSI-X failed: %d\n", ret);
+				goto intx_mode;
+			}
+		} else {
+			dev_err(&bnad->pcidev->dev,
+				"Enabling MSI-X failed: limited (%d) vectors\n",
+				ret);
+			goto intx_mode;
+		}
+	} else if (ret < 0) {
+		dev_err(&bnad->pcidev->dev, "Enabling MSI-X failed: %d\n", ret);
+		goto intx_mode;
+	}
+
+	dev_info(&bnad->pcidev->dev,
+		 "Enabling MSI-X succeeded with %d vectors, %s\n",
+		 bnad->msix_num,
+		 (bnad->cq_num > 1) ? "RSS is enabled" : "RSS is not enabled");
+	return;
+
+intx_mode:
+	dev_warn(&bnad->pcidev->dev, "Switching to INTx mode with no RSS\n");
+
+	kfree(bnad->msix_table);
+	bnad->msix_table = NULL;
+
+	bnad->config &= ~BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, 0);
+}
+
+static void bnad_disable_msix(struct bnad *bnad)
+{
+	if (bnad->config & BNAD_CF_MSIX) {
+		pci_disable_msix(bnad->pcidev);
+		kfree(bnad->msix_table);
+		bnad->msix_table = NULL;
+		bnad->config &= ~BNAD_CF_MSIX;
+	}
+}
+
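+/*
+ * Handle a hardware error reported via BNAD_F_HWERROR: move the state
+ * machine to the matching DOWN state and tear down the data path if the
+ * port was open.
+ */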
+static void bnad_error(struct bnad *bnad)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	if (!test_and_clear_bit(BNAD_F_HWERROR, &bnad->flags)) {
+		spin_unlock_irq(&bnad->priv_lock);
+		return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		bnad_stop_data_path(bnad, 1);
+		bnad_cleanup(bnad);
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BUG_ON(1);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
+static void bnad_resume_after_reset(struct bnad *bnad)
+{
+	int err;
+	struct net_device *netdev = bnad->netdev;
+
+	switch (bnad->state) {
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT;
+
+		bna_port_mac_get(bnad->priv, (struct mac *)netdev->perm_addr);
+		if (is_zero_ether_addr(netdev->dev_addr))
+			memcpy(netdev->dev_addr, netdev->perm_addr,
+			       netdev->addr_len);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		err = bnad_enable_locked(bnad);
+		if (err) {
+			pr_info("%s bnad_enable failed after reset: %d",
+				bnad->netdev->name, err);
+		} else {
+			bnad_port_admin_locked(bnad, BNA_ENABLE);
+		}
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BUG_ON(1);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
+static void bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+	struct bnad *bnad = (struct bnad *)bnad_ptr;
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	unsigned int acked;
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index &&
+	    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+		acked = bnad_free_txbufs(txqinfo,
+					 (u16)(*txqinfo->hw_consumer_index));
+		bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+	}
+}
+
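+/*
+ * If the CEE priority changed, perform an internal sw reset so the TxQ
+ * priority is reprogrammed; otherwise just update the CEE running flag.
+ */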
+static void bnad_cee_reconfig_prio(struct bnad *bnad, u8 cee_linkup,
+	unsigned int prio)
+{
+	if (prio != bnad->curr_priority) {
+		bnad_sw_reset_locked_internal(bnad->netdev);
+	} else {
+		spin_lock_irq(&bnad->priv_lock);
+		if (!cee_linkup)
+			clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		else
+			set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		spin_unlock_irq(&bnad->priv_lock);
+	}
+}
+
+static void bnad_link_state_notify(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+	unsigned int prio = 0;
+
+	if (bnad->state != BNAD_S_OPEN) {
+		pr_info("%s link state change in state %d", netdev->name,
+			bnad->state);
+		return;
+	}
+
+	spin_lock_irq(&bnad->priv_lock);
+	link_state = bnad->link_state;
+	cee_linkup = bnad->cee_linkup;
+	if (cee_linkup)
+		prio = bnad->priority;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (link_state == BNAD_LS_UP) {
+		bnad_cee_reconfig_prio(bnad, cee_linkup, prio);
+		if (!netif_carrier_ok(netdev)) {
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			netif_carrier_off(netdev);
+			bnad->stats.netif_queue_stop++;
+		}
+	}
+}
+
+static void bnad_work(struct work_struct *work)
+{
+	struct bnad *bnad = container_of(work, struct bnad, work);
+	unsigned long work_flags;
+
+	bnad_conf_lock();
+
+	spin_lock_irq(&bnad->priv_lock);
+	work_flags = bnad->work_flags;
+	bnad->work_flags = 0;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (work_flags & BNAD_WF_ERROR)
+		bnad_error(bnad);
+	if (work_flags & BNAD_WF_RESETDONE)
+		bnad_resume_after_reset(bnad);
+
+	if (work_flags & BNAD_WF_LS_NOTIFY)
+		bnad_link_state_notify(bnad);
+
+	bnad_conf_unlock();
+}
+
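+/*
+ * Stats timer callback: fetch the HW stats, adapt the Rx coalescing
+ * timer to the observed packet rate if dynamic coalescing is enabled,
+ * and refill RxQs that have dropped below the refill threshold.
+ */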
+static void bnad_stats_timeo(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	int i;
+	struct bnad_rxq_info *rxqinfo;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_stats_get(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->rx_dyn_coalesce_on) {
+		u8 cls_timer;
+		struct bnad_cq_info *cq;
+		for (i = 0; i < bnad->cq_num; i++) {
+			cq = &bnad->cq_table[i];
+
+			if ((cq->pkt_rate.small_pkt_cnt == 0) &&
+			    (cq->pkt_rate.large_pkt_cnt == 0))
+				continue;
+
+			cls_timer =
+				bna_calc_coalescing_timer(bnad->priv,
+							  &cq->pkt_rate);
+
+			/* For NAPI, the coalescing timer must be stored. */
+			cq->rx_coalescing_timeo = cls_timer;
+
+			bna_ib_coalescing_timer_set(bnad->priv, &cq->ib,
+						    cls_timer);
+		}
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		if (!(BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+					rxqinfo->skb_unmap_q.q_depth) >>
+		      BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) {
+			if (test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags))
+				continue;
+			bnad_alloc_rxbufs(rxqinfo);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+		}
+	}
+}
+
+static void
+bnad_free_ioc_mem(struct bnad *bnad)
+{
+	enum bna_dma_mem_type i;
+
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		if (bnad->ioc_meminfo[i].kva && bnad->ioc_meminfo[i].dma)
+			pci_free_consistent(bnad->pcidev,
+					    bnad->ioc_meminfo[i].len,
+					    bnad->ioc_meminfo[i].kva,
+					    *(dma_addr_t *)
+					    &bnad->ioc_meminfo[i].dma);
+		else if (bnad->ioc_meminfo[i].kva)
+			vfree(bnad->ioc_meminfo[i].kva);
+		bnad->ioc_meminfo[i].kva = NULL;
+	}
+}
+
+/* The following IOC callback functions are called with priv_lock held. */
+
+void
+bna_iocll_enable_cbfn(void *arg, enum bfa_status error)
+{
+	struct bnad *bnad = arg;
+
+	if (!error) {
+		bnad->work_flags &= ~BNAD_WF_LS_NOTIFY;
+		bnad->work_flags |= BNAD_WF_RESETDONE;
+
+		if (bnad->state != BNAD_S_UNLOADING)
+			schedule_work(&bnad->work);
+	}
+
+	bnad->ioc_comp_status = error;
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_disable_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_hbfail_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	bnad_hw_error(bnad, BFA_STATUS_IOC_FAILURE);
+}
+
+void
+bna_iocll_reset_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+	u32 int_status, int_mask;
+	unsigned int irq;
+
+	/* Clear the status */
+	bna_intr_status_get(bnad->priv, &int_status);
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (test_and_clear_bit(BNAD_F_MBOX_IRQ_DISABLED,
+		    &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			enable_irq(irq);
+		}
+	}
+
+	int_mask = ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS);
+	bna_intx_enable(bnad->priv, int_mask);
+}
+
+s32
+bnad_cee_attach(struct bnad *bnad)
+{
+	u8 *dma_kva;
+	dma_addr_t dma_pa;
+	struct bfa_cee *cee = &bnad->cee;
+
+	memset(cee, 0, sizeof(struct bfa_cee));
+
+	/* Allocate memory for dma */
+	dma_kva =
+		pci_alloc_consistent(bnad->pcidev, bfa_cee_meminfo(), &dma_pa);
+	if (dma_kva == NULL)
+		return -ENOMEM;
+
+	/* Ugly... need to remove once CAL is fixed. */
+	((struct bna_dev *) bnad->priv)->cee = cee;
+
+	bnad->cee_cbfn.get_attr_cbfn = bnad_cee_get_attr_cb;
+	bnad->cee_cbfn.get_stats_cbfn = bnad_cee_get_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = bnad_cee_reset_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = NULL;
+
+	/* Invoke cee attach function */
+	bfa_cee_attach(cee, &bnad->priv->ioc, bnad, bnad->trcmod, bnad->logmod);
+	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
+	return 0;
+}
+
+static void bnad_cee_detach(struct bnad *bnad)
+{
+	struct bfa_cee *cee = &bnad->cee;
+
+	if (cee->attr_dma.kva) {
+		pci_free_consistent(bnad->pcidev, bfa_cee_meminfo(),
+				    cee->attr_dma.kva, cee->attr_dma.pa);
+	}
+}
+
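+/*
+ * Per-port initialization: allocate trace, stats and IOC memory, set up
+ * the BNA layer and its callbacks, enable MSI-X and the mailbox IRQ,
+ * then enable the IOC and wait for it to come up.
+ */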
+static int bnad_priv_init(struct bnad *bnad)
+{
+	dma_addr_t dma_addr;
+	struct bna_dma_addr bna_dma_addr;
+	int err = 0, i;
+	struct bfa_pcidev pcidev_info;
+	u32 intr_mask;
+
+	if (bnad_msix)
+		bnad->config |= BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, bnad_rxqsets_used);
+
+	bnad->work_flags = 0;
+	INIT_WORK(&bnad->work, bnad_work);
+
+	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+		     (unsigned long)bnad);
+
+	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
+		    (unsigned long)bnad);
+
+	bnad->tx_coalescing_timeo = BNAD_TX_COALESCING_TIMEO;
+	bnad->tx_interpkt_count = BNAD_TX_INTERPKT_COUNT;
+
+	bnad->rx_coalescing_timeo = BNAD_RX_COALESCING_TIMEO;
+	bnad->rx_interpkt_count = BNAD_RX_INTERPKT_COUNT;
+	bnad->rx_interpkt_timeo = BNAD_RX_INTERPKT_TIMEO;
+
+	bnad->rx_dyn_coalesce_on = true;
+
+	bnad->rx_csum = 1;
+	bnad->pause_config.tx_pause = 0;
+	bnad->pause_config.rx_pause = 0;
+
+	/* XXX could be vmalloc? */
+	bnad->trcmod = kzalloc(sizeof(struct bfa_trc_mod), GFP_KERNEL);
+	if (!bnad->trcmod) {
+		printk(KERN_ERR "port %u failed allocating trace buffer!\n",
+		       bnad->bna_id);
+		return -ENOMEM;
+	}
+
+	bfa_trc_init(bnad->trcmod);
+
+	bnad->logmod = NULL;
+
+	bnad->priv = kzalloc(bna_get_handle_size(), GFP_KERNEL);
+	if (!bnad->priv) {
+		printk(KERN_ERR "port %u failed allocating memory for bna\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_trcmod;
+	}
+	bnad->priv_stats =
+		pci_alloc_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+				     &dma_addr);
+	if (!bnad->priv_stats) {
+		printk(KERN_ERR
+		       "port %u failed allocating memory for bna stats\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_priv_mem;
+	}
+	pci_unmap_addr_set(bnad, priv_stats_dma, dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &bna_dma_addr);
+	bna_init(bnad->priv, (void *)bnad->bar0, bnad->priv_stats, bna_dma_addr,
+		 bnad->trcmod, bnad->logmod);
+	bna_all_stats_get(bnad->priv, &bnad->hw_stats);
+
+	spin_lock_init(&bnad->priv_lock);
+	init_MUTEX(&bnad->conf_sem);
+	bnad->priv_cbfn.ucast_set_cb = bnad_ucast_set_cb;
+	bnad->priv_cbfn.txq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.rxq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.link_up_cb = bnad_link_up_cb;
+	bnad->priv_cbfn.link_down_cb = bnad_link_down_cb;
+	bnad->priv_cbfn.stats_get_cb = bnad_stats_get_cb;
+	bnad->priv_cbfn.hw_error_cb = bnad_hw_error_cb;
+	bnad->priv_cbfn.lldp_get_cfg_cb = bnad_lldp_get_cfg_cb;
+
+	bna_register_callback(bnad->priv, &bnad->priv_cbfn, bnad);
+
+	bna_iocll_meminfo(bnad->priv, bnad->ioc_meminfo);
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		switch (i) {
+		case BNA_KVA_MEM_T_FWTRC:
+			bnad->ioc_meminfo[i].kva =
+				vmalloc(bnad->ioc_meminfo[i].len);
+			break;
+		default:
+			bnad->ioc_meminfo[i].kva =
+				pci_alloc_consistent(bnad->pcidev,
+						     bnad->ioc_meminfo[i].len,
+						     (dma_addr_t *)
+						     &bnad->ioc_meminfo[i].dma);
+
+			break;
+		}
+		if (!bnad->ioc_meminfo[i].kva) {
+			printk(KERN_ERR
+			       "port %u failed allocating %u "
+			       "bytes memory for IOC\n",
+			       bnad->bna_id, bnad->ioc_meminfo[i].len);
+			err = -ENOMEM;
+			goto free_ioc_mem;
+		} else
+			memset(bnad->ioc_meminfo[i].kva, 0,
+			       bnad->ioc_meminfo[i].len);
+	}
+
+	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+	pcidev_info.device_id = bnad->pcidev->device;
+	pcidev_info.pci_bar_kva = bnad->bar0;
+	bna_iocll_attach(bnad->priv, bnad, bnad->ioc_meminfo, &pcidev_info,
+			 bnad->trcmod, NULL, bnad->logmod);
+
+	err = bnad_cee_attach(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u cee_attach failed: %d\n", bnad->bna_id,
+		       err);
+		goto iocll_detach;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bnad_enable_msix(bnad);
+	else
+		dev_info(&bnad->pcidev->dev, "Working in INTx mode, no RSS\n");
+	bna_intx_disable(bnad->priv, &intr_mask);
+	err = bnad_request_mbox_irq(bnad);
+	if (err)
+		goto disable_msix;
+
+	bnad_conf_lock();
+	bnad->state = BNAD_S_START;
+
+	init_completion(&bnad->ioc_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_enable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	wait_for_completion(&bnad->ioc_comp);
+
+	if (!bnad->ioc_comp_status) {
+		bnad->state = BNAD_S_INIT;
+		bna_port_mac_get(bnad->priv,
+		(struct mac *)bnad->netdev->perm_addr);
+	} else {
+		bnad->state = BNAD_S_INIT_DOWN;
+	}
+	bnad_conf_unlock();
+
+	return 0;
+
+disable_msix:
+	bnad_disable_msix(bnad);
+	bnad_cee_detach(bnad);
+iocll_detach:
+	bna_iocll_detach(bnad->priv);
+free_ioc_mem:
+	bnad_free_ioc_mem(bnad);
+	pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE, bnad->priv_stats,
+			    pci_unmap_addr(bnad, priv_stats_dma));
+	bnad->priv_stats = NULL;
+free_priv_mem:
+	kfree(bnad->priv);
+	bnad->priv = NULL;
+free_trcmod:
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+
+	return err;
+}
+
+static void bnad_priv_uninit(struct bnad *bnad)
+{
+	int i;
+	enum bna_status err;
+
+	if (bnad->priv) {
+
+		init_completion(&bnad->ioc_comp);
+
+		for (i = 0; i < 10; i++) {
+			spin_lock_irq(&bnad->priv_lock);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BUG_ON(err && err != BNA_BUSY);
+			if (!err)
+				break;
+			msleep(1000);
+		}
+		if (err) {
+			/* Probably firmware crashed. */
+			pr_info("bna_iocll_disable failed, "
+				"clean up and try again");
+			spin_lock_irq(&bnad->priv_lock);
+			bna_cleanup(bnad->priv);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BUG_ON(err);
+		}
+		wait_for_completion(&bnad->ioc_comp);
+
+		pr_info("port %u IOC is disabled", bnad->bna_id);
+
+		bnad->state = BNAD_S_UNLOADING;
+
+		del_timer_sync(&bnad->priv->ioc.ioc_timer);
+		del_timer_sync(&bnad->priv->ioc.hb_timer);
+		del_timer_sync(&bnad->priv->ioc.sem_timer);
+
+		bnad_free_ioc_mem(bnad);
+		bna_iocll_detach(bnad->priv);
+
+		flush_scheduled_work();
+		bnad_free_mbox_irq(bnad);
+
+		bnad_disable_msix(bnad);
+
+		if (bnad->priv_stats) {
+			pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+					    bnad->priv_stats,
+					    pci_unmap_addr(bnad,
+							   priv_stats_dma));
+			bnad->priv_stats = NULL;
+		}
+		kfree(bnad->priv);
+		bnad->priv = NULL;
+	}
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+}
+
+static struct pci_device_id bnad_pci_id_table[] = {
+	{
+	 .vendor = PCI_VENDOR_ID_BROCADE,
+	 .device = PCI_DEVICE_ID_BROCADE_CATAPULT,
+	 .subvendor = PCI_ANY_ID,
+	 .subdevice = PCI_ANY_ID,
+	 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+	 .class_mask = 0xffff00},
+	{0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static int __devinit bnad_pci_probe(struct pci_dev *pdev,
+	const struct pci_device_id *pcidev_id)
+{
+	int err, using_dac;
+	struct net_device *netdev;
+	struct bnad *bnad;
+	unsigned long mmio_start, mmio_len;
+	static u32 bna_id;
+
+	printk(KERN_INFO "bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+	down(&bnad_fwimg_sem);
+	if (!bfad_get_firmware_buf(pdev)) { /* Returns size of image */
+		up(&bnad_fwimg_sem);
+		printk(KERN_WARNING "Failed to load Firmware Image!\n");
+		return -ENODEV;
+	}
+	up(&bnad_fwimg_sem);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, BNAD_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed: %d\n", err);
+		goto disable_device;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev,
+				DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev,
+					"set 32bit consistent DMA mask failed: "
+					"%d\n", err);
+				goto release_regions;
+			}
+		}
+		using_dac = 0;
+	}
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct bnad));
+	if (!netdev) {
+		dev_err(&pdev->dev, "alloc_etherdev failed\n");
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	pci_set_drvdata(pdev, netdev);
+
+	bnad = netdev_priv(netdev);
+
+	memset(bnad, 0, sizeof(struct bnad));
+
+	bnad->netdev = netdev;
+	bnad->pcidev = pdev;
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bnad->bar0 = ioremap_nocache(mmio_start, mmio_len);
+	if (!bnad->bar0) {
+		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+		err = -ENOMEM;
+		goto free_devices;
+	}
+	printk(KERN_INFO "bar0 mapped to %p, len %lu\n", bnad->bar0, mmio_len);
+
+	netdev->netdev_ops = &bnad_netdev_ops;
+
+	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_IPV6_CSUM;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+
+	netdev->vlan_features = netdev->features;
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |=
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len - 1;
+
+	bnad_set_ethtool_ops(netdev);
+
+	bnad->bna_id = bna_id;
+	err = bnad_priv_init(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
+		goto unmap_bar0;
+	}
+
+	BUG_ON(!(netdev->addr_len == ETH_ALEN));
+	memcpy(netdev->dev_addr, netdev->perm_addr, netdev->addr_len);
+
+	netif_carrier_off(netdev);
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR "port %u register_netdev failed: %d\n",
+		       bnad->bna_id, err);
+		goto bnad_device_uninit;
+	}
+	bna_id++;
+	return 0;
+
+bnad_device_uninit:
+	bnad_priv_uninit(bnad);
+unmap_bar0:
+	iounmap(bnad->bar0);
+free_devices:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+release_regions:
+	pci_release_regions(pdev);
+disable_device:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void __devexit bnad_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct bnad *bnad;
+
+	if (!netdev)
+		return;
+
+	printk(KERN_INFO "%s bnad_pci_remove\n", netdev->name);
+	bnad = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	bnad_priv_uninit(bnad);
+	iounmap(bnad->bar0);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver bnad_pci_driver = {
+	.name = BNAD_NAME,
+	.id_table = bnad_pci_id_table,
+	.probe = bnad_pci_probe,
+	.remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init bnad_module_init(void)
+{
+	printk(KERN_INFO "Brocade 10G Ethernet driver\n");
+
+	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+	return pci_register_driver(&bnad_pci_driver);
+}
+
+static void __exit bnad_module_exit(void)
+{
+	pci_unregister_driver(&bnad_pci_driver);
+
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
diff -ruP net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.h net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.h
--- net-next-2.6.33-rc5-orig/drivers/net/bna/bnad.h	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6.33-rc5-mod/drivers/net/bna/bnad.h	2010-02-09 22:08:04.729555000 -0800
@@ -0,0 +1,346 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef _BNAD_H_
+#define _BNAD_H_
+
+#include "cee/bfa_cee.h"
+#include "bna.h"
+
+#define BNAD_MAX_Q_DEPTH	0x10000
+#define BNAD_MIN_Q_DEPTH	0x200
+
+#define BNAD_TXQ_NUM		1
+#define BNAD_TX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_TXQ	2048
+
+#define BNAD_MAX_RXQS		64
+#define BNAD_MAX_RXQSETS_USED	16
+#define BNAD_RX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_RXQ	2048
+
+#define BNAD_MAX_CQS		64
+#define BNAD_MAX_RXQS_PER_CQ	2
+
+#define BNAD_MSIX_ERR_MAILBOX_NUM	1
+
+#define BNAD_INTX_MAX_IB_NUM	16
+#define BNAD_INTX_IB_NUM	2	/* 1 for Tx, 1 for Rx */
+#define BNAD_INTX_TX_IB_ID	0
+#define BNAD_INTX_RX_IB_ID	1
+
+#define BNAD_QUEUE_NAME_SIZE	16
+
+#define BNAD_JUMBO_MTU		9000
+
+#define BNAD_COALESCING_TIMER_UNIT	5	/* 5us */
+#define BNAD_MAX_COALESCING_TIMEO	0xFF	/* in 5us units */
+#define BNAD_MAX_INTERPKT_COUNT		0xFF
+#define BNAD_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
+
+#define BNAD_TX_COALESCING_TIMEO	20	/* 20 * 5 = 100us */
+#define BNAD_TX_INTERPKT_COUNT		32
+
+#define BNAD_RX_COALESCING_TIMEO	12	/* 12 * 5 = 60us */
+#define BNAD_RX_INTERPKT_COUNT		6
+#define BNAD_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
+
+#define BNAD_SMALL_RXBUF_SIZE	128
+
+#define BNAD_RIT_OFFSET		0
+#define BNAD_MULTICAST_RXQ_ID	0
+
+#define BNAD_NETIF_WAKE_THRESHOLD	8
+
+#define BNAD_TX_MAX_VECTORS		255
+#define BNAD_TX_MAX_VECTORS_PER_WI	4
+#define BNAD_TX_MAX_DATA_PER_WI		0xFFFFFF	/* 24 bits */
+#define BNAD_TX_MAX_DATA_PER_VECTOR	0x3FFF	/* 14 bits */
+#define BNAD_TX_MAX_WRR_QUOTA		0xFFF	/* 12 bits */
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
+
+#define BNAD_NOT_READY(_bnad)	test_bit(BNAD_F_HWERROR, &(_bnad)->flags)
+#define BNAD_ADMIN_DOWN(_bnad)	(!netif_running((_bnad)->netdev) ||	\
+	test_bit(BNAD_F_BCU_DISABLED, &(_bnad)->flags))
+
+#define BNAD_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)	\
+	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define bnad_conf_lock()	down(&bnad->conf_sem)
+#define bnad_conf_unlock()	up(&bnad->conf_sem)
+
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_ct;
+extern u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+
+struct bnad_skb_unmap {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+	u32 producer_index;
+	u32 consumer_index;
+	struct bnad_skb_unmap *unmap_array;
+	u32 q_depth;
+};
+
+struct bnad_ib_entry {
+	struct bna_ib *ib;
+	void *ib_seg_addr;
+	struct bna_ib_config ib_config;
+};
+
+struct bnad_txq_info {
+	unsigned long flags;
+#define BNAD_TXQ_FREE_SENT	0
+	struct bna_txq txq;
+	struct bna_ib ib;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct bnad *bnad;
+	volatile u32 *hw_consumer_index;
+	struct bna_txq_config txq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_rxq_info {
+	unsigned long flags;
+#define BNAD_RXQ_REFILL		0
+	struct bna_rxq rxq;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_packets_with_error;
+	u64 rxbuf_alloc_failed;
+	struct bnad *bnad;
+	u32 rxq_id;
+	struct bna_rxq_config rxq_config;
+} ____cacheline_aligned;
+
+struct bnad_cq_info {
+	struct bna_cq cq;
+	struct bna_ib ib;
+	struct bnad *bnad;
+	struct bna_pkt_rate pkt_rate;
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	volatile u32 *hw_producer_index;
+	struct napi_struct napi;
+	u32 cq_id;
+	struct bna_cq_config cq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_txf_info {
+	u32 txf_id;
+	struct bna_txf_config txf_config;
+};
+
+struct bnad_rxf_info {
+	u32 rxf_id;
+	struct bna_rxf_config rxf_config;
+};
+
+enum bnad_ucast_cmd {
+	BNAD_UCAST_MAC_SET,
+	BNAD_UCAST_MAC_ADD,
+	BNAD_UCAST_MAC_DEL
+};
+
+enum bnad_state {
+	BNAD_S_START = 0,
+	BNAD_S_INIT = 1,
+	BNAD_S_INIT_DOWN = 2,
+	BNAD_S_INIT_DISABLING = 3,
+	BNAD_S_INIT_DISABLED = 4,
+	BNAD_S_OPENING = 5,
+	BNAD_S_OPEN = 6,
+	BNAD_S_OPEN_DOWN = 7,
+	BNAD_S_OPEN_DISABLING = 8,
+	BNAD_S_OPEN_DISABLED = 9,
+	BNAD_S_CLOSING = 10,
+	BNAD_S_UNLOADING = 11
+};
+
+enum bnad_link_state {
+	BNAD_LS_DOWN = 0,
+	BNAD_LS_UP = 1
+};
+struct bnad {
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	struct bna_dev *priv;
+
+	enum bnad_state state;
+	unsigned long flags;
+#define BNAD_F_BCU_DISABLED		0
+#define BNAD_F_HWERROR			1
+#define BNAD_F_MBOX_IRQ_DISABLED	2
+#define BNAD_F_CEE_RUNNING		3
+
+	unsigned int config;
+#define BNAD_CF_MSIX		0x01
+#define BNAD_CF_PROMISC		0x02
+#define BNAD_CF_ALLMULTI		0x04
+#define BNAD_CF_TXQ_DEPTH	0x10
+#define BNAD_CF_RXQ_DEPTH	0x20
+
+	unsigned int priority;
+	unsigned int curr_priority;	/* currently applied priority */
+
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+
+	uint txq_num;
+	uint txq_depth;
+	struct bnad_txq_info *txq_table;
+
+	struct tasklet_struct tx_free_tasklet;	/* For Tx cleanup */
+
+	uint rxq_num;
+	uint rxq_depth;
+	struct bnad_rxq_info *rxq_table;
+	uint cq_num;
+	struct bnad_cq_info *cq_table;
+
+	struct vlan_group *vlangrp;
+
+	u32 rx_csum;
+
+	uint msix_num;
+	struct msix_entry *msix_table;
+
+	uint ib_num;
+	struct bnad_ib_entry *ib_table;
+
+	struct bna_rit_entry *rit;	/* RxQ Indirection Table */
+
+	spinlock_t priv_lock ____cacheline_aligned;
+
+	uint txf_num;
+	struct bnad_txf_info *txf_table;
+	uint rxf_num;
+	struct bnad_rxf_info *rxf_table;
+
+	struct timer_list stats_timer;
+	struct net_device_stats net_stats;
+
+	u8 tx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 tx_interpkt_count;
+
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 rx_interpkt_count;
+	u8 rx_interpkt_timeo;	/* 4 bits, unit is 0.5usec. */
+
+	u8 rx_dyn_coalesce_on;	/* Rx Dynamic Intr Moderation Flag */
+
+	u8 ref_count;
+
+	u8 lldp_comp_status;
+	u8 cee_stats_comp_status;
+	u8 cee_reset_stats_status;
+	u8 ucast_comp_status;
+	u8 qstop_comp_status;
+
+	int ioc_comp_status;
+
+	struct bna_pause_config pause_config;
+
+	struct bna_stats *hw_stats;
+	struct bnad_drv_stats stats;
+
+	struct work_struct work;
+	unsigned int work_flags;
+#define BNAD_WF_ERROR		0x1
+#define BNAD_WF_RESETDONE	0x2
+#define BNAD_WF_CEE_PRIO	0x4
+#define BNAD_WF_LS_NOTIFY	0x8
+
+	struct completion lldp_comp;
+	struct completion cee_stats_comp;
+	struct completion cee_reset_stats_comp;
+	struct completion ucast_comp;
+	struct completion qstop_comp;
+	struct completion ioc_comp;
+
+	u32 bna_id;
+	u8 __iomem *bar0;	/* registers */
+
+	void *priv_stats;
+	DECLARE_PCI_UNMAP_ADDR(priv_stats_dma)
+
+	struct bfa_trc_mod *trcmod;
+	struct bfa_log_mod *logmod;
+	struct bna_meminfo ioc_meminfo[BNA_MEM_T_MAX];
+	struct timer_list ioc_timer;
+	struct semaphore conf_sem;
+
+	struct bna_mbox_cbfn priv_cbfn;
+
+	char adapter_name[64];
+	char port_name[64];
+
+	/* CEE Stuff */
+	struct bfa_cee_cbfn cee_cbfn;
+	struct bfa_cee cee;
+
+	struct list_head list_entry;
+};
+
+extern uint bnad_rxqs_per_cq;
+
+extern struct semaphore bnad_list_sem;
+extern struct list_head bnad_list;
+
+int bnad_open(struct net_device *netdev);
+int bnad_stop(struct net_device *netdev);
+int bnad_stop_locked(struct net_device *netdev);
+int bnad_open_locked(struct net_device *netdev);
+int bnad_sw_reset_locked(struct net_device *netdev);
+int bnad_ioc_disabling_locked(struct bnad *bnad);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_ioctl_init(void);
+void bnad_ioctl_exit(void);
+struct net_device_stats *bnad_get_stats(struct net_device *netdev);
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 *mac_ptr,
+		   unsigned int cmd);
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss);
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id);
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask);
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_cq_init(struct bnad *bnad, uint cq_id);
+void bnad_setup_cq(struct bnad *bnad, uint cq_id);
+void bnad_setup_ib(struct bnad *bnad, uint ib_id);
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id);
+void bnad_free_ib(struct bnad *bnad, uint ib_id);
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id);
+u32 bnad_get_msglevel(struct net_device *netdev);
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel);
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth);
+void bnad_free_cq(struct bnad *bnad, uint cq_id);
+void bnad_add_to_list(struct bnad *bnad);
+void bnad_remove_from_list(struct bnad *bnad);
+struct bnad *get_bnadev(int bna_id);
+int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _BNAD_H_ */


* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-12-19  1:28 Debashis Dutt
@ 2009-12-19  7:14 ` Joe Perches
  0 siblings, 0 replies; 30+ messages in thread
From: Joe Perches @ 2009-12-19  7:14 UTC (permalink / raw)
  To: Debashis Dutt; +Cc: netdev, adapter_linux_open_src_team

On Fri, 2009-12-18 at 17:28 -0800, Debashis Dutt wrote:
> +static void bnad_hw_error(struct bnad *bnad, u8 status)
> +{
> +	char message[BNA_MESSAGE_SIZE];
[]
> +			sprintf(message, "Disabling Mbox IRQ %d for port %d",
> +				irq, bnad->bna_id);
> +		pr_info("%s",
> +				message);

All of the char message[BNA_MESSAGE_SIZE] declarations
are not necessary.

These messages are also missing trailing '\n' newlines.

It's better to not indirect via a buffer and simply use:

	pr_<level>("fmt\n", args);



* Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
@ 2009-12-19  1:28 Debashis Dutt
  2009-12-19  7:14 ` Joe Perches
  0 siblings, 1 reply; 30+ messages in thread
From: Debashis Dutt @ 2009-12-19  1:28 UTC (permalink / raw)
  To: netdev; +Cc: adapter_linux_open_src_team

From: Debashis Dutt <ddutt@brocade.com>

This is patch 1/6 which contains linux driver source for
Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
Source is based against net-next-2.6.

We wish this patch to be considered for inclusion in net-next-2.6

Signed-off-by: Debashis Dutt <ddutt@brocade.com>
---
 bnad.c | 3572 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 bnad.h |  347 ++++++
 2 files changed, 3919 insertions(+)

diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.c net-next-2.6-mod/drivers/net/bna/bnad.c
--- net-next-2.6-orig/drivers/net/bna/bnad.c	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.c	2009-12-18 16:53:40.000000000 -0800
@@ -0,0 +1,3572 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ *  bnad.c  Brocade 10G PCIe Ethernet driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/workqueue.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pm.h>
+#include <linux/random.h>
+
+#include <net/checksum.h>
+
+#include "bnad.h"
+#include "cna.h"
+#include "bna_iocll.h"
+#include "bna_intr.h"
+#include "bnad_defs.h"
+
+#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
+static const bool bnad_msix = true;
+static const bool bnad_small_large_rxbufs = true;
+static uint bnad_rxqsets_used;
+static const bool bnad_ipid_mode;
+static const bool bnad_vlan_strip = true;
+static const uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
+static const uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
+static uint bnad_log_level;
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");
+
+uint bnad_rxqs_per_cq = 2;
+
+const char *bnad_states[] = {
+	"START",
+	"INIT",
+	"INIT_DOWN",
+	"INIT_DISABLING",
+	"INIT_DISABLED",
+	"OPENING",
+	"OPEN",
+	"OPEN_DOWN",
+	"OPEN_DISABING",
+	"OPEN_DISABLED",
+	"CLOSING",
+	"UNLOADING"
+};
+
+static void bnad_disable_msix(struct bnad *bnad);
+static void bnad_free_ibs(struct bnad *bnad);
+static void bnad_set_rx_mode(struct net_device *netdev);
+static void bnad_set_rx_mode_locked(struct net_device *netdev);
+static void bnad_reconfig_vlans(struct bnad *bnad);
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets);
+static int bnad_set_mac_address(struct net_device *netdev, void *addr);
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr);
+static int bnad_disable_locked(struct bnad *bnad);
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu);
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid);
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid);
+static void bnad_netpoll(struct net_device *netdev);
+
+static const struct net_device_ops bnad_netdev_ops = {
+	.ndo_open			= bnad_open,
+	.ndo_stop			= bnad_stop,
+	.ndo_start_xmit			= bnad_start_xmit,
+	.ndo_get_stats			= bnad_get_stats,
+	.ndo_set_rx_mode		= bnad_set_rx_mode,
+	.ndo_set_multicast_list		= bnad_set_rx_mode,
+	.ndo_set_mac_address		= bnad_set_mac_address,
+	.ndo_change_mtu			= bnad_change_mtu,
+
+	.ndo_vlan_rx_register		= bnad_vlan_rx_register,
+	.ndo_vlan_rx_add_vid		= bnad_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid		= bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller		= bnad_netpoll,
+#endif
+};
+
+u32
+bnad_get_msglevel(struct net_device *netdev)
+{
+	return bnad_log_level;
+}
+
+void
+bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	bnad_conf_lock();
+	bnad_log_level = msglevel;
+	bnad_conf_unlock();
+}
+
+static unsigned int bnad_free_txbufs(struct bnad_txq_info *txqinfo,
+	u16 updated_txq_cons)
+{
+	struct bnad *bnad = txqinfo->bnad;
+	unsigned int sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	wis = BNAD_Q_INDEX_CHANGE(txqinfo->txq.q.consumer_index,
+				  updated_txq_cons, txqinfo->txq.q.q_depth);
+	BUG_ON(!(wis <=
+		   BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth)));
+	unmap_array = txqinfo->skb_unmap_q.unmap_array;
+	unmap_cons = txqinfo->skb_unmap_q.consumer_index;
+	prefetch(&unmap_array[unmap_cons + 1]);
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+		BUG_ON(!(skb));
+		unmap_array[unmap_cons].skb = NULL;
+		BUG_ON(!(wis >=
+			   BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags)));
+		BUG_ON(((txqinfo->skb_unmap_q.producer_index - unmap_cons) &
+			(txqinfo->skb_unmap_q.q_depth - 1)) <
+		       1 + skb_shinfo(skb)->nr_frags);
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr), skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, txqinfo->skb_unmap_q.q_depth);
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+				       pci_unmap_addr(&unmap_array[unmap_cons],
+						      dma_addr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					   0);
+			BNA_QE_INDX_ADD(unmap_cons, 1,
+					txqinfo->skb_unmap_q.q_depth);
+			prefetch(&unmap_array[unmap_cons + 1]);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	txqinfo->txq.q.consumer_index = updated_txq_cons;
+	txqinfo->skb_unmap_q.consumer_index = unmap_cons;
+	txqinfo->tx_packets += sent_packets;
+	txqinfo->tx_bytes += sent_bytes;
+	return sent_packets;
+}
+
+static inline void bnad_disable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+}
+
+static inline void bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    bnad->tx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    bnad->cq_table[i].
+					    rx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static inline void bnad_disable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib, 0);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+}
+static inline void bnad_enable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib,
+				    cqinfo->rx_coalescing_timeo);
+
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static unsigned int bnad_tx(struct bnad *bnad, struct bnad_txq_info *txqinfo)
+{
+	struct net_device *netdev = bnad->netdev;
+	unsigned int sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags))
+		return 0;
+
+	sent = bnad_free_txbufs(txqinfo,
+				(u16) (*txqinfo->hw_consumer_index));
+	if (sent) {
+		if (netif_queue_stopped(netdev) &&
+		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
+		    BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);
+	} else {
+		bna_ib_ack(bnad->priv, &txqinfo->ib, 0);
+	}
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+
+	return sent;
+}
+
+static irqreturn_t bnad_msix_tx(int irq, void *data)
+{
+	struct bnad_txq_info *txqinfo = (struct bnad_txq_info *)data;
+	struct bnad *bnad = txqinfo->bnad;
+
+	bnad_tx(bnad, txqinfo);
+
+	return IRQ_HANDLED;
+}
+
+static void bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc =
+		BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				rxqinfo->skb_unmap_q.q_depth);
+
+	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
+	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
+	BUG_ON(!(wi_range && wi_range <= rxqinfo->rxq.q.q_depth));
+
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent,
+					     wi_range);
+			BUG_ON(!(wi_range &&
+				   wi_range <= rxqinfo->rxq.q.q_depth));
+		}
+		skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			rxqinfo->rxbuf_alloc_failed++;
+			goto finishing;
+		}
+		skb->dev = rxqinfo->bnad->netdev;
+		skb_reserve(skb, NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr =
+			pci_map_single(rxqinfo->bnad->pcidev, skb->data,
+				       rxqinfo->rxq_config.buffer_size,
+				       PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, rxqinfo->skb_unmap_q.q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		rxqinfo->skb_unmap_q.producer_index = unmap_prod;
+		rxqinfo->rxq.q.producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(&rxqinfo->rxq);
+	}
+}
+
+static inline void bnad_refill_rxq(struct bnad_rxq_info *rxqinfo)
+{
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags)) {
+		if (BNA_QE_FREE_CNT
+		    (&rxqinfo->skb_unmap_q,
+		     rxqinfo->skb_unmap_q.
+		     q_depth) >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_rxbufs(rxqinfo);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+	}
+}
+
+static unsigned int bnad_poll_cq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_rxq_info *rxqinfo = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	struct bna_pkt_rate *pkt_rt = &cqinfo->pkt_rate;
+
+	prefetch(bnad->netdev);
+	cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+	BUG_ON(!(wi_range && wi_range <= cqinfo->cq.q.q_depth));
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+		rxqinfo = &bnad->rxq_table[cmpl->rxq_id];
+		unmap_q = &rxqinfo->skb_unmap_q;
+
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BUG_ON(!(skb));
+		prefetch(skb->data - NET_IP_ALIGN);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		/* XXX May be bad for performance. */
+		/* CATAPULT_BRINGUP : Should we add all the packets ? */
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+
+		wis++;
+		if (likely(--wi_range))
+			next_cmpl = cmpl + 1;
+		else {
+			BNA_Q_PI_ADD(&cqinfo->cq, wis);
+			wis = 0;
+			next_cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+			BUG_ON(!(wi_range &&
+				   wi_range <= cqinfo->cq.q.q_depth));
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely
+		    (flags &
+		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
+		      BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb(skb);
+			rxqinfo->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely
+		    (bnad->rx_csum &&
+		     (((flags & BNA_CQ_EF_IPV4) &&
+		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+		      (flags & BNA_CQ_EF_IPV6)) &&
+		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		rxqinfo->rx_packets++;
+		rxqinfo->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) &&
+		    bnad_vlan_strip) {
+			BUG_ON(!(cmpl->vlan_tag));
+			vlan_hwaccel_receive_skb(skb, bnad->vlangrp,
+						 ntohs(cmpl->vlan_tag));
+		} else
+			netif_receive_skb(skb);
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	BNA_Q_PI_ADD(&cqinfo->cq, wis);
+
+	if (likely(rxqinfo)) {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, packets);
+		/* Check the current queue first. */
+		bnad_refill_rxq(rxqinfo);
+
+		/* XXX counters per queue for refill? */
+		if (likely(bnad_small_large_rxbufs)) {
+			/* There are 2 RxQs - small and large buffer queues */
+			unsigned int rxq_id = (rxqinfo->rxq_id ^ 1);
+			bnad_refill_rxq(&bnad->rxq_table[rxq_id]);
+		}
+	} else {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	}
+
+	return packets;
+}
+
+static irqreturn_t bnad_msix_rx(int irq, void *data)
+{
+	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
+	struct bnad *bnad = cqinfo->bnad;
+
+	if (likely(napi_schedule_prep(&cqinfo->napi))) {
+		bnad_disable_rx_irq(bnad, cqinfo);
+		__napi_schedule(&cqinfo->napi);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_msix_err_mbox(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+
+	bna_intr_status_get(bnad->priv, &intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+		bna_mbox_err_handler(bnad->priv, intr_status);
+
+	spin_unlock(&bnad->priv_lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_isr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+
+	if (!intr_status) {
+		spin_unlock(&bnad->priv_lock);
+		return IRQ_NONE;
+	}
+
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		bna_mbox_err_handler(bnad->priv, intr_status);
+		spin_unlock(&bnad->priv_lock);
+		if (BNA_IS_ERR_INTR(intr_status) ||
+		    !BNA_IS_INTX_DATA_INTR(intr_status))
+			goto exit_isr;
+	} else
+		spin_unlock(&bnad->priv_lock);
+
+	if (likely(napi_schedule_prep(&bnad->cq_table[0].napi))) {
+		bnad_disable_txrx_irqs(bnad);
+		__napi_schedule(&bnad->cq_table[0].napi);
+	}
+
+exit_isr:
+	return IRQ_HANDLED;
+}
+
+static int bnad_request_mbox_irq(struct bnad *bnad)
+{
+	int err;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		err = request_irq(bnad->msix_table[bnad->msix_num - 1].vector,
+				  &bnad_msix_err_mbox, 0,
+				  bnad->netdev->name, bnad->netdev);
+	} else {
+		err = request_irq(bnad->pcidev->irq, &bnad_isr,
+				  IRQF_SHARED, bnad->netdev->name,
+				  bnad->netdev);
+	}
+
+	if (err) {
+		dev_err(&bnad->pcidev->dev,
+			"Request irq for mailbox failed: %d\n", err);
+		return err;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bna_mbox_msix_idx_set(bnad->priv, bnad->msix_num - 1);
+
+	bna_mbox_intr_enable(bnad->priv);
+	return 0;
+}
+
+static void bnad_sync_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+	synchronize_irq(irq);
+}
+
+static void bnad_free_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_mbox_intr_disable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	free_irq(irq, bnad->netdev);
+}
+
+static int bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
+{
+	BUG_ON(!(txq_id < bnad->txq_num));
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[txq_id].vector,
+			   &bnad_msix_tx, 0,
+			   bnad->txq_table[txq_id].name,
+			   &bnad->txq_table[txq_id]);
+}
+
+int
+bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
+{
+	BUG_ON(!(cq_id < bnad->cq_num));
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
+			   &bnad_msix_rx, 0,
+			   bnad->cq_table[cq_id].name, &bnad->cq_table[cq_id]);
+}
+
+static int bnad_request_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	int i;
+	int err;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		u32 mask;
+		bna_intx_disable(bnad->priv, &mask);
+		mask &= ~0xffff;
+		bna_intx_enable(bnad->priv, mask);
+		for (i = 0; i < bnad->ib_num; i++)
+			bna_ib_ack(bnad->priv, bnad->ib_table[i].ib, 0);
+		return 0;
+	}
+
+	entries = bnad->msix_table;
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_request_txq_irq(bnad, i);
+		if (err) {
+			sprintf(message, "%s request irq for TxQ %d failed %d",
+				bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[i].vector,
+					 &bnad->txq_table[i]);
+			}
+			return err;
+		}
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_request_cq_irq(bnad, i);
+		if (err) {
+			sprintf(message, "%s request irq for CQ %u failed %d",
+				bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[bnad->txq_num + i].vector,
+					 &bnad->cq_table[i]);
+			}
+			goto free_txq_irqs;
+		}
+	}
+
+	return 0;
+
+free_txq_irqs:
+	for (i = 0; i < bnad->txq_num; i++)
+		free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+	bnad_disable_msix(bnad);
+
+	return err;
+}
+
+static void bnad_free_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	uint i;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		entries = bnad->msix_table;
+		for (i = 0; i < bnad->txq_num; i++)
+			free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+		for (i = 0; i < bnad->cq_num; i++) {
+			free_irq(entries[bnad->txq_num + i].vector,
+				 &bnad->cq_table[i]);
+		}
+	} else
+		synchronize_irq(bnad->pcidev->irq);
+}
+
+void
+bnad_setup_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+
+	BUG_ON(!(ib_id < bnad->ib_num));
+	ib_entry = &bnad->ib_table[ib_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_ib_config_set(bnad->priv, ib_entry->ib, ib_id,
+			  &ib_entry->ib_config);
+	/* Start the IB */
+	bna_ib_ack(bnad->priv, ib_entry->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_ibs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_ib(bnad, bnad->txq_table[i].txq_config.ib_id);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_ib(bnad, bnad->cq_table[i].cq_config.ib_id);
+}
+
+/* These functions are called back with priv_lock held. */
+
+static void bnad_lldp_get_cfg_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_attr_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_stats_comp_status = status;
+	complete(&bnad->cee_stats_comp);
+}
+
+static void bnad_cee_reset_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_reset_stats_status = status;
+	complete(&bnad->cee_reset_stats_comp);
+}
+
+static void bnad_ucast_set_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->ucast_comp_status = status;
+	complete(&bnad->ucast_comp);
+}
+
+static void bnad_q_stop_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+
+	bnad->qstop_comp_status = status;
+	complete(&bnad->qstop_comp);
+}
+
+static unsigned int bnad_get_priority(struct bnad *bnad, u8 prio_map)
+{
+	unsigned int i;
+
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		return i;
+	}
+	return 0;
+}
+
+static void bnad_link_up_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct bfi_ll_aen *up_aen = (struct bfi_ll_aen *)
+		(&bnad->priv->mb_msg);
+
+	bnad->cee_linkup = up_aen->cee_linkup;
+	bnad->priority = bnad_get_priority(bnad, up_aen->prio_map);
+
+	bnad->link_state = BNAD_LS_UP;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_link_down_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->link_state = BNAD_LS_DOWN;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_stats_get_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->stats.hw_stats_updates++;
+	if (bnad->state == BNAD_S_OPEN)
+		mod_timer(&bnad->stats_timer, jiffies + HZ);
+}
+
+/* Called with bnad priv_lock held. */
+static void bnad_hw_error(struct bnad *bnad, u8 status)
+{
+	unsigned int irq;
+	char message[BNA_MESSAGE_SIZE];
+
+	set_bit(BNAD_F_HWERROR, &bnad->flags);
+
+	bna_mbox_intr_disable(bnad->priv);
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (!test_and_set_bit(BNAD_F_MBOX_IRQ_DISABLED, &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			sprintf(message, "Disabling Mbox IRQ %d for port %d",
+				irq, bnad->bna_id);
+		pr_info("%s",
+				message);
+			disable_irq_nosync(irq);
+		}
+	}
+
+	bna_cleanup(bnad->priv);
+
+	bnad->work_flags = BNAD_WF_ERROR;
+	if (bnad->state != BNAD_S_UNLOADING)
+		schedule_work(&bnad->work);
+}
+
+static void bnad_hw_error_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad_hw_error(bnad, status);
+}
+
+int
+bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth)
+{
+	/* Q_depth must be power of 2 for macros to work. */
+	BUG_ON(!(BNA_POWER_OF_2(q_depth)));
+	unmap_q->q_depth = q_depth;
+	unmap_q->unmap_array = vmalloc(q_depth * sizeof(struct bnad_skb_unmap));
+	if (!unmap_q->unmap_array)
+		return -ENOMEM;
+	memset(unmap_q->unmap_array, 0,
+	       q_depth * sizeof(struct bnad_skb_unmap));
+	return 0;
+}
+
+static int bnad_alloc_unmap_queues(struct bnad *bnad)
+{
+	int i, err = 0;
+	struct bnad_txq_info *txqinfo;
+	struct bnad_rxq_info *rxqinfo;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		err = bnad_alloc_unmap_q(&txqinfo->skb_unmap_q,
+					 txqinfo->txq.q.q_depth * 4);
+		if (err) {
+			pr_err("%s allocating Tx unmap Q %d failed: %d\n",
+			       bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		err = bnad_alloc_unmap_q(&rxqinfo->skb_unmap_q,
+					 rxqinfo->rxq.q.q_depth);
+		if (err) {
+			pr_err("%s allocating Rx unmap Q %d failed: %d\n",
+			       bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void bnad_reset_q(struct bnad *bnad, struct bna_q *q,
+	struct bnad_unmap_q *unmap_q)
+{
+	u32 _ui;
+
+	BUG_ON(q->producer_index != q->consumer_index);
+	BUG_ON(unmap_q->producer_index != unmap_q->consumer_index);
+
+	q->producer_index = 0;
+	q->consumer_index = 0;
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	for (_ui = 0; _ui < unmap_q->q_depth; _ui++)
+		BUG_ON(unmap_q->unmap_array[_ui].skb);
+}
+
+static void bnad_flush_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	struct bnad *bnad = rxqinfo->bnad;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 cq_id;
+
+	unmap_q = &rxqinfo->skb_unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BUG_ON(!(skb));
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+	}
+
+	bnad_reset_q(bnad, &rxqinfo->rxq.q, &rxqinfo->skb_unmap_q);
+	cq_id = rxqinfo->rxq_id / bnad_rxqs_per_cq;
+	*bnad->cq_table[cq_id].hw_producer_index = 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_disable_txq(struct bnad *bnad, u32 txq_id)
+{
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_txq_stop(bnad->priv, txq_id);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto txq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto txq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+txq_stop_exit:
+	if (err) {
+		sprintf(message, "%s stop TxQ %u failed %d", bnad->netdev->name,
+			txq_id, err);
+		pr_info("%s", message);
+	}
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask)
+{
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_multi_rxq_stop(bnad->priv, rxq_id_mask);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto rxq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto rxq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+rxq_stop_exit:
+	if (err) {
+		sprintf(message, "%s stop RxQs(0x%llu) failed %d",
+			bnad->netdev->name, rxq_id_mask, err);
+		pr_info("%s", message);
+	}
+
+	return err;
+}
+
+static int bnad_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_rx_irq(bnad, cqinfo);
+	return rcvd;
+}
+
+static int bnad_poll_txrx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	bnad_tx(bnad, &bnad->txq_table[0]);
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_txrx_irqs(bnad);
+	return rcvd;
+}
+
+static void bnad_napi_init(struct bnad *bnad)
+{
+	int (*napi_poll) (struct napi_struct *, int);
+	int i;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		napi_poll = bnad_poll_rx;
+	else
+		napi_poll = bnad_poll_txrx;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_add(bnad->netdev, &bnad->cq_table[i].napi, napi_poll,
+			       64);
+}
+
+static void bnad_napi_enable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_enable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_disable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_disable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_del(&bnad->cq_table[i].napi);
+}
+
+static void bnad_stop_data_path(struct bnad *bnad, int on_error)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!on_error && !BNAD_NOT_READY(bnad)) {
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Wait to make sure Tx and Rx are stopped. */
+	msleep(1000);
+	bnad_free_txrx_irqs(bnad);
+	bnad_sync_mbox_irq(bnad);
+
+	bnad_napi_disable(bnad);
+	bnad_napi_uninit(bnad);
+	/* Delete the stats timer after synchronize with mbox irq. */
+	del_timer_sync(&bnad->stats_timer);
+
+	netif_tx_disable(bnad->netdev);
+	netif_carrier_off(bnad->netdev);
+
+	/*
+	 * Remove tasklets if scheduled
+	 */
+	tasklet_kill(&bnad->tx_free_tasklet);
+}
+
+static void bnad_port_admin_locked(struct bnad *bnad, u8 up)
+{
+	spin_lock_irq(&bnad->priv_lock);
+	if (!BNAD_NOT_READY(bnad)) {
+		bna_port_admin(bnad->priv, up);
+		if (up)
+			mod_timer(&bnad->stats_timer, jiffies + HZ);
+		else
+			bnad->link_state = BNAD_LS_DOWN;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held */
+static int bnad_stop_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_CLOSING;
+		bnad_disable_locked(bnad);
+		bnad->state = BNAD_S_INIT;
+		sprintf(message, "%s is stopped", bnad->netdev->name);
+		pr_info("%s", message);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN_DISABLED:
+		bnad->state = BNAD_S_INIT_DISABLED;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held */
+int
+bnad_ioc_disabling_locked(struct bnad *bnad)
+{
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT_DISABLING;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+		bnad_disable_locked(bnad);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int bnad_alloc_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BUG_ON(!(bnad->ib_table && ib_id < bnad->ib_num));
+	ib_entry = &bnad->ib_table[ib_id];
+	ib_entry->ib_seg_addr =
+		pci_alloc_consistent(bnad->pcidev, L1_CACHE_BYTES, &dma_addr);
+	if (!ib_entry->ib_seg_addr)
+		return -ENOMEM;
+
+	BNA_SET_DMA_ADDR(dma_addr, &ib_entry->ib_config.ib_seg_addr);
+	return 0;
+}
+static int bnad_alloc_ibs(struct bnad *bnad)
+{
+	uint i;
+	int err;
+
+	bnad->ib_num = bnad->txq_num + bnad->cq_num;
+	bnad->ib_table =
+		kcalloc(bnad->ib_num, sizeof(struct bnad_ib_entry),
+			GFP_KERNEL);
+	if (!bnad->ib_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->ib_num; i++) {
+		err = bnad_alloc_ib(bnad, i);
+		if (err)
+			goto free_ibs;
+	}
+	return 0;
+
+free_ibs:
+	bnad_free_ibs(bnad);
+	return err;
+}
+
+void
+bnad_free_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BUG_ON(!(bnad->ib_table && ib_id < bnad->ib_num));
+	ib_entry = &bnad->ib_table[ib_id];
+	if (ib_entry->ib_seg_addr) {
+		BNA_GET_DMA_ADDR(&ib_entry->ib_config.ib_seg_addr, dma_addr);
+		pci_free_consistent(bnad->pcidev, L1_CACHE_BYTES,
+				    ib_entry->ib_seg_addr, dma_addr);
+		ib_entry->ib_seg_addr = NULL;
+	}
+}
+
+static void bnad_free_ibs(struct bnad *bnad)
+{
+	uint i;
+
+	if (!bnad->ib_table)
+		return;
+	for (i = 0; i < bnad->ib_num; i++)
+		bnad_free_ib(bnad, i);
+	kfree(bnad->ib_table);
+	bnad->ib_table = NULL;
+}
+
+/* Let the caller deal with error - free memory. */
+static int bnad_alloc_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q, size_t qsize)
+{
+	size_t i;
+	dma_addr_t dma_addr;
+
+	qsize = ALIGN(qsize, PAGE_SIZE);
+	qpt->page_count = qsize >> PAGE_SHIFT;
+	qpt->page_size = PAGE_SIZE;
+
+	qpt->kv_qpt_ptr =
+		pci_alloc_consistent(bnad->pcidev,
+				     qpt->page_count *
+				     sizeof(struct bna_dma_addr), &dma_addr);
+	if (!qpt->kv_qpt_ptr)
+		return -ENOMEM;
+	BNA_SET_DMA_ADDR(dma_addr, &qpt->hw_qpt_ptr);
+
+	q->qpt_ptr = kcalloc(qpt->page_count, sizeof(void *), GFP_KERNEL);
+	if (!q->qpt_ptr)
+		return -ENOMEM;
+	qpt->qpt_ptr = q->qpt_ptr;
+	for (i = 0; i < qpt->page_count; i++) {
+		q->qpt_ptr[i] =
+			pci_alloc_consistent(bnad->pcidev, PAGE_SIZE,
+					     &dma_addr);
+		if (!q->qpt_ptr[i])
+			return -ENOMEM;
+		BNA_SET_DMA_ADDR(dma_addr,
+				 &((struct bna_dma_addr *)qpt->kv_qpt_ptr)[i]);
+
+	}
+
+	return 0;
+}
+
+static void bnad_free_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q)
+{
+	int i;
+	dma_addr_t dma_addr;
+
+	if (qpt->kv_qpt_ptr && q->qpt_ptr) {
+		for (i = 0; i < qpt->page_count; i++) {
+			if (q->qpt_ptr[i]) {
+				BNA_GET_DMA_ADDR(&
+						 ((struct bna_dma_addr *)qpt->
+						  kv_qpt_ptr)[i], dma_addr);
+				pci_free_consistent(bnad->pcidev, PAGE_SIZE,
+						    q->qpt_ptr[i], dma_addr);
+			}
+		}
+	}
+
+	kfree(q->qpt_ptr);
+	qpt->qpt_ptr = q->qpt_ptr = NULL;
+
+	if (qpt->kv_qpt_ptr) {
+		BNA_GET_DMA_ADDR(&qpt->hw_qpt_ptr, dma_addr);
+		pci_free_consistent(bnad->pcidev,
+				    qpt->page_count *
+				    sizeof(struct bna_dma_addr),
+				    qpt->kv_qpt_ptr, dma_addr);
+		qpt->kv_qpt_ptr = NULL;
+	}
+}
+
+static void bnad_free_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BUG_ON(!(bnad->txq_table && txq_id < bnad->txq_num));
+	txqinfo = &bnad->txq_table[txq_id];
+	bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+	if (txqinfo->skb_unmap_q.unmap_array) {
+		bnad_free_txbufs(txqinfo, txqinfo->txq.q.producer_index);
+		vfree(txqinfo->skb_unmap_q.unmap_array);
+		txqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BUG_ON(!(bnad->rxq_table && rxq_id < bnad->rxq_num));
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+	if (rxqinfo->skb_unmap_q.unmap_array) {
+		bnad_flush_rxbufs(rxqinfo);
+		vfree(rxqinfo->skb_unmap_q.unmap_array);
+		rxqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo = &bnad->cq_table[cq_id];
+
+	BUG_ON(!(bnad->cq_table && cq_id < bnad->cq_num));
+	bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+}
+
+static void bnad_free_queues(struct bnad *bnad)
+{
+	uint i;
+
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++)
+			bnad_free_txq(bnad, i);
+		kfree(bnad->txq_table);
+		bnad->txq_table = NULL;
+	}
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++)
+			bnad_free_rxq(bnad, i);
+		kfree(bnad->rxq_table);
+		bnad->rxq_table = NULL;
+	}
+
+	if (bnad->cq_table) {
+		for (i = 0; i < bnad->cq_num; i++)
+			bnad_free_cq(bnad, i);
+		kfree(bnad->cq_table);
+		bnad->cq_table = NULL;
+	}
+}
+
+static int bnad_txq_init(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	BUG_ON(!(bnad->txq_table && txq_id < bnad->txq_num));
+	txqinfo = &bnad->txq_table[txq_id];
+	err = bnad_alloc_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q,
+			   bnad->txq_depth * sizeof(struct bna_txq_entry));
+	if (err) {
+		bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+		return err;
+	}
+	txqinfo->txq.q.q_depth = bnad->txq_depth;
+	txqinfo->bnad = bnad;
+	txqinfo->txq_config.txf_id = BNAD_TX_FUNC_ID;
+	snprintf(txqinfo->name, sizeof(txqinfo->name), "%s TxQ %d",
+		 bnad->netdev->name, txq_id);
+	return 0;
+}
+
+static int bnad_txqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->txq_table =
+		kcalloc(bnad->txq_num, sizeof(struct bnad_txq_info),
+			GFP_KERNEL);
+	if (!bnad->txq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_txq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_rxq_init(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	int err;
+
+	BUG_ON(!(bnad->rxq_table && rxq_id < bnad->rxq_num));
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	err = bnad_alloc_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q,
+			   bnad->rxq_depth * sizeof(struct bna_rxq_entry));
+	if (err) {
+		bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+		return err;
+	}
+	rxqinfo->rxq.q.q_depth = bnad->rxq_depth;
+	rxqinfo->bnad = bnad;
+	rxqinfo->rxq_id = rxq_id;
+	rxqinfo->rxq_config.cq_id = rxq_id / bnad_rxqs_per_cq;
+
+	return 0;
+}
+
+static int
+bnad_rxqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->rxq_table =
+		kcalloc(bnad->rxq_num, sizeof(struct bnad_rxq_info),
+			GFP_KERNEL);
+	if (!bnad->rxq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		err = bnad_rxq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_cq_init(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+	int err;
+
+	BUG_ON(!(bnad->cq_table && cq_id < bnad->cq_num));
+	cqinfo = &bnad->cq_table[cq_id];
+	err = bnad_alloc_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q,
+			   bnad->rxq_depth * bnad_rxqs_per_cq *
+			   sizeof(struct bna_cq_entry));
+	if (err) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return err;
+	}
+
+	cqinfo->cq.q.q_depth = bnad->rxq_depth * bnad_rxqs_per_cq;
+	cqinfo->bnad = bnad;
+
+	cqinfo->rx_coalescing_timeo = bnad->rx_coalescing_timeo;
+
+	cqinfo->cq_id = cq_id;
+	snprintf(cqinfo->name, sizeof(cqinfo->name), "%s CQ %d",
+		 bnad->netdev->name, cq_id);
+
+	return 0;
+}
+
+static int bnad_cqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->cq_table =
+		kcalloc(bnad->cq_num, sizeof(struct bnad_cq_info), GFP_KERNEL);
+	if (!bnad->cq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_cq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+static uint bnad_get_qsize(uint qsize_conf, uint mtu)
+{
+	uint qsize;
+
+	if (mtu >= ETH_DATA_LEN) {
+		qsize = qsize_conf / (mtu / ETH_DATA_LEN);
+		if (!BNA_POWER_OF_2(qsize))
+			BNA_TO_POWER_OF_2_HIGH(qsize);
+		if (qsize < BNAD_MIN_Q_DEPTH)
+			qsize = BNAD_MIN_Q_DEPTH;
+	} else
+		qsize = bnad_txq_depth;
+
+	return qsize;
+}
+
+static int bnad_init_queues(struct bnad *bnad)
+{
+	int err;
+
+	if (!(bnad->config & BNAD_CF_TXQ_DEPTH))
+		bnad->txq_depth =
+			bnad_get_qsize(bnad_txq_depth, bnad->netdev->mtu);
+	if (!(bnad->config & BNAD_CF_RXQ_DEPTH))
+		bnad->rxq_depth =
+			bnad_get_qsize(bnad_rxq_depth, bnad->netdev->mtu);
+
+	err = bnad_txqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_rxqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_cqs_init(bnad);
+
+	return err;
+}
+
+void
+bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
+{
+	struct bnad_cq_info *cqinfo;
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+
+	BUG_ON(!(cq_id < bnad->cq_num && ib_id < bnad->ib_num));
+	cqinfo = &bnad->cq_table[cq_id];
+	ib_entry = &bnad->ib_table[ib_id];
+
+	cqinfo->hw_producer_index = (u32 *) (ib_entry->ib_seg_addr);
+	cqinfo->cq_config.ib_id = ib_id;
+	cqinfo->cq_config.ib_seg_index = 0;
+
+	ib_entry->ib = &cqinfo->ib;
+	ib_config = &ib_entry->ib_config;
+	ib_config->coalescing_timer = bnad->rx_coalescing_timeo;
+	ib_config->control_flags =
+		BNA_IB_CF_INT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
+	if (bnad->config & BNAD_CF_MSIX) {
+		ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+		ib_config->msix_vector = ib_id;
+	} else
+		ib_config->msix_vector = 1 << ib_id;
+
+	/* Every CQ has its own IB. */
+	ib_config->seg_size = 1;
+	ib_config->index_table_offset = ib_id;
+}
+
+static void bnad_ibs_init(struct bnad *bnad)
+{
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+	struct bnad_txq_info *txqinfo;
+
+	int ib_id, i;
+
+	ib_id = 0;
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		ib_entry = &bnad->ib_table[ib_id];
+
+		txqinfo->hw_consumer_index = ib_entry->ib_seg_addr;
+		txqinfo->txq_config.ib_id = ib_id;
+		txqinfo->txq_config.ib_seg_index = 0;
+
+		ib_entry->ib = &txqinfo->ib;
+		ib_config = &ib_entry->ib_config;
+		ib_config->coalescing_timer = bnad->tx_coalescing_timeo;
+		ib_config->control_flags =
+			BNA_IB_CF_INTER_PKT_DMA | BNA_IB_CF_INT_ENABLE |
+			BNA_IB_CF_COALESCING_MODE | BNA_IB_CF_MASTER_ENABLE;
+		if (bnad->config & BNAD_CF_MSIX) {
+			ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+			ib_config->msix_vector = ib_id;
+		} else
+			ib_config->msix_vector = 1 << ib_id;
+		ib_config->interpkt_count = bnad->tx_interpkt_count;
+
+		/* Every TxQ has its own IB. */
+		ib_config->seg_size = 1;
+		ib_config->index_table_offset = ib_id;
+		ib_id++;
+	}
+
+	for (i = 0; i < bnad->cq_num; i++, ib_id++)
+		bnad_rxib_init(bnad, i, ib_id);
+}
+
+static void
+bnad_txf_init(struct bnad *bnad, uint txf_id)
+{
+	struct bnad_txf_info *txf_info;
+
+	BUG_ON(!(bnad->txf_table && txf_id < bnad->txf_num));
+	txf_info = &bnad->txf_table[txf_id];
+	txf_info->txf_id = txf_id;
+	txf_info->txf_config.flags =
+		BNA_TXF_CF_VLAN_WI_BASED | BNA_TXF_CF_ENABLE;
+}
+
+void
+bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss)
+{
+	struct bnad_rxf_info *rxf_info;
+
+	BUG_ON(!(bnad->rxf_table && rxf_id < bnad->rxf_num));
+	rxf_info = &bnad->rxf_table[rxf_id];
+	rxf_info->rxf_id = rxf_id;
+	rxf_info->rxf_config.rit_offset = rit_offset;
+	rxf_info->rxf_config.mcast_rxq_id = BNAD_MULTICAST_RXQ_ID;
+	if (bnad_small_large_rxbufs)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_SM_LG_RXQ;
+	if (bnad_vlan_strip)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_VLAN_STRIP;
+	if (rss) {
+		struct bna_rxf_rss *rxf_rss;
+
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_RSS_ENABLE;
+		rxf_rss = &rxf_info->rxf_config.rss;
+		rxf_rss->type =
+			BNA_RSS_V4_TCP | BNA_RSS_V4_IP | BNA_RSS_V6_TCP |
+			BNA_RSS_V6_IP;
+		rxf_rss->hash_mask = bnad->cq_num - 1;
+		get_random_bytes(rxf_rss->toeplitz_hash_key,
+				 sizeof(rxf_rss->toeplitz_hash_key));
+	}
+}
+
+static int bnad_init_funcs(struct bnad *bnad)
+{
+	bnad->txf_table =
+		kcalloc(bnad->txf_num, sizeof(struct bnad_txf_info),
+			GFP_KERNEL);
+	if (!bnad->txf_table)
+		return -ENOMEM;
+	bnad_txf_init(bnad, BNAD_TX_FUNC_ID);
+
+	bnad->rxf_table =
+		kcalloc(bnad->rxf_num, sizeof(struct bnad_rxf_info),
+			GFP_KERNEL);
+	if (!bnad->rxf_table)
+		return -ENOMEM;
+	bnad_rxf_init(bnad, BNAD_RX_FUNC_ID, BNAD_RIT_OFFSET,
+		      (bnad->cq_num > 1) ? 1 : 0);
+	return 0;
+}
+
+static void bnad_setup_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BUG_ON(!(txq_id < bnad->txq_num));
+	txqinfo = &bnad->txq_table[txq_id];
+
+	/* CEE state should not change while we do this */
+	spin_lock_irq(&bnad->priv_lock);
+	if (!bnad->cee_linkup) {
+		txqinfo->txq_config.priority = bnad->curr_priority = txq_id;
+		clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	} else {
+		txqinfo->txq_config.priority = bnad->curr_priority =
+			bnad->priority;
+		set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	}
+	/*  Set wrr_quota properly if multiple priorities/TxQs are enabled. */
+	txqinfo->txq_config.wrr_quota = BNAD_TX_MAX_WRR_QUOTA;
+	bna_txq_config(bnad->priv, &txqinfo->txq, txq_id, &txqinfo->txq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BUG_ON(!(rxq_id < bnad->rxq_num));
+	rxqinfo = &bnad->rxq_table[rxq_id];
+
+	/*
+	 * Every RxQ set has 2 RxQs: the first is large buffer RxQ,
+	 * the second is small buffer RxQ.
+	 */
+	if ((rxq_id % bnad_rxqs_per_cq) == 0)
+		rxqinfo->rxq_config.buffer_size =
+			(bnad_vlan_strip ? VLAN_ETH_HLEN : ETH_HLEN) +
+			bnad->netdev->mtu + ETH_FCS_LEN;
+	else
+		rxqinfo->rxq_config.buffer_size = BNAD_SMALL_RXBUF_SIZE;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxq_config(bnad->priv, &rxqinfo->rxq, rxq_id, &rxqinfo->rxq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BUG_ON(!(cq_id < bnad->cq_num));
+	cqinfo = &bnad->cq_table[cq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_cq_config(bnad->priv, &cqinfo->cq, cq_id, &cqinfo->cq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_queues(struct bnad *bnad)
+{
+	uint i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_txq(bnad, i);
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_setup_rxq(bnad, i);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_cq(bnad, i);
+}
+
+static void bnad_setup_rit(struct bnad *bnad)
+{
+	int i, size;
+
+	size = bnad->cq_num;
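+	/* One RIT entry per CQ, mapping it to its large/small buffer RxQs. */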
+	for (i = 0; i < size; i++) {
+		if (bnad_small_large_rxbufs) {
+			bnad->rit[i].large_rxq_id = (i << 1);
+			bnad->rit[i].small_rxq_id = (i << 1) + 1;
+		} else {
+			bnad->rit[i].large_rxq_id = i;
+		}
+	}
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rit_config_set(bnad->priv, BNAD_RIT_OFFSET, bnad->rit, size);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo = &bnad->rxq_table[rxq_id];
+	u16 rxbufs;
+
+	BUG_ON(!(bnad->rxq_table && rxq_id < bnad->rxq_num));
+	bnad_alloc_rxbufs(rxqinfo);
+	rxbufs = BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+				   rxqinfo->skb_unmap_q.q_depth);
+}
+
+static int bnad_config_hw(struct bnad *bnad)
+{
+	int i, err = 0;
+	u64 rxq_id_mask = 0;
+	struct sockaddr sa;
+	struct net_device *netdev = bnad->netdev;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Disable the RxF until later bringing port up. */
+	bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+	for (i = 0; i < bnad->txq_num; i++) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_txq(bnad, i);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_queues(bnad);
+
+	bnad_setup_rit(bnad);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txf_config_set(bnad->priv, BNAD_TX_FUNC_ID,
+			   &bnad->txf_table->txf_config);
+	for (i = 0; i < bnad->rxf_num; i++) {
+		bna_rxf_config_set(bnad->priv, i,
+				   &bnad->rxf_table[i].rxf_config);
+		bna_rxf_vlan_filter(bnad->priv, i, BNA_ENABLE);
+	}
+
+	spin_unlock_irq(&bnad->priv_lock);
+	/* Mailbox should be enabled before this! */
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	err = bnad_set_mac_address_locked(netdev, &sa);
+	spin_lock_irq(&bnad->priv_lock);
+	if (err || BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Receive broadcasts */
+	bna_rxf_broadcast(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+
+	bna_mtu_info(bnad->priv, netdev->mtu, bnad);
+	bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+
+	bna_rxf_mcast_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	bna_mcast_mac_reset_list(bnad->priv);
+
+	bnad_set_rx_mode_locked(bnad->netdev);
+
+	bnad_reconfig_vlans(bnad);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_ibs(bnad);
+	return 0;
+
+unlock_and_return:
+	if (BNAD_NOT_READY(bnad))
+		err = BNA_FAIL;
+	spin_unlock_irq(&bnad->priv_lock);
+	return err;
+}
+
+/* Note: bnad_cleanup doesn't free irqs */
+static void bnad_cleanup(struct bnad *bnad)
+{
+
+	kfree(bnad->rit);
+	bnad->rit = NULL;
+
+	kfree(bnad->txf_table);
+	bnad->txf_table = NULL;
+
+	kfree(bnad->rxf_table);
+	bnad->rxf_table = NULL;
+
+	bnad_free_ibs(bnad);
+	bnad_free_queues(bnad);
+}
+
+/* Should be called with rtnl_lock held. */
+static int bnad_init(struct bnad *bnad)
+{
+	int err;
+
+	err = bnad_alloc_ibs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_init_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad_ibs_init(bnad);
+
+	err = bnad_init_funcs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_alloc_unmap_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad->rit =
+		kcalloc(bnad->cq_num, sizeof(struct bna_rit_entry),
+			GFP_KERNEL);
+	if (!bnad->rit)
+		goto finished;
+
+	return 0;
+
+finished:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+static int bnad_enable_locked(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int err = 0;
+	uint i;
+	char message[BNA_MESSAGE_SIZE];
+
+	bnad->state = BNAD_S_OPENING;
+
+	err = bnad_init(bnad);
+	if (err) {
+		sprintf(message, "%s init failed %d", netdev->name, err);
+		pr_info("%s", message);
+		bnad->state = BNAD_S_INIT;
+		return err;
+	}
+
+	err = bnad_config_hw(bnad);
+	if (err) {
+		sprintf(message, "%s config HW failed %d", netdev->name, err);
+		pr_info("%s", message);
+		goto init_failed;
+	}
+
+	err = bnad_request_txrx_irqs(bnad);
+	if (err) {
+		sprintf(message, "%s requests Tx/Rx irqs failed: %d",
+			bnad->netdev->name, err);
+		pr_info("%s", message);
+		goto init_failed;
+	}
+	bnad_napi_init(bnad);
+	bnad_napi_enable(bnad);
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_alloc_for_rxq(bnad, i);
+
+	bnad->state = BNAD_S_OPEN;
+	sprintf(message, "%s is opened", bnad->netdev->name);
+	pr_info("%s", message);
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad)) {
+		/* Let bnad_error take care of the error. */
+		spin_unlock_irq(&bnad->priv_lock);
+		return 0;
+	}
+
+	/* RxF was disabled earlier. */
+	bna_rxf_enable(bnad->priv, BNAD_RX_FUNC_ID);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	return 0;
+
+init_failed:
+	bnad_cleanup(bnad);
+	bnad->state = BNAD_S_INIT;
+	return err;
+}
+
+/* Should be called with conf_lock held */
+static int
+bnad_open_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		err = bnad_enable_locked(bnad);
+		break;
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		sprintf(message, "%s is not ready yet: IOC down", netdev->name);
+		pr_info("%s", message);
+		break;
+	case BNAD_S_INIT_DISABLED:
+		bnad->state = BNAD_S_OPEN_DISABLED;
+		sprintf(message, "%s is not ready yet: IOC disabled",
+			netdev->name);
+		pr_info("%s", message);
+		break;
+	default:
+		break;
+	}
+	return err;
+}
+
+int
+bnad_open_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (!err && (bnad->state == BNAD_S_OPEN))
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return err;
+}
+
+int
+bnad_open(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	sprintf(message, "%s open", netdev->name);
+	pr_info("%s", message);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags)) {
+		sprintf(message, "%s is disabled", netdev->name);
+		pr_info("%s", message);
+	} else
+		err = bnad_open_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static int bnad_disable_locked(struct bnad *bnad)
+{
+	int err = 0, i;
+	u64 rxq_id_mask = 0;
+
+	bnad_stop_data_path(bnad, 0);
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			goto cleanup;
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			goto cleanup;
+	}
+
+cleanup:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+int
+bnad_stop_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	return bnad_stop_locked_internal(netdev);
+}
+
+int
+bnad_stop(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	sprintf(message, "%s stop", netdev->name);
+	pr_info("%s", message);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags)) {
+		sprintf(message, "%s port is disabled", netdev->name);
+		pr_info("%s", message);
+	} else
+		err = bnad_stop_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	err = bnad_stop_locked_internal(netdev);
+	if (err) {
+		sprintf(message, "%s sw reset internal: stop failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (err) {
+		sprintf(message, "%s sw reset internal: open failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+	return 0;
+done:
+	pr_info("%s", message);
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->state != BNAD_S_OPEN)
+		return 0;
+
+	bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	err = bnad_sw_reset_locked_internal(netdev);
+
+	if (err) {
+		sprintf(message, "%s sw reset: failed %d", bnad->netdev->name,
+			err);
+		pr_info("%s", message);
+		return err;
+	}
+
+	/* After the reset, make sure we are in the OPEN state. */
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return 0;
+}
+
+static int bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+	int err;
+
+	BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+		 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err) {
+			bnad->stats.tso_err++;
+			return err;
+		}
+	}
+
+	/*
+	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
+	 * excluding the length field.
+	 */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		/* Do we really need these? */
+		iph->tot_len = 0;
+		iph->check = 0;
+
+		tcp_hdr(skb)->check =
+			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+					   IPPROTO_TCP, 0);
+		bnad->stats.tso4++;
+	} else {
+		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+		BUG_ON(skb->protocol != htons(ETH_P_IPV6));
+		ipv6h->payload_len = 0;
+		tcp_hdr(skb)->check =
+			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+					 IPPROTO_TCP, 0);
+		bnad->stats.tso6++;
+	}
+
+	return 0;
+}
+
+netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	struct bnad_unmap_q *unmap_q;
+	u16 txq_prod, vlan_tag = 0;
+	unsigned int unmap_prod, wis, wis_used, wi_range;
+	unsigned int vectors, vect_id, i, acked;
+	int err;
+	dma_addr_t dma_addr;
+	struct bna_txq_entry *txqent;
+	bna_txq_wi_ctrl_flag_t flags;
+
+	if (unlikely(skb->len <= ETH_HLEN ||
+		     skb->len > BNAD_TX_MAX_DATA_PER_WI)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+	unmap_q = &txqinfo->skb_unmap_q;
+
+	vectors = 1 + skb_shinfo(skb)->nr_frags;
+	if (vectors > BNAD_TX_MAX_VECTORS) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	wis = BNAD_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
+	acked = 0;
+	if (unlikely(wis > BNA_Q_FREE_COUNT(txq) ||
+		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+		if ((u16) (*txqinfo->hw_consumer_index) !=
+		    txq->q.consumer_index &&
+		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+			acked = bnad_free_txbufs(txqinfo,
+					(u16)(*txqinfo->hw_consumer_index));
+			bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+		} else
+			netif_stop_queue(netdev);
+
+		smp_mb();
+		/*
+		 * Check again to deal with race condition between
+		 * netif_stop_queue here, and netif_wake_queue in
+		 * interrupt handler which is not inside netif tx lock.
+		 */
+		if (likely(wis > BNA_Q_FREE_COUNT(txq) ||
+		    vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+			bnad->stats.netif_queue_stop++;
+			return NETDEV_TX_BUSY;
+		} else
+			netif_wake_queue(netdev);
+	}
+
+	unmap_prod = unmap_q->producer_index;
+	wis_used = 1;
+	vect_id = 0;
+	flags = 0;
+
+	txq_prod = txq->q.producer_index;
+	BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent, wi_range);
+	BUG_ON(!(wi_range && wi_range <= txq->q.q_depth));
+	txqent->hdr.wi.reserved = 0;
+	txqent->hdr.wi.num_vectors = vectors;
+	txqent->hdr.wi.opcode =
+		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+		       BNA_TXQ_WI_SEND));
+
+	if (bnad_ipid_mode)
+		flags |= BNA_TXQ_WI_CF_IPID_MODE;
+
+	if (bnad->vlangrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = (u16) vlan_tx_tag_get(skb);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
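+	/*
+	 * With CEE running, force the negotiated priority into the 802.1p
+	 * bits of the VLAN tag that gets inserted.
+	 */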
+	if (test_bit(BNAD_F_CEE_RUNNING, &bnad->flags)) {
+		vlan_tag =
+			(bnad->curr_priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+
+	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+	if (skb_is_gso(skb)) {
+		err = bnad_tso_prepare(bnad, skb);
+		if (err) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
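+		/* skb_is_gso() returns gso_size, i.e. the MSS. */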
+		txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
+		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+		txqent->hdr.wi.l4_hdr_size_n_offset =
+			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+			      (tcp_hdrlen(skb) >> 2,
+			       skb_transport_offset(skb)));
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = 0;
+
+		txqent->hdr.wi.lso_mss = 0;
+
+		if (skb->protocol == htons(ETH_P_IP))
+			proto = ip_hdr(skb)->protocol;
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* XXX the nexthdr may not be TCP immediately. */
+			proto = ipv6_hdr(skb)->nexthdr;
+		}
+		if (proto == IPPROTO_TCP) {
+			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.tcpcsum_offload++;
+			BUG_ON(skb_headlen(skb) <
+			       skb_transport_offset(skb) + tcp_hdrlen(skb));
+		} else if (proto == IPPROTO_UDP) {
+			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.udpcsum_offload++;
+			BUG_ON(skb_headlen(skb) <
+			       skb_transport_offset(skb) +
+			       sizeof(struct udphdr));
+		} else {
+			err = skb_checksum_help(skb);
+			bnad->stats.csum_help++;
+			if (err) {
+				dev_kfree_skb(skb);
+				bnad->stats.csum_help_err++;
+				return NETDEV_TX_OK;
+			}
+		}
+	} else {
+		txqent->hdr.wi.lso_mss = 0;
+		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+	}
+
+	txqent->hdr.wi.flags = htons(flags);
+
+	txqent->hdr.wi.frame_length = htonl(skb->len);
+
+	unmap_q->unmap_array[unmap_prod].skb = skb;
+	BUG_ON(!(skb_headlen(skb) <= BNAD_TX_MAX_DATA_PER_VECTOR));
+	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	dma_addr =
+		pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+			       PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+			   dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
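+		/*
+		 * A work item holds at most 4 vectors; the remaining
+		 * fragments continue in an extension work item.
+		 */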
+		if (++vect_id == BNAD_TX_MAX_VECTORS_PER_WI) {
+			vect_id = 0;
+			if (--wi_range)
+				txqent++;
+			else {
+				BNA_QE_INDX_ADD(txq_prod, wis_used,
+						txq->q.q_depth);
+				wis_used = 0;
+				BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent,
+						     wi_range);
+				BUG_ON(!(wi_range &&
+					   wi_range <= txq->q.q_depth));
+			}
+			wis_used++;
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+		}
+
+		BUG_ON(!(frag->size <= BNAD_TX_MAX_DATA_PER_VECTOR));
+		txqent->vector[vect_id].length = htons(frag->size);
+		BUG_ON(unmap_q->unmap_array[unmap_prod].skb != NULL);
+		dma_addr =
+			pci_map_page(bnad->pcidev, frag->page,
+				     frag->page_offset, frag->size,
+				     PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+	}
+
+	unmap_q->producer_index = unmap_prod;
+	BNA_QE_INDX_ADD(txq_prod, wis_used, txq->q.q_depth);
+	txq->q.producer_index = txq_prod;
+
+	smp_mb();
+	bna_txq_prod_indx_doorbell(txq);
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index)
+		tasklet_schedule(&bnad->tx_free_tasklet);
+
+	return NETDEV_TX_OK;
+}
+
+struct net_device_stats *
+bnad_get_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &bnad->net_stats;
+	struct cna_stats_mac_rx *rxstats = &bnad->hw_stats->mac_rx_stats;
+	struct cna_stats_mac_tx *txstats = &bnad->hw_stats->mac_tx_stats;
+	int i;
+
+	memset(net_stats, 0, sizeof(*net_stats));
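+	/*
+	 * Packet/byte counts are tracked per queue in software; error and
+	 * drop counters come from the MAC hardware statistics.
+	 */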
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			net_stats->rx_packets += bnad->rxq_table[i].rx_packets;
+			net_stats->rx_bytes += bnad->rxq_table[i].rx_bytes;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			net_stats->tx_packets += bnad->txq_table[i].tx_packets;
+			net_stats->tx_bytes += bnad->txq_table[i].tx_bytes;
+		}
+	}
+	net_stats->rx_errors =
+		rxstats->rx_fcs_error + rxstats->rx_alignment_error +
+		rxstats->rx_frame_length_error + rxstats->rx_code_error +
+		rxstats->rx_undersize;
+	net_stats->tx_errors = txstats->tx_fcs_error + txstats->tx_undersize;
+	net_stats->rx_dropped = rxstats->rx_drop;
+	net_stats->tx_dropped = txstats->tx_drop;
+	net_stats->multicast = rxstats->rx_multicast;
+	net_stats->collisions = txstats->tx_total_collision;
+
+	net_stats->rx_length_errors = rxstats->rx_frame_length_error;
+	net_stats->rx_crc_errors = rxstats->rx_fcs_error;
+	net_stats->rx_frame_errors = rxstats->rx_alignment_error;
+	/* recv'r fifo overrun */
+	net_stats->rx_fifo_errors = bnad->hw_stats->rxf_stats[0].frame_drops;
+
+	return net_stats;
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_set_rx_mode_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (netdev->flags & IFF_PROMISC) {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+		bnad->config |= BNAD_CF_PROMISC;
+	} else {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_DISABLE);
+		bnad->config &= ~BNAD_CF_PROMISC;
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		if (!(bnad->config & BNAD_CF_ALLMULTI)) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_DISABLE);
+			bnad->config |= BNAD_CF_ALLMULTI;
+		}
+	} else {
+		if (bnad->config & BNAD_CF_ALLMULTI) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_ENABLE);
+			bnad->config &= ~BNAD_CF_ALLMULTI;
+		}
+	}
+
+	if (netdev->mc_count) {
+		struct mac *mcaddr_list;
+		struct dev_mc_list *mc;
+		int i;
+
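+		/*
+		 * One extra slot is reserved for the broadcast address.
+		 * GFP_ATOMIC because priv_lock is held.
+		 */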
+		mcaddr_list =
+			kcalloc((netdev->mc_count + 1), sizeof(struct mac),
+				GFP_ATOMIC);
+		if (!mcaddr_list)
+			return;
+
+		mcaddr_list[0] = bna_bcast_addr;
+
+		mc = netdev->mc_list;
+		for (i = 1; mc && i < netdev->mc_count + 1; i++, mc = mc->next)
+			memcpy(&mcaddr_list[i], mc->dmi_addr,
+				sizeof(struct mac));
+
+		err = bna_rxf_mcast_mac_set_list(bnad->priv, BNAD_RX_FUNC_ID,
+			(const struct mac *)mcaddr_list,
+				 netdev->mc_count + 1);
+
+		/* XXX Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+
+		kfree(mcaddr_list);
+	}
+}
+
+static void bnad_set_rx_mode(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bnad_set_rx_mode_locked(netdev);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held. */
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 *mac_ptr,
+	unsigned int cmd)
+{
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+	enum bna_status(*ucast_mac_func) (struct bna_dev *bna_dev,
+		unsigned int rxf_id, const struct mac *mac_addr_ptr) = NULL;
+
+	WARN_ON(in_interrupt());
+	if (!is_valid_ether_addr(mac_ptr))
+		return -EINVAL;
+
+	switch (cmd) {
+	case BNAD_UCAST_MAC_SET:
+		ucast_mac_func = bna_rxf_ucast_mac_set;
+		break;
+	case BNAD_UCAST_MAC_ADD:
+		ucast_mac_func = bna_rxf_ucast_mac_add;
+		break;
+	case BNAD_UCAST_MAC_DEL:
+		ucast_mac_func = bna_rxf_ucast_mac_del;
+		break;
+	}
+
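+	/* Issue the command; on success wait for its completion callback. */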
+	init_completion(&bnad->ucast_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = ucast_mac_func(bnad->priv, rxf_id, (const struct mac *)mac_ptr);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto ucast_mac_exit;
+	}
+	wait_for_completion(&bnad->ucast_comp);
+	err = bnad->ucast_comp_status;
+	if (err == BFI_LL_CMD_NOT_EXEC)
+		err = 0;
+
+ucast_mac_exit:
+	if (err) {
+		sprintf(message, "%s unicast MAC address command %d failed: %d",
+			bnad->netdev->name, cmd, err);
+		pr_info("%s",
+			message);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int err;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	err = bnad_ucast_mac(bnad, BNAD_RX_FUNC_ID, (u8 *) sa->sa_data,
+			     BNAD_UCAST_MAC_SET);
+	if (err)
+		return err;
+
+	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+	return 0;
+}
+
+static int bnad_set_mac_address(struct net_device *netdev, void *addr)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	err = bnad_set_mac_address_locked(netdev, addr);
+	bnad_conf_unlock();
+	return err;
+
+}
+
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	WARN_ON(in_interrupt());
+
+	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+		return -EINVAL;
+
+	bnad_conf_lock();
+	netdev->mtu = new_mtu;
+	err = bnad_sw_reset_locked(netdev);
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static void bnad_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *grp)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	bnad->vlangrp = grp;
+	bnad_conf_unlock();
+}
+
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+static void bnad_vlan_rx_kill_vid(struct net_device *netdev,
+	unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_del(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_reconfig_vlans(struct bnad *bnad)
+{
+	u16 vlan_id;
+
+	bna_rxf_vlan_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	if (bnad->vlangrp) {
+		for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
+			if (vlan_group_get_device(bnad->vlangrp, vlan_id))
+				bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+						 (unsigned int)vlan_id);
+		}
+	}
+}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnad_netpoll(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_cq_info *cqinfo;
+	int i;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		disable_irq(bnad->pcidev->irq);
+		bnad_isr(bnad->pcidev->irq, netdev);
+		enable_irq(bnad->pcidev->irq);
+	} else {
+		for (i = 0; i < bnad->cq_num; i++) {
+			cqinfo = &bnad->cq_table[i];
+			if (likely(napi_schedule_prep(&cqinfo->napi))) {
+				bnad_disable_rx_irq(bnad, cqinfo);
+				__napi_schedule(&cqinfo->napi);
+			}
+		}
+	}
+}
+#endif
+
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets)
+{
+	bnad->txq_num = BNAD_TXQ_NUM;
+	bnad->txf_num = 1;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (rxqsets) {
+			bnad->cq_num = rxqsets;
+			if (bnad->cq_num > BNAD_MAX_CQS)
+				bnad->cq_num = BNAD_MAX_CQS;
+		} else
+			bnad->cq_num =
+				min((uint) num_online_cpus(),
+				    (uint) BNAD_MAX_RXQSETS_USED);
+		/*
+		 * Keep cq_num a power of 2: the RSS hash mask is cq_num - 1.
+		 */
+		if (!BNA_POWER_OF_2(bnad->cq_num))
+			BNA_TO_POWER_OF_2(bnad->cq_num);
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+
+		bnad->rxf_num = 1;
+		bnad->msix_num =
+			bnad->txq_num + bnad->cq_num +
+			BNAD_MSIX_ERR_MAILBOX_NUM;
+	} else {
+		bnad->cq_num = 1;
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+		bnad->rxf_num = 1;
+		bnad->msix_num = 0;
+	}
+}
+
+static void bnad_enable_msix(struct bnad *bnad)
+{
+	int i, ret;
+
+	if (!(bnad->config & BNAD_CF_MSIX) || bnad->msix_table)
+		return;
+
+	bnad->msix_table =
+		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+	if (!bnad->msix_table)
+		goto intx_mode;
+
+	for (i = 0; i < bnad->msix_num; i++)
+		bnad->msix_table[i].entry = i;
+
+	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+	if (ret > 0) {
+		/* Not enough MSI-X vectors. */
+		int rxqsets = ret;
+
+		dev_err(&bnad->pcidev->dev,
+			"Tried to get %d MSI-X vectors, only got %d\n",
+			bnad->msix_num, ret);
+		BNA_TO_POWER_OF_2(rxqsets);
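+		/*
+		 * Shrink the number of RxQ sets until the vectors we need
+		 * fit into what the PCI layer granted.
+		 */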
+		while (bnad->msix_num > ret && rxqsets) {
+			bnad_q_num_init(bnad, rxqsets);
+			rxqsets >>= 1;
+		}
+		if (bnad->msix_num <= ret) {
+			ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+					      bnad->msix_num);
+			if (ret) {
+				dev_err(&bnad->pcidev->dev,
+					"Enabling MSI-X failed: %d\n", ret);
+				goto intx_mode;
+			}
+		} else {
+			dev_err(&bnad->pcidev->dev,
+				"Enabling MSI-X failed: limited (%d) vectors\n",
+				ret);
+			goto intx_mode;
+		}
+	} else if (ret < 0) {
+		dev_err(&bnad->pcidev->dev, "Enabling MSI-X failed: %d\n", ret);
+		goto intx_mode;
+	}
+
+	dev_info(&bnad->pcidev->dev,
+		 "Enabling MSI-X succeeded with %d vectors, %s\n",
+		 bnad->msix_num,
+		 (bnad->cq_num > 1) ? "RSS is enabled" : "RSS is not enabled");
+	return;
+
+intx_mode:
+	dev_warn(&bnad->pcidev->dev, "Switching to INTx mode with no RSS\n");
+
+	kfree(bnad->msix_table);
+	bnad->msix_table = NULL;
+
+	bnad->config &= ~BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, 0);
+}
+
+static void bnad_disable_msix(struct bnad *bnad)
+{
+	if (bnad->config & BNAD_CF_MSIX) {
+		pci_disable_msix(bnad->pcidev);
+		kfree(bnad->msix_table);
+		bnad->msix_table = NULL;
+		bnad->config &= ~BNAD_CF_MSIX;
+	}
+}
+
+static void bnad_error(struct bnad *bnad)
+{
+
+	spin_lock_irq(&bnad->priv_lock);
+
+	if (!test_and_clear_bit(BNAD_F_HWERROR, &bnad->flags)) {
+		spin_unlock_irq(&bnad->priv_lock);
+		return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
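+	/*
+	 * INIT and OPEN drop to their _DOWN states; the OPEN case also
+	 * tears down the data path.
+	 */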
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		bnad_stop_data_path(bnad, 1);
+		bnad_cleanup(bnad);
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		/* fall through */
+	default:
+		break;
+	}
+}
+
+static void bnad_resume_after_reset(struct bnad *bnad)
+{
+	int err;
+	struct net_device *netdev = bnad->netdev;
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT;
+
+		bna_port_mac_get(bnad->priv, (struct mac *)bnad->perm_addr);
+		BUG_ON(netdev->addr_len != sizeof(bnad->perm_addr));
+		memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+		if (is_zero_ether_addr(netdev->dev_addr))
+			memcpy(netdev->dev_addr, bnad->perm_addr,
+			       netdev->addr_len);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		err = bnad_enable_locked(bnad);
+		if (err) {
+			sprintf(message,
+				"%s bnad_enable failed after reset: %d",
+				bnad->netdev->name, err);
+			pr_info("%s", message);
+		} else {
+			bnad_port_admin_locked(bnad, BNA_ENABLE);
+		}
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		/* fall through */
+	default:
+		break;
+	}
+
+}
+
+static void bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+	struct bnad *bnad = (struct bnad *)bnad_ptr;
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	unsigned int acked;
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+
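+	/*
+	 * BNAD_TXQ_FREE_SENT serializes Tx buffer freeing between the
+	 * transmit path and this tasklet.
+	 */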
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index &&
+	    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+		acked = bnad_free_txbufs(txqinfo,
+					 (u16) (*txqinfo->
+						     hw_consumer_index));
+		bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+	}
+}
+
+static void bnad_cee_reconfig_prio(struct bnad *bnad, u8 cee_linkup,
+	unsigned int prio)
+{
+
+	if (prio != bnad->curr_priority) {
+		bnad_sw_reset_locked_internal(bnad->netdev);
+	} else {
+		spin_lock_irq(&bnad->priv_lock);
+		if (!cee_linkup)
+			clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		else
+			set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		spin_unlock_irq(&bnad->priv_lock);
+	}
+}
+
+static void bnad_link_state_notify(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+	unsigned int prio = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->state != BNAD_S_OPEN) {
+		sprintf(message, "%s link up in state %d", netdev->name,
+			bnad->state);
+		pr_info("%s", message);
+		return;
+	}
+
+	spin_lock_irq(&bnad->priv_lock);
+	link_state = bnad->link_state;
+	cee_linkup = bnad->cee_linkup;
+	if (cee_linkup)
+		prio = bnad->priority;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (link_state == BNAD_LS_UP) {
+		bnad_cee_reconfig_prio(bnad, cee_linkup, prio);
+		if (!netif_carrier_ok(netdev)) {
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			netif_carrier_off(netdev);
+			bnad->stats.netif_queue_stop++;
+		}
+	}
+}
+
+static void bnad_work(struct work_struct *work)
+{
+	struct bnad *bnad = container_of(work, struct bnad, work);
+	unsigned long work_flags;
+
+	bnad_conf_lock();
+
+	spin_lock_irq(&bnad->priv_lock);
+	work_flags = bnad->work_flags;
+	bnad->work_flags = 0;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (work_flags & BNAD_WF_ERROR)
+		bnad_error(bnad);
+	if (work_flags & BNAD_WF_RESETDONE)
+		bnad_resume_after_reset(bnad);
+
+	if (work_flags & BNAD_WF_LS_NOTIFY)
+		bnad_link_state_notify(bnad);
+
+	bnad_conf_unlock();
+}
+
+static void bnad_stats_timeo(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	int i;
+	struct bnad_rxq_info *rxqinfo;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_stats_get(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
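+	/* With dynamic coalescing on, adapt each CQ's timer to its rate. */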
+	if (bnad->rx_dyn_coalesce_on) {
+		u8 cls_timer;
+		struct bnad_cq_info *cq;
+		for (i = 0; i < bnad->cq_num; i++) {
+			cq = &bnad->cq_table[i];
+
+			if ((cq->pkt_rate.small_pkt_cnt == 0) &&
+			    (cq->pkt_rate.large_pkt_cnt == 0))
+				continue;
+
+			cls_timer =
+				bna_calc_coalescing_timer(bnad->priv,
+							  &cq->pkt_rate);
+
+			/* NAPI needs the coalescing timer stored per CQ. */
+			cq->rx_coalescing_timeo = cls_timer;
+
+			bna_ib_coalescing_timer_set(bnad->priv, &cq->ib,
+						    cls_timer);
+		}
+	}
+
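+	/*
+	 * Refill any RxQ that has run low on posted buffers, unless a
+	 * refill is already in progress.
+	 */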
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		if (!(BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+					rxqinfo->skb_unmap_q.q_depth) >>
+		      BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) {
+			if (test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags))
+				continue;
+			bnad_alloc_rxbufs(rxqinfo);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+		}
+	}
+}
+
+static void
+bnad_free_ioc_mem(struct bnad *bnad)
+{
+	enum bna_dma_mem_type i;
+
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		if (bnad->ioc_meminfo[i].kva && bnad->ioc_meminfo[i].dma)
+			pci_free_consistent(bnad->pcidev,
+					    bnad->ioc_meminfo[i].len,
+					    bnad->ioc_meminfo[i].kva,
+					    *(dma_addr_t *) &bnad->
+					    ioc_meminfo[i].dma);
+		else if (bnad->ioc_meminfo[i].kva)
+			vfree(bnad->ioc_meminfo[i].kva);
+		bnad->ioc_meminfo[i].kva = NULL;
+	}
+}
+
+/* The following IOC callback functions are called with priv_lock held. */
+
+void
+bna_iocll_enable_cbfn(void *arg, enum bfa_status error)
+{
+	struct bnad *bnad = arg;
+
+	if (!error) {
+		bnad->work_flags &= ~BNAD_WF_LS_NOTIFY;
+		bnad->work_flags |= BNAD_WF_RESETDONE;
+
+		if (bnad->state != BNAD_S_UNLOADING)
+			schedule_work(&bnad->work);
+	}
+
+	bnad->ioc_comp_status = error;
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_disable_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_hbfail_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	bnad_hw_error(bnad, BFA_STATUS_IOC_FAILURE);
+}
+
+void
+bna_iocll_reset_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+	u32 int_status, int_mask;
+	unsigned int irq;
+
+	/* Clear the status */
+	bna_intr_status_get(bnad->priv, &int_status);
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (test_and_clear_bit(BNAD_F_MBOX_IRQ_DISABLED,
+		    &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			enable_irq(irq);
+		}
+	}
+
+	int_mask = ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS);
+	bna_intx_enable(bnad->priv, int_mask);
+}
+
+static void
+bnad_ioc_timeout(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+
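+	/* Periodic tick for the IOC state machine; re-armed until unload. */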
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_timer(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->state != BNAD_S_UNLOADING)
+		mod_timer(&bnad->ioc_timer,
+			  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+}
+
+s32
+bnad_cee_attach(struct bnad *bnad)
+{
+	u8 *dma_kva;
+	dma_addr_t dma_pa;
+	struct bfa_cee *cee = &bnad->cee;
+
+	memset(cee, 0, sizeof(struct bfa_cee));
+
+	/* Allocate memory for dma */
+	dma_kva =
+		pci_alloc_consistent(bnad->pcidev, bfa_cee_meminfo(), &dma_pa);
+	if (dma_kva == NULL)
+		return -ENOMEM;
+
+	/* Ugly... need to remove once CAL is fixed. */
+	((struct bna_dev *) bnad->priv)->cee = cee;
+
+	bnad->cee_cbfn.get_attr_cbfn = bnad_cee_get_attr_cb;
+	bnad->cee_cbfn.get_stats_cbfn = bnad_cee_get_stats_cb;
+	/* The reset-stats callback is left unset. */
+	bnad->cee_cbfn.reset_stats_cbfn = NULL;
+
+	/* Invoke cee attach function */
+	bfa_cee_attach(cee, &bnad->priv->ioc, bnad, bnad->trcmod, bnad->logmod);
+	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
+	return 0;
+}
+
+static void bnad_cee_detach(struct bnad *bnad)
+{
+	struct bfa_cee *cee = &bnad->cee;
+
+	if (cee->attr_dma.kva) {
+		pci_free_consistent(bnad->pcidev, bfa_cee_meminfo(),
+				    cee->attr_dma.kva, cee->attr_dma.pa);
+	}
+}
+
+static int bnad_priv_init(struct bnad *bnad)
+{
+	dma_addr_t dma_addr;
+	struct bna_dma_addr bna_dma_addr;
+	int err = 0, i;
+	struct bfa_pcidev pcidev_info;
+	u32 intr_mask;
+
+	if (bnad_msix)
+		bnad->config |= BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, bnad_rxqsets_used);
+
+	bnad->work_flags = 0;
+	INIT_WORK(&bnad->work, bnad_work);
+
+	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+		     (unsigned long)bnad);
+
+	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
+		    (unsigned long)bnad);
+
+	bnad->tx_coalescing_timeo = BNAD_TX_COALESCING_TIMEO;
+	bnad->tx_interpkt_count = BNAD_TX_INTERPKT_COUNT;
+
+	bnad->rx_coalescing_timeo = BNAD_RX_COALESCING_TIMEO;
+	bnad->rx_interpkt_count = BNAD_RX_INTERPKT_COUNT;
+	bnad->rx_interpkt_timeo = BNAD_RX_INTERPKT_TIMEO;
+
+	bnad->rx_dyn_coalesce_on = true;
+
+	bnad->rx_csum = 1;
+	bnad->pause_config.tx_pause = 0;
+	bnad->pause_config.rx_pause = 0;
+
+	/* XXX could be vmalloc? */
+	bnad->trcmod = kzalloc(sizeof(struct bfa_trc_mod), GFP_KERNEL);
+	if (!bnad->trcmod) {
+		printk(KERN_ERR "port %u failed allocating trace buffer!\n",
+		       bnad->bna_id);
+		return -ENOMEM;
+	}
+
+	bfa_trc_init(bnad->trcmod);
+
+	bnad->logmod = NULL;
+
+	bnad->priv = kzalloc(bna_get_handle_size(), GFP_KERNEL);
+	if (!bnad->priv) {
+		printk(KERN_ERR "port %u failed allocating memory for bna\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_trcmod;
+	}
+	bnad->priv_stats =
+		pci_alloc_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+				     &dma_addr);
+	if (!bnad->priv_stats) {
+		printk(KERN_ERR
+		       "port %u failed allocating memory for bna stats\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_priv_mem;
+	}
+	pci_unmap_addr_set(bnad, priv_stats_dma, dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &bna_dma_addr);
+	bna_init(bnad->priv, (void *)bnad->bar0, bnad->priv_stats, bna_dma_addr,
+		 bnad->trcmod, bnad->logmod);
+	bna_all_stats_get(bnad->priv, &bnad->hw_stats);
+
+	spin_lock_init(&bnad->priv_lock);
+	init_MUTEX(&bnad->conf_sem);
+	bnad->priv_cbfn.ucast_set_cb = bnad_ucast_set_cb;
+	bnad->priv_cbfn.txq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.rxq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.link_up_cb = bnad_link_up_cb;
+	bnad->priv_cbfn.link_down_cb = bnad_link_down_cb;
+	bnad->priv_cbfn.stats_get_cb = bnad_stats_get_cb;
+	bnad->priv_cbfn.hw_error_cb = bnad_hw_error_cb;
+	bnad->priv_cbfn.lldp_get_cfg_cb = bnad_lldp_get_cfg_cb;
+
+	bna_register_callback(bnad->priv, &bnad->priv_cbfn, bnad);
+
+	bna_iocll_meminfo(bnad->priv, bnad->ioc_meminfo);
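+	/*
+	 * Firmware trace memory only needs to be virtually contiguous;
+	 * everything else must be DMA-able.
+	 */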
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		switch (i) {
+		case BNA_KVA_MEM_T_FWTRC:
+			bnad->ioc_meminfo[i].kva =
+				vmalloc(bnad->ioc_meminfo[i].len);
+			break;
+		default:
+			bnad->ioc_meminfo[i].kva =
+				pci_alloc_consistent(bnad->pcidev,
+						     bnad->ioc_meminfo[i].len,
+						     (dma_addr_t *) &bnad->
+						     ioc_meminfo[i].dma);
+
+			break;
+		}
+		if (!bnad->ioc_meminfo[i].kva) {
+			printk(KERN_ERR
+			       "port %u failed allocating %u "
+			       "bytes memory for IOC\n",
+			       bnad->bna_id, bnad->ioc_meminfo[i].len);
+			err = -ENOMEM;
+			goto free_ioc_mem;
+		} else
+			memset(bnad->ioc_meminfo[i].kva, 0,
+			       bnad->ioc_meminfo[i].len);
+	}
+
+	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+	pcidev_info.device_id = bnad->pcidev->device;
+	pcidev_info.pci_bar_kva = bnad->bar0;
+	bna_iocll_attach(bnad->priv, bnad, bnad->ioc_meminfo, &pcidev_info,
+			 bnad->trcmod, NULL, bnad->logmod);
+
+	err = bnad_cee_attach(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u cee_attach failed: %d\n", bnad->bna_id,
+		       err);
+		goto iocll_detach;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bnad_enable_msix(bnad);
+	else
+		dev_info(&bnad->pcidev->dev, "Working in INTx mode, no RSS\n");
+	bna_intx_disable(bnad->priv, &intr_mask);
+	err = bnad_request_mbox_irq(bnad);
+	if (err)
+		goto disable_msix;
+
+	setup_timer(&bnad->ioc_timer, bnad_ioc_timeout,
+		    (unsigned long)bnad);
+	mod_timer(&bnad->ioc_timer, jiffies +
+		  msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+
+	bnad_conf_lock();
+	bnad->state = BNAD_S_START;
+
+	init_completion(&bnad->ioc_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_enable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	wait_for_completion(&bnad->ioc_comp);
+
+	if (!bnad->ioc_comp_status) {
+		bnad->state = BNAD_S_INIT;
+		bna_port_mac_get(bnad->priv, (struct mac *)bnad->perm_addr);
+	} else {
+		bnad->state = BNAD_S_INIT_DOWN;
+	}
+	bnad_conf_unlock();
+
+	return 0;
+
+disable_msix:
+	bnad_disable_msix(bnad);
+	bnad_cee_detach(bnad);
+iocll_detach:
+	bna_iocll_detach(bnad->priv);
+free_ioc_mem:
+	bnad_free_ioc_mem(bnad);
+	pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE, bnad->priv_stats,
+			    pci_unmap_addr(bnad, priv_stats_dma));
+	bnad->priv_stats = NULL;
+free_priv_mem:
+	kfree(bnad->priv);
+	bnad->priv = NULL;
+free_trcmod:
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+
+	return err;
+}
+
+static void bnad_priv_uninit(struct bnad *bnad)
+{
+	int i;
+	enum bna_status err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->priv) {
+
+		init_completion(&bnad->ioc_comp);
+
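+		/*
+		 * bna_iocll_disable() may keep returning BNA_BUSY; retry
+		 * for up to ~10 seconds before forcing a cleanup.
+		 */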
+		for (i = 0; i < 10; i++) {
+			spin_lock_irq(&bnad->priv_lock);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BUG_ON(err && err != BNA_BUSY);
+			if (!err)
+				break;
+			msleep(1000);
+		}
+		if (err) {
+			/* Probably firmware crashed. */
+			sprintf(message,
+				"bna_iocll_disable failed, "
+				"clean up and try again");
+			pr_info("%s", message);
+			spin_lock_irq(&bnad->priv_lock);
+			bna_cleanup(bnad->priv);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BUG_ON(err);
+		}
+		wait_for_completion(&bnad->ioc_comp);
+
+		sprintf(message, "port %u IOC is disabled", bnad->bna_id);
+		pr_info("%s", message);
+
+		bnad->state = BNAD_S_UNLOADING;
+
+		/* Stop the timer after disabling IOC. */
+		del_timer_sync(&bnad->ioc_timer);
+		bnad_free_ioc_mem(bnad);
+		bna_iocll_detach(bnad->priv);
+
+		flush_scheduled_work();
+		bnad_free_mbox_irq(bnad);
+
+		bnad_disable_msix(bnad);
+
+		if (bnad->priv_stats) {
+			pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+					    bnad->priv_stats,
+					    pci_unmap_addr(bnad,
+							   priv_stats_dma));
+			bnad->priv_stats = NULL;
+		}
+		kfree(bnad->priv);
+		bnad->priv = NULL;
+	}
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+}
+
+static struct pci_device_id bnad_pci_id_table[] = {
+	{
+	 .vendor = PCI_VENDOR_ID_BROCADE,
+	 .device = PCI_DEVICE_ID_BROCADE_CATAPULT,
+	 .subvendor = PCI_ANY_ID,
+	 .subdevice = PCI_ANY_ID,
+	 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+	 .class_mask = 0xffff00},
+	{0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static int __devinit bnad_pci_probe(struct pci_dev *pdev,
+	const struct pci_device_id *pcidev_id)
+{
+	int err, using_dac;
+	struct net_device *netdev;
+	struct bnad *bnad;
+	unsigned long mmio_start, mmio_len;
+	static u32 bna_id;
+
+	printk(KERN_INFO "bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+	if (!bfad_get_firmware_buf(pdev)) {
+		printk(KERN_WARNING "Failed to load Firmware Image!\n");
+		return -ENODEV;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, BNAD_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed: %d\n", err);
+		goto disable_device;
+	}
+
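+	/* Prefer 64-bit DMA; fall back to a 32-bit mask if that fails. */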
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev,
+				DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev,
+					"set 32bit consistent DMA mask failed: "
+					"%d\n", err);
+				goto release_regions;
+			}
+		}
+		using_dac = 0;
+	}
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct bnad));
+	if (!netdev) {
+		dev_err(&pdev->dev, "alloc_etherdev failed\n");
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	pci_set_drvdata(pdev, netdev);
+
+	bnad = netdev_priv(netdev);
+
+	memset(bnad, 0, sizeof(struct bnad));
+
+	bnad->netdev = netdev;
+	bnad->pcidev = pdev;
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bnad->bar0 = ioremap_nocache(mmio_start, mmio_len);
+	if (!bnad->bar0) {
+		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+		err = -ENOMEM;
+		goto free_devices;
+	}
+	printk(KERN_INFO "bar0 mapped to %p, len %lu\n", bnad->bar0, mmio_len);
+
+	netdev->netdev_ops = &bnad_netdev_ops;
+
+	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_IPV6_CSUM;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+
+	netdev->vlan_features = netdev->features;
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |=
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len - 1;
+
+	bnad_set_ethtool_ops(netdev);
+
+	bnad->bna_id = bna_id;
+	err = bnad_priv_init(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
+		goto unmap_bar0;
+	}
+
+	BUG_ON(netdev->addr_len != ETH_ALEN);
+	memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+	memcpy(netdev->dev_addr, bnad->perm_addr, netdev->addr_len);
+
+	netif_carrier_off(netdev);
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR "port %u register_netdev failed: %d\n",
+		       bnad->bna_id, err);
+		goto bnad_device_uninit;
+	}
+	bna_id++;
+	return 0;
+
+bnad_device_uninit:
+	bnad_priv_uninit(bnad);
+unmap_bar0:
+	iounmap(bnad->bar0);
+free_devices:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+release_regions:
+	pci_release_regions(pdev);
+disable_device:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void __devexit bnad_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct bnad *bnad;
+
+	if (!netdev)
+		return;
+
+	printk(KERN_INFO "%s bnad_pci_remove\n", netdev->name);
+	bnad = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	bnad_priv_uninit(bnad);
+	iounmap(bnad->bar0);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver bnad_pci_driver = {
+	.name = BNAD_NAME,
+	.id_table = bnad_pci_id_table,
+	.probe = bnad_pci_probe,
+	.remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init bnad_module_init(void)
+{
+
+	printk(KERN_INFO "Brocade 10G Ethernet driver\n");
+
+	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+	return pci_register_driver(&bnad_pci_driver);
+}
+
+static void __exit bnad_module_exit(void)
+{
+	pci_unregister_driver(&bnad_pci_driver);
+
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.h net-next-2.6-mod/drivers/net/bna/bnad.h
--- net-next-2.6-orig/drivers/net/bna/bnad.h	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.h	2009-12-18 16:53:40.000000000 -0800
@@ -0,0 +1,347 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef _BNAD_H_
+#define _BNAD_H_
+
+#include "cee/bfa_cee.h"
+#include "bna.h"
+
+#define BNAD_MAX_Q_DEPTH	0x10000
+#define BNAD_MIN_Q_DEPTH	0x200
+
+#define BNAD_TXQ_NUM		1
+#define BNAD_TX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_TXQ	2048
+
+#define BNAD_MAX_RXQS		64
+#define BNAD_MAX_RXQSETS_USED	16
+#define BNAD_RX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_RXQ	2048
+
+#define BNAD_MAX_CQS		64
+#define BNAD_MAX_RXQS_PER_CQ	2
+
+#define BNAD_MSIX_ERR_MAILBOX_NUM	1
+
+#define BNAD_INTX_MAX_IB_NUM	16
+#define BNAD_INTX_IB_NUM	2	/* 1 for Tx, 1 for Rx */
+#define BNAD_INTX_TX_IB_ID	0
+#define BNAD_INTX_RX_IB_ID	1
+
+#define BNAD_QUEUE_NAME_SIZE	16
+
+#define BNAD_JUMBO_MTU		9000
+
+#define BNAD_COALESCING_TIMER_UNIT	5	/* 5us */
+#define BNAD_MAX_COALESCING_TIMEO	0xFF	/* in 5us units */
+#define BNAD_MAX_INTERPKT_COUNT		0xFF
+#define BNAD_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
+
+#define BNAD_TX_COALESCING_TIMEO	20	/* 20 * 5 = 100us */
+#define BNAD_TX_INTERPKT_COUNT		32
+
+#define BNAD_RX_COALESCING_TIMEO	12	/* 12 * 5 = 60us */
+#define BNAD_RX_INTERPKT_COUNT		6
+#define BNAD_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
+
+#define BNAD_SMALL_RXBUF_SIZE	128
+
+#define BNAD_RIT_OFFSET		0
+#define BNAD_MULTICAST_RXQ_ID	0
+
+#define BNAD_NETIF_WAKE_THRESHOLD	8
+
+#define BNAD_TX_MAX_VECTORS		255
+#define BNAD_TX_MAX_VECTORS_PER_WI	4
+#define BNAD_TX_MAX_DATA_PER_WI		0xFFFFFF	/* 24 bits */
+#define BNAD_TX_MAX_DATA_PER_VECTOR	0x3FFF	/* 14 bits */
+#define BNAD_TX_MAX_WRR_QUOTA		0xFFF	/* 12 bits */
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
+
+#define BNAD_NOT_READY(_bnad)	test_bit(BNAD_F_HWERROR, &(_bnad)->flags)
+#define BNAD_ADMIN_DOWN(_bnad)	(!netif_running((_bnad)->netdev) ||	\
+	test_bit(BNAD_F_BCU_DISABLED, &(_bnad)->flags))
+
+#define BNAD_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)	\
+	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define bnad_conf_lock()	down(&bnad->conf_sem)
+#define bnad_conf_unlock()	up(&bnad->conf_sem)
+
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_ct;
+extern u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+
+struct bnad_skb_unmap {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+	u32 producer_index;
+	u32 consumer_index;
+	struct bnad_skb_unmap *unmap_array;
+	u32 q_depth;
+};
+
+struct bnad_ib_entry {
+	struct bna_ib *ib;
+	void *ib_seg_addr;
+	struct bna_ib_config ib_config;
+};
+
+struct bnad_txq_info {
+	unsigned long flags;
+#define BNAD_TXQ_FREE_SENT	0
+	struct bna_txq txq;
+	struct bna_ib ib;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct bnad *bnad;
+	volatile u32 *hw_consumer_index;
+	struct bna_txq_config txq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_rxq_info {
+	unsigned long flags;
+#define BNAD_RXQ_REFILL		0
+	struct bna_rxq rxq;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_packets_with_error;
+	u64 rxbuf_alloc_failed;
+	struct bnad *bnad;
+	u32 rxq_id;
+	struct bna_rxq_config rxq_config;
+} ____cacheline_aligned;
+
+struct bnad_cq_info {
+	struct bna_cq cq;
+	struct bna_ib ib;
+	struct bnad *bnad;
+	struct bna_pkt_rate pkt_rate;
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	volatile u32 *hw_producer_index;
+	struct napi_struct napi;
+	u32 cq_id;
+	struct bna_cq_config cq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_txf_info {
+	u32 txf_id;
+	struct bna_txf_config txf_config;
+};
+
+struct bnad_rxf_info {
+	u32 rxf_id;
+	struct bna_rxf_config rxf_config;
+};
+
+enum bnad_ucast_cmd {
+	BNAD_UCAST_MAC_SET,
+	BNAD_UCAST_MAC_ADD,
+	BNAD_UCAST_MAC_DEL
+};
+
+enum bnad_state {
+	BNAD_S_START = 0,
+	BNAD_S_INIT = 1,
+	BNAD_S_INIT_DOWN = 2,
+	BNAD_S_INIT_DISABLING = 3,
+	BNAD_S_INIT_DISABLED = 4,
+	BNAD_S_OPENING = 5,
+	BNAD_S_OPEN = 6,
+	BNAD_S_OPEN_DOWN = 7,
+	BNAD_S_OPEN_DISABLING = 8,
+	BNAD_S_OPEN_DISABLED = 9,
+	BNAD_S_CLOSING = 10,
+	BNAD_S_UNLOADING = 11
+};
+
+enum bnad_link_state {
+	BNAD_LS_DOWN = 0,
+	BNAD_LS_UP = 1
+};
+struct bnad {
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	struct bna_dev *priv;
+
+	enum bnad_state state;
+	unsigned long flags;
+#define BNAD_F_BCU_DISABLED		0
+#define BNAD_F_HWERROR			1
+#define BNAD_F_MBOX_IRQ_DISABLED	2
+#define BNAD_F_CEE_RUNNING		3
+
+	unsigned int config;
+#define BNAD_CF_MSIX		0x01
+#define BNAD_CF_PROMISC		0x02
+#define BNAD_CF_ALLMULTI		0x04
+#define BNAD_CF_TXQ_DEPTH	0x10
+#define BNAD_CF_RXQ_DEPTH	0x20
+
+	unsigned int priority;
+	unsigned int curr_priority;	/* currently applied priority */
+
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+
+	uint txq_num;
+	uint txq_depth;
+	struct bnad_txq_info *txq_table;
+
+	struct tasklet_struct tx_free_tasklet;	/* For Tx cleanup */
+
+	uint rxq_num;
+	uint rxq_depth;
+	struct bnad_rxq_info *rxq_table;
+	uint cq_num;
+	struct bnad_cq_info *cq_table;
+
+	struct vlan_group *vlangrp;
+
+	u32 rx_csum;
+
+	uint msix_num;
+	struct msix_entry *msix_table;
+
+	uint ib_num;
+	struct bnad_ib_entry *ib_table;
+
+	struct bna_rit_entry *rit;	/* RxQ Indirection Table */
+
+	spinlock_t priv_lock ____cacheline_aligned;
+
+	uint txf_num;
+	struct bnad_txf_info *txf_table;
+	uint rxf_num;
+	struct bnad_rxf_info *rxf_table;
+
+	struct timer_list stats_timer;
+	struct net_device_stats net_stats;
+
+	u8 tx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 tx_interpkt_count;
+
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 rx_interpkt_count;
+	u8 rx_interpkt_timeo;	/* 4 bits, unit is 0.5usec. */
+
+	u8 rx_dyn_coalesce_on;	/* Rx Dynamic Intr Moderation Flag */
+
+	u8 ref_count;
+
+	u8 lldp_comp_status;
+	u8 cee_stats_comp_status;
+	u8 cee_reset_stats_status;
+	u8 ucast_comp_status;
+	u8 qstop_comp_status;
+
+	int ioc_comp_status;
+
+	struct bna_pause_config pause_config;
+
+	struct bna_stats *hw_stats;
+	struct bnad_drv_stats stats;
+
+	struct work_struct work;
+	unsigned int work_flags;
+#define BNAD_WF_ERROR		0x1
+#define BNAD_WF_RESETDONE	0x2
+#define BNAD_WF_CEE_PRIO	0x4
+#define BNAD_WF_LS_NOTIFY	0x8
+
+	struct completion lldp_comp;
+	struct completion cee_stats_comp;
+	struct completion cee_reset_stats_comp;
+	struct completion ucast_comp;
+	struct completion qstop_comp;
+	struct completion ioc_comp;
+
+	u32 bna_id;
+	u8 __iomem *bar0;	/* registers */
+	unsigned char perm_addr[ETH_ALEN];
+
+	void *priv_stats;
+	DECLARE_PCI_UNMAP_ADDR(priv_stats_dma)
+
+	struct bfa_trc_mod *trcmod;
+	struct bfa_log_mod *logmod;
+	struct bna_meminfo ioc_meminfo[BNA_MEM_T_MAX];
+	struct timer_list ioc_timer;
+	struct semaphore conf_sem;
+
+	struct bna_mbox_cbfn priv_cbfn;
+
+	char adapter_name[64];
+	char port_name[64];
+
+	/* CEE Stuff */
+	struct bfa_cee_cbfn cee_cbfn;
+	struct bfa_cee cee;
+
+	struct list_head list_entry;
+};
+
+extern uint bnad_rxqs_per_cq;
+
+extern struct semaphore bnad_list_sem;
+extern struct list_head bnad_list;
+
+int bnad_open(struct net_device *netdev);
+int bnad_stop(struct net_device *netdev);
+int bnad_stop_locked(struct net_device *netdev);
+int bnad_open_locked(struct net_device *netdev);
+int bnad_sw_reset_locked(struct net_device *netdev);
+int bnad_ioc_disabling_locked(struct bnad *bnad);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_ioctl_init(void);
+void bnad_ioctl_exit(void);
+struct net_device_stats *bnad_get_stats(struct net_device *netdev);
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 *mac_ptr,
+		   unsigned int cmd);
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss);
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id);
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask);
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_cq_init(struct bnad *bnad, uint cq_id);
+void bnad_setup_cq(struct bnad *bnad, uint cq_id);
+void bnad_setup_ib(struct bnad *bnad, uint ib_id);
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id);
+void bnad_free_ib(struct bnad *bnad, uint ib_id);
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id);
+u32 bnad_get_msglevel(struct net_device *netdev);
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel);
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth);
+void bnad_free_cq(struct bnad *bnad, uint cq_id);
+void bnad_add_to_list(struct bnad *bnad);
+void bnad_remove_from_list(struct bnad *bnad);
+struct bnad *get_bnadev(int bna_id);
+int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _BNAD_H_ */

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
@ 2009-11-26  9:28 Debashis Dutt
  0 siblings, 0 replies; 30+ messages in thread
From: Debashis Dutt @ 2009-11-26  9:28 UTC (permalink / raw)
  To: netdev

From: Debashis Dutt <ddutt@brocade.com>

This is patch 1/6 which contains linux driver source for
Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
Source is based against net-next-2.6.

We wish this patch to be considered for inclusion in net-next-2.6

Signed-off-by: Debashis Dutt <ddutt@brocade.com>
---
 bnad.c | 3540 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 bnad.h |  347 ++++++
 2 files changed, 3887 insertions(+)

diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.c net-next-2.6-mod/drivers/net/bna/bnad.c
--- net-next-2.6-orig/drivers/net/bna/bnad.c	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.c	2009-11-26 00:07:07.000000000 -0800
@@ -0,0 +1,3540 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ *  bnad.c  Brocade 10G PCIe Ethernet driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/workqueue.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pm.h>
+#include <linux/random.h>
+
+#include <net/checksum.h>
+
+#include "bnad.h"
+#include "cna.h"
+#include "bna_iocll.h"
+#include "bna_intr.h"
+#include "bnad_defs.h"
+
+#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
+static const bool bnad_msix = true;
+static const bool bnad_small_large_rxbufs = true;
+static uint bnad_rxqsets_used;
+static const bool bnad_ipid_mode = false;
+static const bool bnad_vlan_strip = true;
+static const uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
+static const uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
+static uint bnad_log_level;
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");
+
+uint bnad_rxqs_per_cq = 2;
+
+const char *bnad_states[] = {
+	"START",
+	"INIT",
+	"INIT_DOWN",
+	"INIT_DISABLING",
+	"INIT_DISABLED",
+	"OPENING",
+	"OPEN",
+	"OPEN_DOWN",
+	"OPEN_DISABING",
+	"OPEN_DISABLED",
+	"CLOSING",
+	"UNLOADING"
+};
+
+static void bnad_disable_msix(struct bnad *bnad);
+static void bnad_free_ibs(struct bnad *bnad);
+static void bnad_set_rx_mode(struct net_device *netdev);
+static void bnad_set_rx_mode_locked(struct net_device *netdev);
+static void bnad_reconfig_vlans(struct bnad *bnad);
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets);
+static int bnad_set_mac_address(struct net_device *netdev, void *addr);
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr);
+static int bnad_disable_locked(struct bnad *bnad);
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu);
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid);
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid);
+static void bnad_netpoll(struct net_device *netdev);
+
+static const struct net_device_ops bnad_netdev_ops = {
+	.ndo_open			= bnad_open,
+	.ndo_stop			= bnad_stop,
+	.ndo_start_xmit			= bnad_start_xmit,
+	.ndo_get_stats			= bnad_get_stats,
+	.ndo_set_rx_mode		= bnad_set_rx_mode,
+	.ndo_set_multicast_list		= bnad_set_rx_mode,
+	.ndo_set_mac_address		= bnad_set_mac_address,
+	.ndo_change_mtu			= bnad_change_mtu,
+
+	.ndo_vlan_rx_register		= bnad_vlan_rx_register,
+	.ndo_vlan_rx_add_vid		= bnad_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid		= bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller		= bnad_netpoll,
+#endif
+};
+
+u32 bnad_get_msglevel(struct net_device *netdev)
+{
+	return bnad_log_level;
+}
+
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	bnad_conf_lock();
+	bnad_log_level = msglevel;
+	bnad_conf_unlock();
+}
+
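+/*
+ * Reclaim skbs whose transmission the hardware has completed (up to
+ * updated_txq_cons): unmap their DMA buffers, free them and account the
+ * sent packets/bytes on the TxQ. Returns the number of packets freed.
+ */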
+static unsigned int bnad_free_txbufs(struct bnad_txq_info *txqinfo,
+	u16 updated_txq_cons)
+{
+	struct bnad *bnad = txqinfo->bnad;
+	unsigned int sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	wis = BNAD_Q_INDEX_CHANGE(txqinfo->txq.q.consumer_index,
+				  updated_txq_cons, txqinfo->txq.q.q_depth);
+	BNA_ASSERT(wis <=
+		   BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth));
+	unmap_array = txqinfo->skb_unmap_q.unmap_array;
+	unmap_cons = txqinfo->skb_unmap_q.consumer_index;
+	prefetch(&unmap_array[unmap_cons + 1]);
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+		BNA_ASSERT(skb);
+		unmap_array[unmap_cons].skb = NULL;
+		BNA_ASSERT(wis >=
+			   BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags));
+		BNA_ASSERT(((txqinfo->skb_unmap_q.producer_index -
+			     unmap_cons) & (txqinfo->skb_unmap_q.q_depth -
+					    1)) >=
+			   1 + skb_shinfo(skb)->nr_frags);
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr), skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, txqinfo->skb_unmap_q.q_depth);
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+				       pci_unmap_addr(&unmap_array[unmap_cons],
+						      dma_addr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					   0);
+			BNA_QE_INDX_ADD(unmap_cons, 1,
+					txqinfo->skb_unmap_q.q_depth);
+			prefetch(&unmap_array[unmap_cons + 1]);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	txqinfo->txq.q.consumer_index = updated_txq_cons;
+	txqinfo->skb_unmap_q.consumer_index = unmap_cons;
+	txqinfo->tx_packets += sent_packets;
+	txqinfo->tx_bytes += sent_bytes;
+	return sent_packets;
+}
+
+static inline void bnad_disable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+}
+
+static inline void bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    bnad->tx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    bnad->cq_table[i].
+					    rx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static inline void bnad_disable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib, 0);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+}
+static inline void bnad_enable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib,
+				    cqinfo->rx_coalescing_timeo);
+
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static unsigned int bnad_tx(struct bnad *bnad, struct bnad_txq_info *txqinfo)
+{
+	struct net_device *netdev = bnad->netdev;
+	unsigned int sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags))
+		return 0;
+
+	sent = bnad_free_txbufs(txqinfo,
+				(u16) (*txqinfo->hw_consumer_index));
+	if (sent) {
+		if (netif_queue_stopped(netdev) &&
+		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
+		    BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);
+	} else {
+		bna_ib_ack(bnad->priv, &txqinfo->ib, 0);
+	}
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+
+	return sent;
+}
+
+static irqreturn_t bnad_msix_tx(int irq, void *data)
+{
+	struct bnad_txq_info *txqinfo = (struct bnad_txq_info *)data;
+	struct bnad *bnad = txqinfo->bnad;
+
+	bnad_tx(bnad, txqinfo);
+
+	return IRQ_HANDLED;
+}
+
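+/*
+ * Replenish an RxQ: allocate skbs for the free unmap-queue slots, map
+ * them for DMA, then publish the new producer index to the hardware via
+ * the RxQ doorbell.
+ */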
+static void bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc =
+		BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				rxqinfo->skb_unmap_q.q_depth);
+
+	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
+	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= rxqinfo->rxq.q.q_depth);
+
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent,
+					     wi_range);
+			BNA_ASSERT(wi_range &&
+				   wi_range <= rxqinfo->rxq.q.q_depth);
+		}
+		skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			rxqinfo->rxbuf_alloc_failed++;
+			goto finishing;
+		}
+		skb->dev = rxqinfo->bnad->netdev;
+		skb_reserve(skb, NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr =
+			pci_map_single(rxqinfo->bnad->pcidev, skb->data,
+				       rxqinfo->rxq_config.buffer_size,
+				       PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, rxqinfo->skb_unmap_q.q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		rxqinfo->skb_unmap_q.producer_index = unmap_prod;
+		rxqinfo->rxq.q.producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(&rxqinfo->rxq);
+	}
+}
+
+static inline void bnad_refill_rxq(struct bnad_rxq_info *rxqinfo)
+{
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags)) {
+		if (BNA_QE_FREE_CNT
+		    (&rxqinfo->skb_unmap_q,
+		     rxqinfo->skb_unmap_q.
+		     q_depth) >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_rxbufs(rxqinfo);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+	}
+}
+
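+/*
+ * Receive completion processing (NAPI context): walk up to 'budget'
+ * completions, unmap and hand good frames to the stack (with optional
+ * VLAN acceleration), then ack the IB and refill the RxQ(s) behind
+ * this CQ.
+ */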
+static unsigned int bnad_poll_cq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_rxq_info *rxqinfo = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	struct bna_pkt_rate *pkt_rt = &cqinfo->pkt_rate;
+
+	prefetch(bnad->netdev);
+	cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+	BNA_ASSERT(wi_range && wi_range <= cqinfo->cq.q.q_depth);
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+		rxqinfo = &bnad->rxq_table[cmpl->rxq_id];
+		unmap_q = &rxqinfo->skb_unmap_q;
+
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		prefetch(skb->data - NET_IP_ALIGN);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		/* XXX May be bad for performance. */
+		/* CATAPULT_BRINGUP : Should we add all the packets ? */
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+
+		wis++;
+		if (likely(--wi_range))
+			next_cmpl = cmpl + 1;
+		else {
+			BNA_Q_PI_ADD(&cqinfo->cq, wis);
+			wis = 0;
+			next_cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+			BNA_ASSERT(wi_range &&
+				   wi_range <= cqinfo->cq.q.q_depth);
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely
+		    (flags &
+		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
+		      BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb(skb);
+			rxqinfo->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely
+		    (bnad->rx_csum &&
+		     (((flags & BNA_CQ_EF_IPV4) &&
+		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+		      (flags & BNA_CQ_EF_IPV6)) &&
+		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		rxqinfo->rx_packets++;
+		rxqinfo->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) &&
+		    bnad_vlan_strip) {
+			BNA_ASSERT(cmpl->vlan_tag);
+			vlan_hwaccel_receive_skb(skb, bnad->vlangrp,
+						 ntohs(cmpl->vlan_tag));
+		} else
+			netif_receive_skb(skb);
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	BNA_Q_PI_ADD(&cqinfo->cq, wis);
+
+	if (likely(rxqinfo)) {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, packets);
+		/* Check the current queue first. */
+		bnad_refill_rxq(rxqinfo);
+
+		/* XXX counters per queue for refill? */
+		if (likely(bnad_small_large_rxbufs)) {
+			/* There are 2 RxQs - small and large buffer queues */
+			unsigned int rxq_id = (rxqinfo->rxq_id ^ 1);
+			bnad_refill_rxq(&bnad->rxq_table[rxq_id]);
+		}
+	} else {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	}
+
+	return packets;
+}
+
+static irqreturn_t bnad_msix_rx(int irq, void *data)
+{
+	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
+	struct bnad *bnad = cqinfo->bnad;
+
+	if (likely(napi_schedule_prep(&cqinfo->napi))) {
+		bnad_disable_rx_irq(bnad, cqinfo);
+		__napi_schedule(&cqinfo->napi);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_msix_err_mbox(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+
+	bna_intr_status_get(bnad->priv, &intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+		bna_mbox_err_handler(bnad->priv, intr_status);
+
+	spin_unlock(&bnad->priv_lock);
+
+	return IRQ_HANDLED;
+}
+
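+/*
+ * INTx interrupt handler: services mailbox events and schedules NAPI
+ * for Tx/Rx processing on the shared interrupt line.
+ */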
+static irqreturn_t bnad_isr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+
+	if (!intr_status) {
+		spin_unlock(&bnad->priv_lock);
+		return IRQ_NONE;
+	}
+
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		bna_mbox_err_handler(bnad->priv, intr_status);
+		spin_unlock(&bnad->priv_lock);
+		if (BNA_IS_ERR_INTR(intr_status) ||
+		    !BNA_IS_INTX_DATA_INTR(intr_status))
+			goto exit_isr;
+	} else
+		spin_unlock(&bnad->priv_lock);
+
+	if (likely(napi_schedule_prep(&bnad->cq_table[0].napi))) {
+		bnad_disable_txrx_irqs(bnad);
+		__napi_schedule(&bnad->cq_table[0].napi);
+	}
+
+exit_isr:
+	return IRQ_HANDLED;
+}
+
+static int bnad_request_mbox_irq(struct bnad *bnad)
+{
+	int err;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		err = request_irq(bnad->msix_table[bnad->msix_num - 1].vector,
+				  &bnad_msix_err_mbox, 0,
+				  bnad->netdev->name, bnad->netdev);
+	} else {
+		err = request_irq(bnad->pcidev->irq, &bnad_isr,
+				  IRQF_SHARED, bnad->netdev->name,
+				  bnad->netdev);
+	}
+
+	if (err) {
+		dev_err(&bnad->pcidev->dev,
+			"Request irq for mailbox failed: %d\n", err);
+		return err;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bna_mbox_msix_idx_set(bnad->priv, bnad->msix_num - 1);
+
+	bna_mbox_intr_enable(bnad->priv);
+	return 0;
+}
+
+static void bnad_sync_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+	synchronize_irq(irq);
+}
+
+static void bnad_free_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_mbox_intr_disable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	free_irq(irq, bnad->netdev);
+}
+
+static int bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
+{
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[txq_id].vector,
+			   &bnad_msix_tx, 0,
+			   bnad->txq_table[txq_id].name,
+			   &bnad->txq_table[txq_id]);
+}
+
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
+{
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
+			   &bnad_msix_rx, 0,
+			   bnad->cq_table[cq_id].name, &bnad->cq_table[cq_id]);
+}
+
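+/*
+ * Request one MSI-X vector per TxQ and per CQ. In INTx mode no extra
+ * IRQs are needed; just unmask the data interrupts and ack each IB once.
+ */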
+static int bnad_request_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	int i;
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		u32 mask;
+		bna_intx_disable(bnad->priv, &mask);
+		mask &= ~0xffff;
+		bna_intx_enable(bnad->priv, mask);
+		for (i = 0; i < bnad->ib_num; i++)
+			bna_ib_ack(bnad->priv, bnad->ib_table[i].ib, 0);
+		return 0;
+	}
+
+	entries = bnad->msix_table;
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_request_txq_irq(bnad, i);
+		if (err) {
+			sprintf(message, "%s request irq for TxQ %d failed %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			while (--i >= 0) {
+				free_irq(entries[i].vector,
+					 &bnad->txq_table[i]);
+			}
+			return err;
+		}
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_request_cq_irq(bnad, i);
+		if (err) {
+			sprintf(message, "%s request irq for CQ %u failed %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			while (--i >= 0) {
+				free_irq(entries[bnad->txq_num + i].vector,
+					 &bnad->cq_table[i]);
+			}
+			goto free_txq_irqs;
+		}
+	}
+
+	return 0;
+
+free_txq_irqs:
+	for (i = 0; i < bnad->txq_num; i++)
+		free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+	bnad_disable_msix(bnad);
+
+	return err;
+}
+
+static void bnad_free_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	uint i;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		entries = bnad->msix_table;
+		for (i = 0; i < bnad->txq_num; i++)
+			free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+		for (i = 0; i < bnad->cq_num; i++) {
+			free_irq(entries[bnad->txq_num + i].vector,
+				 &bnad->cq_table[i]);
+		}
+	} else
+		synchronize_irq(bnad->pcidev->irq);
+}
+
+void bnad_setup_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+
+	BNA_ASSERT(ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_ib_config_set(bnad->priv, ib_entry->ib, ib_id,
+			  &ib_entry->ib_config);
+	/* Start the IB */
+	bna_ib_ack(bnad->priv, ib_entry->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_ibs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_ib(bnad, bnad->txq_table[i].txq_config.ib_id);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_ib(bnad, bnad->cq_table[i].cq_config.ib_id);
+}
+
+/* These functions are called back with priv_lock held. */
+
+static void bnad_lldp_get_cfg_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_attr_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_stats_comp_status = status;
+	complete(&bnad->cee_stats_comp);
+}
+
+static void bnad_cee_reset_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_reset_stats_status = status;
+	complete(&bnad->cee_reset_stats_comp);
+}
+
+static void bnad_ucast_set_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->ucast_comp_status = status;
+	complete(&bnad->ucast_comp);
+}
+
+static void bnad_q_stop_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+
+	bnad->qstop_comp_status = status;
+	complete(&bnad->qstop_comp);
+}
+
+static unsigned int bnad_get_priority(struct bnad *bnad, u8 prio_map)
+{
+	unsigned int i;
+
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		return i;
+	}
+	return 0;
+}
+
+static void bnad_link_up_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct bfi_ll_aen *up_aen = (struct bfi_ll_aen *)
+		(&bnad->priv->mb_msg);
+
+	bnad->cee_linkup = up_aen->cee_linkup;
+	bnad->priority = bnad_get_priority(bnad, up_aen->prio_map);
+
+	bnad->link_state = BNAD_LS_UP;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_link_down_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->link_state = BNAD_LS_DOWN;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_stats_get_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->stats.hw_stats_updates++;
+	if (bnad->state == BNAD_S_OPEN)
+		mod_timer(&bnad->stats_timer, jiffies + HZ);
+}
+
+/* Called with bnad priv_lock held. */
+static void bnad_hw_error(struct bnad *bnad, u8 status)
+{
+	unsigned int irq;
+	char message[BNA_MESSAGE_SIZE];
+
+	set_bit(BNAD_F_HWERROR, &bnad->flags);
+
+	bna_mbox_intr_disable(bnad->priv);
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (!test_and_set_bit(BNAD_F_MBOX_IRQ_DISABLED, &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			sprintf(message, "Disabling Mbox IRQ %d for port %d",
+				irq, bnad->bna_id);
+		DPRINTK(INFO, "%s",
+				message);
+			disable_irq_nosync(irq);
+		}
+	}
+
+	bna_cleanup(bnad->priv);
+
+	bnad->work_flags = BNAD_WF_ERROR;
+	if (bnad->state != BNAD_S_UNLOADING)
+		schedule_work(&bnad->work);
+}
+
+static void bnad_hw_error_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad_hw_error(bnad, status);
+}
+
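+/* Allocate the array used to track skb/DMA mappings for a queue. */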
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth)
+{
+	/* Q_depth must be power of 2 for macros to work. */
+	BNA_ASSERT(BNA_POWER_OF_2(q_depth));
+	unmap_q->q_depth = q_depth;
+	unmap_q->unmap_array = vmalloc(q_depth * sizeof(struct bnad_skb_unmap));
+	if (!unmap_q->unmap_array)
+		return -ENOMEM;
+	memset(unmap_q->unmap_array, 0,
+	       q_depth * sizeof(struct bnad_skb_unmap));
+	return 0;
+}
+
+static int bnad_alloc_unmap_queues(struct bnad *bnad)
+{
+	int i, err = 0;
+	struct bnad_txq_info *txqinfo;
+	struct bnad_rxq_info *rxqinfo;
+	char message[BNA_MESSAGE_SIZE];
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		err = bnad_alloc_unmap_q(&txqinfo->skb_unmap_q,
+					 txqinfo->txq.q.q_depth * 4);
+		if (err) {
+			sprintf(message,
+				"%s allocating Tx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			return err;
+		}
+	}
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		err = bnad_alloc_unmap_q(&rxqinfo->skb_unmap_q,
+					 rxqinfo->rxq.q.q_depth);
+		if (err) {
+			sprintf(message,
+				"%s allocating Rx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void bnad_reset_q(struct bnad *bnad, struct bna_q *q,
+	struct bnad_unmap_q *unmap_q)
+{
+	u32 _ui;
+
+	BNA_ASSERT(q->producer_index == q->consumer_index);
+	BNA_ASSERT(unmap_q->producer_index == unmap_q->consumer_index);
+
+	q->producer_index = 0;
+	q->consumer_index = 0;
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	for (_ui = 0; _ui < unmap_q->q_depth; _ui++)
+		BNA_ASSERT(!unmap_q->unmap_array[_ui].skb);
+}
+
+static void bnad_flush_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	struct bnad *bnad = rxqinfo->bnad;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 cq_id;
+
+	unmap_q = &rxqinfo->skb_unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+	}
+
+	bnad_reset_q(bnad, &rxqinfo->rxq.q, &rxqinfo->skb_unmap_q);
+	cq_id = rxqinfo->rxq_id / bnad_rxqs_per_cq;
+	*bnad->cq_table[cq_id].hw_producer_index = 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_disable_txq(struct bnad *bnad, u32 txq_id)
+{
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_txq_stop(bnad->priv, txq_id);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto txq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto txq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+txq_stop_exit:
+	if (err) {
+		sprintf(message, "%s stop TxQ %u failed %d", bnad->netdev->name,
+			txq_id, err);
+		DPRINTK(INFO, "%s", message);
+	}
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask)
+{
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_multi_rxq_stop(bnad->priv, rxq_id_mask);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto rxq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto rxq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+rxq_stop_exit:
+	if (err) {
+		sprintf(message, "%s stop RxQs(0x%llu) failed %d",
+			bnad->netdev->name, rxq_id_mask, err);
+		DPRINTK(INFO, "%s", message);
+	}
+
+	return err;
+}
+
+static int bnad_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_rx_irq(bnad, cqinfo);
+	return rcvd;
+}
+
+static int bnad_poll_txrx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	bnad_tx(bnad, &bnad->txq_table[0]);
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_txrx_irqs(bnad);
+	return rcvd;
+}
+
+static void bnad_napi_init(struct bnad *bnad)
+{
+	int (*napi_poll) (struct napi_struct *, int);
+	int i;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		napi_poll = bnad_poll_rx;
+	else
+		napi_poll = bnad_poll_txrx;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_add(bnad->netdev, &bnad->cq_table[i].napi, napi_poll,
+			       64);
+}
+
+static void bnad_napi_enable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_enable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_disable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_disable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_del(&bnad->cq_table[i].napi);
+}
+
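+/*
+ * Quiesce the data path: disable the Tx/Rx functions and interrupt
+ * blocks, release Tx/Rx IRQs, tear down NAPI, stop the stats timer and
+ * the Tx-free tasklet, and mark the carrier off.
+ */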
+static void bnad_stop_data_path(struct bnad *bnad, int on_error)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!on_error && !BNAD_NOT_READY(bnad)) {
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Wait to make sure Tx and Rx are stopped. */
+	msleep(1000);
+	bnad_free_txrx_irqs(bnad);
+	bnad_sync_mbox_irq(bnad);
+
+	bnad_napi_disable(bnad);
+	bnad_napi_uninit(bnad);
+	/* Delete the stats timer after synchronize with mbox irq. */
+	del_timer_sync(&bnad->stats_timer);
+
+	netif_tx_disable(bnad->netdev);
+	netif_carrier_off(bnad->netdev);
+
+	/*
+	 * Remove tasklets if scheduled
+	 */
+	tasklet_kill(&bnad->tx_free_tasklet);
+}
+
+static void bnad_port_admin_locked(struct bnad *bnad, u8 up)
+{
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!BNAD_NOT_READY(bnad)) {
+		bna_port_admin(bnad->priv, up);
+		if (up)
+			mod_timer(&bnad->stats_timer, jiffies + HZ);
+		else
+			bnad->link_state = BNAD_LS_DOWN;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held */
+static int bnad_stop_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_CLOSING;
+		bnad_disable_locked(bnad);
+		bnad->state = BNAD_S_INIT;
+		sprintf(message, "%s is stopped", bnad->netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN_DISABLED:
+		bnad->state = BNAD_S_INIT_DISABLED;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held */
+int bnad_ioc_disabling_locked(struct bnad *bnad)
+{
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT_DISABLING;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+		bnad_disable_locked(bnad);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int bnad_alloc_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	ib_entry->ib_seg_addr =
+		pci_alloc_consistent(bnad->pcidev, L1_CACHE_BYTES, &dma_addr);
+	if (!ib_entry->ib_seg_addr)
+		return -ENOMEM;
+
+	BNA_SET_DMA_ADDR(dma_addr, &ib_entry->ib_config.ib_seg_addr);
+	return 0;
+}
+static int bnad_alloc_ibs(struct bnad *bnad)
+{
+	uint i;
+	int err;
+
+	bnad->ib_num = bnad->txq_num + bnad->cq_num;
+	bnad->ib_table =
+		kzalloc(bnad->ib_num * sizeof(struct bnad_ib_entry),
+			GFP_KERNEL);
+	if (!bnad->ib_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->ib_num; i++) {
+		err = bnad_alloc_ib(bnad, i);
+		if (err)
+			goto free_ibs;
+	}
+	return 0;
+
+free_ibs:
+	bnad_free_ibs(bnad);
+	return err;
+}
+
+void bnad_free_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	if (ib_entry->ib_seg_addr) {
+		BNA_GET_DMA_ADDR(&ib_entry->ib_config.ib_seg_addr, dma_addr);
+		pci_free_consistent(bnad->pcidev, L1_CACHE_BYTES,
+				    ib_entry->ib_seg_addr, dma_addr);
+		ib_entry->ib_seg_addr = NULL;
+	}
+}
+
+static void bnad_free_ibs(struct bnad *bnad)
+{
+	uint i;
+
+	if (!bnad->ib_table)
+		return;
+	for (i = 0; i < bnad->ib_num; i++)
+		bnad_free_ib(bnad, i);
+	kfree(bnad->ib_table);
+	bnad->ib_table = NULL;
+}
+
+/* Let the caller deal with error - free memory. */
+static int bnad_alloc_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q, size_t qsize)
+{
+	size_t i;
+	dma_addr_t dma_addr;
+
+	qsize = ALIGN(qsize, PAGE_SIZE);
+	qpt->page_count = qsize >> PAGE_SHIFT;
+	qpt->page_size = PAGE_SIZE;
+
+	qpt->kv_qpt_ptr =
+		pci_alloc_consistent(bnad->pcidev,
+				     qpt->page_count *
+				     sizeof(struct bna_dma_addr), &dma_addr);
+	if (!qpt->kv_qpt_ptr)
+		return -ENOMEM;
+	BNA_SET_DMA_ADDR(dma_addr, &qpt->hw_qpt_ptr);
+
+	q->qpt_ptr = kzalloc(qpt->page_count * sizeof(void *), GFP_KERNEL);
+	if (!q->qpt_ptr)
+		return -ENOMEM;
+	qpt->qpt_ptr = q->qpt_ptr;
+	for (i = 0; i < qpt->page_count; i++) {
+		q->qpt_ptr[i] =
+			pci_alloc_consistent(bnad->pcidev, PAGE_SIZE,
+					     &dma_addr);
+		if (!q->qpt_ptr[i])
+			return -ENOMEM;
+		BNA_SET_DMA_ADDR(dma_addr,
+				 &((struct bna_dma_addr *)qpt->kv_qpt_ptr)[i]);
+
+	}
+
+	return 0;
+}
+
+static void bnad_free_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q)
+{
+	int i;
+	dma_addr_t dma_addr;
+
+	if (qpt->kv_qpt_ptr && q->qpt_ptr) {
+		for (i = 0; i < qpt->page_count; i++) {
+			if (q->qpt_ptr[i]) {
+				BNA_GET_DMA_ADDR(&
+						 ((struct bna_dma_addr *)qpt->
+						  kv_qpt_ptr)[i], dma_addr);
+				pci_free_consistent(bnad->pcidev, PAGE_SIZE,
+						    q->qpt_ptr[i], dma_addr);
+			}
+		}
+	}
+
+	kfree(q->qpt_ptr);
+	qpt->qpt_ptr = q->qpt_ptr = NULL;
+
+	if (qpt->kv_qpt_ptr) {
+		BNA_GET_DMA_ADDR(&qpt->hw_qpt_ptr, dma_addr);
+		pci_free_consistent(bnad->pcidev,
+				    qpt->page_count *
+				    sizeof(struct bna_dma_addr),
+				    qpt->kv_qpt_ptr, dma_addr);
+		qpt->kv_qpt_ptr = NULL;
+	}
+}
+
+static void bnad_free_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+	if (txqinfo->skb_unmap_q.unmap_array) {
+		bnad_free_txbufs(txqinfo, txqinfo->txq.q.producer_index);
+		vfree(txqinfo->skb_unmap_q.unmap_array);
+		txqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+	if (rxqinfo->skb_unmap_q.unmap_array) {
+		bnad_flush_rxbufs(rxqinfo);
+		vfree(rxqinfo->skb_unmap_q.unmap_array);
+		rxqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void bnad_free_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+}
+
+static void bnad_free_queues(struct bnad *bnad)
+{
+	uint i;
+
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++)
+			bnad_free_txq(bnad, i);
+		kfree(bnad->txq_table);
+		bnad->txq_table = NULL;
+	}
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++)
+			bnad_free_rxq(bnad, i);
+		kfree(bnad->rxq_table);
+		bnad->rxq_table = NULL;
+	}
+
+	if (bnad->cq_table) {
+		for (i = 0; i < bnad->cq_num; i++)
+			bnad_free_cq(bnad, i);
+		kfree(bnad->cq_table);
+		bnad->cq_table = NULL;
+	}
+}
+
+static int bnad_txq_init(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	err = bnad_alloc_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q,
+			   bnad->txq_depth * sizeof(struct bna_txq_entry));
+	if (err) {
+		bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+		return err;
+	}
+	txqinfo->txq.q.q_depth = bnad->txq_depth;
+	txqinfo->bnad = bnad;
+	txqinfo->txq_config.txf_id = BNAD_TX_FUNC_ID;
+	snprintf(txqinfo->name, sizeof(txqinfo->name), "%s TxQ %d",
+		 bnad->netdev->name, txq_id);
+	return 0;
+}
+
+static int bnad_txqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->txq_table =
+		kzalloc(bnad->txq_num * sizeof(struct bnad_txq_info),
+			GFP_KERNEL);
+	if (!bnad->txq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_txq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	err = bnad_alloc_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q,
+			   bnad->rxq_depth * sizeof(struct bna_rxq_entry));
+	if (err) {
+		bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+		return err;
+	}
+	rxqinfo->rxq.q.q_depth = bnad->rxq_depth;
+	rxqinfo->bnad = bnad;
+	rxqinfo->rxq_id = rxq_id;
+	rxqinfo->rxq_config.cq_id = rxq_id / bnad_rxqs_per_cq;
+
+	return 0;
+}
+
+static int bnad_rxqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->rxq_table =
+		kzalloc(bnad->rxq_num * sizeof(struct bnad_rxq_info),
+			GFP_KERNEL);
+	if (!bnad->rxq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		err = bnad_rxq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int bnad_cq_init(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	err = bnad_alloc_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q,
+			   bnad->rxq_depth * bnad_rxqs_per_cq *
+			   sizeof(struct bna_cq_entry));
+	if (err) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return err;
+	}
+
+	cqinfo->cq.q.q_depth = bnad->rxq_depth * bnad_rxqs_per_cq;
+	cqinfo->bnad = bnad;
+
+	cqinfo->rx_coalescing_timeo = bnad->rx_coalescing_timeo;
+
+	cqinfo->cq_id = cq_id;
+	snprintf(cqinfo->name, sizeof(cqinfo->name), "%s CQ %d",
+		 bnad->netdev->name, cq_id);
+
+	return 0;
+}
+
+static int bnad_cqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->cq_table =
+		kzalloc(bnad->cq_num * sizeof(struct bnad_cq_info), GFP_KERNEL);
+	if (!bnad->cq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_cq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
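+/*
+ * Scale the configured queue depth down for jumbo MTUs, rounding up to a
+ * power of 2 and never going below BNAD_MIN_Q_DEPTH.
+ */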
+static uint bnad_get_qsize(uint qsize_conf, uint mtu)
+{
+	uint qsize;
+
+	if (mtu >= ETH_DATA_LEN) {
+		qsize = qsize_conf / (mtu / ETH_DATA_LEN);
+		if (!BNA_POWER_OF_2(qsize))
+			BNA_TO_POWER_OF_2_HIGH(qsize);
+		if (qsize < BNAD_MIN_Q_DEPTH)
+			qsize = BNAD_MIN_Q_DEPTH;
+	} else
+		qsize = qsize_conf;
+
+	return qsize;
+}
+
+static int bnad_init_queues(struct bnad *bnad)
+{
+	int err;
+
+	if (!(bnad->config & BNAD_CF_TXQ_DEPTH))
+		bnad->txq_depth =
+			bnad_get_qsize(bnad_txq_depth, bnad->netdev->mtu);
+	if (!(bnad->config & BNAD_CF_RXQ_DEPTH))
+		bnad->rxq_depth =
+			bnad_get_qsize(bnad_rxq_depth, bnad->netdev->mtu);
+
+	err = bnad_txqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_rxqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_cqs_init(bnad);
+
+	return err;
+}
+
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
+{
+	struct bnad_cq_info *cqinfo;
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+
+	BNA_ASSERT(cq_id < bnad->cq_num && ib_id < bnad->ib_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	ib_entry = &bnad->ib_table[ib_id];
+
+	cqinfo->hw_producer_index = (u32 *) (ib_entry->ib_seg_addr);
+	cqinfo->cq_config.ib_id = ib_id;
+	cqinfo->cq_config.ib_seg_index = 0;
+
+	ib_entry->ib = &cqinfo->ib;
+	ib_config = &ib_entry->ib_config;
+	ib_config->coalescing_timer = bnad->rx_coalescing_timeo;
+	ib_config->control_flags =
+		BNA_IB_CF_INT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
+	if (bnad->config & BNAD_CF_MSIX) {
+		ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+		ib_config->msix_vector = ib_id;
+	} else
+		ib_config->msix_vector = 1 << ib_id;
+
+	/* Every CQ has its own IB. */
+	ib_config->seg_size = 1;
+	ib_config->index_table_offset = ib_id;
+}
+
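+/*
+ * Assign interrupt blocks: one IB per TxQ first, followed by one IB per
+ * CQ (set up in bnad_rxib_init()).
+ */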
+static void bnad_ibs_init(struct bnad *bnad)
+{
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+	struct bnad_txq_info *txqinfo;
+
+	int ib_id, i;
+
+	ib_id = 0;
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		ib_entry = &bnad->ib_table[ib_id];
+
+		txqinfo->hw_consumer_index = ib_entry->ib_seg_addr;
+		txqinfo->txq_config.ib_id = ib_id;
+		txqinfo->txq_config.ib_seg_index = 0;
+
+		ib_entry->ib = &txqinfo->ib;
+		ib_config = &ib_entry->ib_config;
+		ib_config->coalescing_timer = bnad->tx_coalescing_timeo;
+		ib_config->control_flags =
+			BNA_IB_CF_INTER_PKT_DMA | BNA_IB_CF_INT_ENABLE |
+			BNA_IB_CF_COALESCING_MODE | BNA_IB_CF_MASTER_ENABLE;
+		if (bnad->config & BNAD_CF_MSIX) {
+			ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+			ib_config->msix_vector = ib_id;
+		} else
+			ib_config->msix_vector = 1 << ib_id;
+		ib_config->interpkt_count = bnad->tx_interpkt_count;
+
+		/* Every TxQ has its own IB. */
+		ib_config->seg_size = 1;
+		ib_config->index_table_offset = ib_id;
+		ib_id++;
+	}
+
+	for (i = 0; i < bnad->cq_num; i++, ib_id++)
+		bnad_rxib_init(bnad, i, ib_id);
+}
+
+static void bnad_txf_init(struct bnad *bnad, uint txf_id)
+{
+	struct bnad_txf_info *txf_info;
+
+	BNA_ASSERT(bnad->txf_table && txf_id < bnad->txf_num);
+	txf_info = &bnad->txf_table[txf_id];
+	txf_info->txf_id = txf_id;
+	txf_info->txf_config.flags =
+		BNA_TXF_CF_VLAN_WI_BASED | BNA_TXF_CF_ENABLE;
+}
+
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss)
+{
+	struct bnad_rxf_info *rxf_info;
+
+	BNA_ASSERT(bnad->rxf_table && rxf_id < bnad->rxf_num);
+	rxf_info = &bnad->rxf_table[rxf_id];
+	rxf_info->rxf_id = rxf_id;
+	rxf_info->rxf_config.rit_offset = rit_offset;
+	rxf_info->rxf_config.mcast_rxq_id = BNAD_MULTICAST_RXQ_ID;
+	if (bnad_small_large_rxbufs)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_SM_LG_RXQ;
+	if (bnad_vlan_strip)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_VLAN_STRIP;
+	if (rss) {
+		struct bna_rxf_rss *rxf_rss;
+
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_RSS_ENABLE;
+		rxf_rss = &rxf_info->rxf_config.rss;
+		rxf_rss->type =
+			BNA_RSS_V4_TCP | BNA_RSS_V4_IP | BNA_RSS_V6_TCP |
+			BNA_RSS_V6_IP;
+		rxf_rss->hash_mask = bnad->cq_num - 1;
+		get_random_bytes(rxf_rss->toeplitz_hash_key,
+				 sizeof(rxf_rss->toeplitz_hash_key));
+	}
+}
+
+static int bnad_init_funcs(struct bnad *bnad)
+{
+	bnad->txf_table =
+		kzalloc(sizeof(struct bnad_txf_info) * bnad->txf_num,
+			GFP_KERNEL);
+	if (!bnad->txf_table)
+		return -ENOMEM;
+	bnad_txf_init(bnad, BNAD_TX_FUNC_ID);
+
+	bnad->rxf_table =
+		kzalloc(sizeof(struct bnad_rxf_info) * bnad->rxf_num,
+			GFP_KERNEL);
+	if (!bnad->rxf_table)
+		return -ENOMEM;
+	bnad_rxf_init(bnad, BNAD_RX_FUNC_ID, BNAD_RIT_OFFSET,
+		      (bnad->cq_num > 1) ? 1 : 0);
+	return 0;
+}
+
+static void bnad_setup_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+
+	/* CEE state should not change while we do this */
+	spin_lock_irq(&bnad->priv_lock);
+	if (!bnad->cee_linkup) {
+		txqinfo->txq_config.priority = bnad->curr_priority = txq_id;
+		clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	} else {
+		txqinfo->txq_config.priority = bnad->curr_priority =
+			bnad->priority;
+		set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	}
+	/*  Set wrr_quota properly if multiple priorities/TxQs are enabled. */
+	txqinfo->txq_config.wrr_quota = BNAD_TX_MAX_WRR_QUOTA;
+	bna_txq_config(bnad->priv, &txqinfo->txq, txq_id, &txqinfo->txq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+
+	/*
+	 * Every RxQ set has 2 RxQs: the first is large buffer RxQ,
+	 * the second is small buffer RxQ.
+	 */
+	if ((rxq_id % bnad_rxqs_per_cq) == 0)
+		rxqinfo->rxq_config.buffer_size =
+			(bnad_vlan_strip ? VLAN_ETH_HLEN : ETH_HLEN) +
+			bnad->netdev->mtu + ETH_FCS_LEN;
+	else
+		rxqinfo->rxq_config.buffer_size = BNAD_SMALL_RXBUF_SIZE;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxq_config(bnad->priv, &rxqinfo->rxq, rxq_id, &rxqinfo->rxq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_setup_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_cq_config(bnad->priv, &cqinfo->cq, cq_id, &cqinfo->cq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_queues(struct bnad *bnad)
+{
+	uint i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_txq(bnad, i);
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_setup_rxq(bnad, i);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_cq(bnad, i);
+}
+
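+/*
+ * Program the RxQ Indirection Table: one entry per CQ, pointing at its
+ * large-buffer RxQ and, when small/large buffering is enabled, its
+ * small-buffer RxQ.
+ */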
+static void bnad_setup_rit(struct bnad *bnad)
+{
+	int i, size;
+
+	size = bnad->cq_num;
+	for (i = 0; i < size; i++) {
+		if (bnad_small_large_rxbufs) {
+			bnad->rit[i].large_rxq_id = (i << 1);
+			bnad->rit[i].small_rxq_id = (i << 1) + 1;
+		} else {
+			bnad->rit[i].large_rxq_id = i;
+		}
+	}
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rit_config_set(bnad->priv, BNAD_RIT_OFFSET, bnad->rit, size);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	u16 rxbufs;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_alloc_rxbufs(rxqinfo);
+	rxbufs = BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+				   rxqinfo->skb_unmap_q.q_depth);
+}
+
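+/*
+ * Push the queue, Rx/Tx function and RIT configuration to the hardware
+ * and restore MAC address, MTU, pause, multicast and VLAN settings.
+ * Should be called with conf_lock held.
+ */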
+static int bnad_config_hw(struct bnad *bnad)
+{
+	int i, err = 0;
+	u64 rxq_id_mask = 0;
+	struct sockaddr sa;
+	struct net_device *netdev = bnad->netdev;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Disable the RxF until later bringing port up. */
+	bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+	for (i = 0; i < bnad->txq_num; i++) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_txq(bnad, i);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_queues(bnad);
+
+	bnad_setup_rit(bnad);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txf_config_set(bnad->priv, BNAD_TX_FUNC_ID,
+			   &bnad->txf_table->txf_config);
+	for (i = 0; i < bnad->rxf_num; i++) {
+		bna_rxf_config_set(bnad->priv, i,
+				   &bnad->rxf_table[i].rxf_config);
+		bna_rxf_vlan_filter(bnad->priv, i, BNA_ENABLE);
+	}
+
+	spin_unlock_irq(&bnad->priv_lock);
+	/* Mailbox should be enabled before this! */
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	err = bnad_set_mac_address_locked(netdev, &sa);
+	spin_lock_irq(&bnad->priv_lock);
+	if (err || BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Receive broadcasts */
+	bna_rxf_broadcast(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+
+	bna_mtu_info(bnad->priv, netdev->mtu, bnad);
+	bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+
+	bna_rxf_mcast_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	bna_mcast_mac_reset_list(bnad->priv);
+
+	bnad_set_rx_mode_locked(bnad->netdev);
+
+	bnad_reconfig_vlans(bnad);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_ibs(bnad);
+	return 0;
+
+unlock_and_return:
+	if (BNAD_NOT_READY(bnad))
+		err = BNA_FAIL;
+	spin_unlock_irq(&bnad->priv_lock);
+	return err;
+}
+
+/* Note: bnad_cleanup doesn't free irqs */
+static void bnad_cleanup(struct bnad *bnad)
+{
+
+	kfree(bnad->rit);
+	bnad->rit = NULL;
+
+	kfree(bnad->txf_table);
+	bnad->txf_table = NULL;
+
+	kfree(bnad->rxf_table);
+	bnad->rxf_table = NULL;
+
+	bnad_free_ibs(bnad);
+	bnad_free_queues(bnad);
+}
+
+/* Should be called with rtnl_lock held. */
+static int bnad_init(struct bnad *bnad)
+{
+	int err;
+
+	err = bnad_alloc_ibs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_init_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad_ibs_init(bnad);
+
+	err = bnad_init_funcs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_alloc_unmap_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad->rit =
+		kzalloc(bnad->cq_num * sizeof(struct bna_rit_entry),
+			GFP_KERNEL);
+	if (!bnad->rit) {
+		err = -ENOMEM;
+		goto finished;
+	}
+
+	return 0;
+
+finished:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+static int bnad_enable_locked(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int err = 0;
+	uint i;
+	char message[BNA_MESSAGE_SIZE];
+
+	bnad->state = BNAD_S_OPENING;
+
+	err = bnad_init(bnad);
+	if (err) {
+		sprintf(message, "%s init failed %d", netdev->name, err);
+		DPRINTK(INFO, "%s",
+			message);
+		bnad->state = BNAD_S_INIT;
+		return err;
+	}
+
+	err = bnad_config_hw(bnad);
+	if (err) {
+		sprintf(message, "%s config HW failed %d", netdev->name, err);
+		DPRINTK(INFO, "%s",
+			message);
+		goto init_failed;
+	}
+
+	err = bnad_request_txrx_irqs(bnad);
+	if (err) {
+		sprintf(message, "%s requests Tx/Rx irqs failed: %d",
+			bnad->netdev->name, err);
+		DPRINTK(INFO, "%s",
+			message);
+		goto init_failed;
+	}
+	bnad_napi_init(bnad);
+	bnad_napi_enable(bnad);
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_alloc_for_rxq(bnad, i);
+
+	bnad->state = BNAD_S_OPEN;
+	sprintf(message, "%s is opened", bnad->netdev->name);
+		DPRINTK(INFO, "%s", message);
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad)) {
+		/* Let bnad_error take care of the error. */
+		spin_unlock_irq(&bnad->priv_lock);
+		return 0;
+	}
+
+	/* RxF was disabled earlier. */
+	bna_rxf_enable(bnad->priv, BNAD_RX_FUNC_ID);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	return 0;
+
+init_failed:
+	bnad_cleanup(bnad);
+	bnad->state = BNAD_S_INIT;
+	return err;
+}
+
+/* Should be called with conf_lock held */
+static int bnad_open_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		err = bnad_enable_locked(bnad);
+		break;
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		sprintf(message, "%s is not ready yet: IOC down", netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	case BNAD_S_INIT_DISABLED:
+		bnad->state = BNAD_S_OPEN_DISABLED;
+		sprintf(message, "%s is not ready yet: IOC disabled",
+			netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	default:
+		BNA_ASSERT(0);
+		break;
+	}
+	return err;
+}
+
+int bnad_open_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (!err && (bnad->state == BNAD_S_OPEN))
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return err;
+}
+
+int bnad_open(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	sprintf(message, "%s open", netdev->name);
+		DPRINTK(INFO, "%s", message);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags)) {
+		sprintf(message, "%s is disabled", netdev->name);
+		DPRINTK(INFO, "%s", message);
+	} else
+		err = bnad_open_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static int bnad_disable_locked(struct bnad *bnad)
+{
+	int err = 0, i;
+	u64 rxq_id_mask = 0;
+
+	bnad_stop_data_path(bnad, 0);
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			goto cleanup;
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			goto cleanup;
+	}
+
+cleanup:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+int bnad_stop_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	return bnad_stop_locked_internal(netdev);
+}
+
+int bnad_stop(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	sprintf(message, "%s stop", netdev->name);
+		DPRINTK(INFO, "%s", message);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags)) {
+		sprintf(message, "%s port is disabled", netdev->name);
+		DPRINTK(INFO, "%s", message);
+	} else
+		err = bnad_stop_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int bnad_sw_reset_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	err = bnad_stop_locked_internal(netdev);
+	if (err) {
+		sprintf(message, "%s sw reset internal: stop failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (err) {
+		sprintf(message, "%s sw reset internal: open failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+	return 0;
+done:
+	DPRINTK(INFO, "%s", message);
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int bnad_sw_reset_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->state != BNAD_S_OPEN)
+		return 0;
+
+	bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	err = bnad_sw_reset_locked_internal(netdev);
+
+	if (err) {
+		sprintf(message, "%s sw reset: failed %d", bnad->netdev->name,
+			err);
+		DPRINTK(INFO, "%s", message);
+		return err;
+	}
+
+	/* After the reset, make sure we are in the OPEN state. */
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return 0;
+}
+
+static int bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+	int err;
+
+	BNA_ASSERT(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+		   skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6);
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err) {
+			bnad->stats.tso_err++;
+			return err;
+		}
+	}
+
+	/*
+	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
+	 * excluding the length field.
+	 */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		/* Do we really need these? */
+		iph->tot_len = 0;
+		iph->check = 0;
+
+		tcp_hdr(skb)->check =
+			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+					   IPPROTO_TCP, 0);
+		bnad->stats.tso4++;
+	} else {
+		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+		BNA_ASSERT(skb->protocol == htons(ETH_P_IPV6));
+		ipv6h->payload_len = 0;
+		tcp_hdr(skb)->check =
+			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+					 IPPROTO_TCP, 0);
+		bnad->stats.tso6++;
+	}
+
+	return 0;
+}
+
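+/*
+ * Hard transmit entry point: reclaims finished Tx buffers when the queue
+ * is getting full, builds Tx work items (4 vectors per WI), and applies
+ * VLAN/priority tagging plus TSO and checksum offload flags.
+ */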
+netdev_tx_t bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	struct bnad_unmap_q *unmap_q;
+	u16 txq_prod, vlan_tag = 0;
+	unsigned int unmap_prod, wis, wis_used, wi_range;
+	unsigned int vectors, vect_id, i, acked;
+	int err;
+	dma_addr_t dma_addr;
+	struct bna_txq_entry *txqent;
+	bna_txq_wi_ctrl_flag_t flags;
+
+	if (unlikely
+	    (skb->len <= ETH_HLEN || skb->len > BNAD_TX_MAX_DATA_PER_WI)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+	unmap_q = &txqinfo->skb_unmap_q;
+
+	vectors = 1 + skb_shinfo(skb)->nr_frags;
+	if (vectors > BNAD_TX_MAX_VECTORS) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	wis = BNAD_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
+	acked = 0;
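+	/* If the ring is full, reclaim completed buffers before stopping. */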
+	if (unlikely(wis > BNA_Q_FREE_COUNT(txq) ||
+		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+		if ((u16) (*txqinfo->hw_consumer_index) !=
+		    txq->q.consumer_index &&
+		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+			acked = bnad_free_txbufs(txqinfo,
+				(u16)(*txqinfo->hw_consumer_index));
+			bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+		} else
+			netif_stop_queue(netdev);
+
+		smp_mb();
+		/*
+		 * Check again to deal with race condition between
+		 * netif_stop_queue here, and netif_wake_queue in
+		 * interrupt handler which is not inside netif tx lock.
+		 */
+		if (likely(wis > BNA_Q_FREE_COUNT(txq) ||
+			   vectors > BNA_QE_FREE_CNT(unmap_q,
+						     unmap_q->q_depth))) {
+			bnad->stats.netif_queue_stop++;
+			return NETDEV_TX_BUSY;
+		} else
+			netif_wake_queue(netdev);
+	}
+
+	unmap_prod = unmap_q->producer_index;
+	wis_used = 1;
+	vect_id = 0;
+	flags = 0;
+
+	txq_prod = txq->q.producer_index;
+	BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= txq->q.q_depth);
+	txqent->hdr.wi.reserved = 0;
+	txqent->hdr.wi.num_vectors = vectors;
+	txqent->hdr.wi.opcode =
+		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+		       BNA_TXQ_WI_SEND));
+
+	if (bnad_ipid_mode)
+		flags |= BNA_TXQ_WI_CF_IPID_MODE;
+
+	if (bnad->vlangrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = (u16) vlan_tx_tag_get(skb);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
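+	/* In CEE mode, insert the negotiated priority into the VLAN tag. */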
+	if (test_bit(BNAD_F_CEE_RUNNING, &bnad->flags)) {
+		vlan_tag =
+			(bnad->curr_priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+
+	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+	if (skb_is_gso(skb)) {
+		err = bnad_tso_prepare(bnad, skb);
+		if (err) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
+		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+		txqent->hdr.wi.l4_hdr_size_n_offset =
+			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+			      (tcp_hdrlen(skb) >> 2,
+			       skb_transport_offset(skb)));
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = 0;
+
+		txqent->hdr.wi.lso_mss = 0;
+
+		if (skb->protocol == htons(ETH_P_IP))
+			proto = ip_hdr(skb)->protocol;
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* XXX the nexthdr may not be TCP immediately. */
+			proto = ipv6_hdr(skb)->nexthdr;
+		}
+		if (proto == IPPROTO_TCP) {
+			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.tcpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+				   skb_transport_offset(skb) + tcp_hdrlen(skb));
+		} else if (proto == IPPROTO_UDP) {
+			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.udpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+				   skb_transport_offset(skb) +
+				   sizeof(struct udphdr));
+		} else {
+			err = skb_checksum_help(skb);
+			bnad->stats.csum_help++;
+			if (err) {
+				dev_kfree_skb(skb);
+				bnad->stats.csum_help_err++;
+				return NETDEV_TX_OK;
+			}
+		}
+	} else {
+		txqent->hdr.wi.lso_mss = 0;
+		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+	}
+
+	txqent->hdr.wi.flags = htons(flags);
+
+	txqent->hdr.wi.frame_length = htonl(skb->len);
+
+	unmap_q->unmap_array[unmap_prod].skb = skb;
+	BNA_ASSERT(skb_headlen(skb) <= BNAD_TX_MAX_DATA_PER_VECTOR);
+	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	dma_addr =
+		pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+			       PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+			   dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
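+	/* Map each fragment, adding work-item extensions as needed. */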
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		if (++vect_id == BNAD_TX_MAX_VECTORS_PER_WI) {
+			vect_id = 0;
+			if (--wi_range) {
+				txqent++;
+			} else {
+				BNA_QE_INDX_ADD(txq_prod, wis_used,
+						txq->q.q_depth);
+				wis_used = 0;
+				BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent,
+						     wi_range);
+				BNA_ASSERT(wi_range &&
+					   wi_range <= txq->q.q_depth);
+			}
+			wis_used++;
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+		}
+
+		BNA_ASSERT(frag->size <= BNAD_TX_MAX_DATA_PER_VECTOR);
+		txqent->vector[vect_id].length = htons(frag->size);
+		BNA_ASSERT(unmap_q->unmap_array[unmap_prod].skb == NULL);
+		dma_addr =
+			pci_map_page(bnad->pcidev, frag->page,
+				     frag->page_offset, frag->size,
+				     PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+	}
+
+	unmap_q->producer_index = unmap_prod;
+	BNA_QE_INDX_ADD(txq_prod, wis_used, txq->q.q_depth);
+	txq->q.producer_index = txq_prod;
+
+	smp_mb();
+	bna_txq_prod_indx_doorbell(txq);
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index)
+		tasklet_schedule(&bnad->tx_free_tasklet);
+
+	return NETDEV_TX_OK;
+}
+
+struct net_device_stats *bnad_get_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &bnad->net_stats;
+	struct cna_stats_mac_rx *rxstats = &bnad->hw_stats->mac_rx_stats;
+	struct cna_stats_mac_tx *txstats = &bnad->hw_stats->mac_tx_stats;
+	int i;
+
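+	/* Combine per-queue software counters with hardware MAC stats. */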
+	memset(net_stats, 0, sizeof(*net_stats));
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			net_stats->rx_packets += bnad->rxq_table[i].rx_packets;
+			net_stats->rx_bytes += bnad->rxq_table[i].rx_bytes;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			net_stats->tx_packets += bnad->txq_table[i].tx_packets;
+			net_stats->tx_bytes += bnad->txq_table[i].tx_bytes;
+		}
+	}
+	net_stats->rx_errors =
+		rxstats->rx_fcs_error + rxstats->rx_alignment_error +
+		rxstats->rx_frame_length_error + rxstats->rx_code_error +
+		rxstats->rx_undersize;
+	net_stats->tx_errors = txstats->tx_fcs_error + txstats->tx_undersize;
+	net_stats->rx_dropped = rxstats->rx_drop;
+	net_stats->tx_dropped = txstats->tx_drop;
+	net_stats->multicast = rxstats->rx_multicast;
+	net_stats->collisions = txstats->tx_total_collision;
+
+	net_stats->rx_length_errors = rxstats->rx_frame_length_error;
+	net_stats->rx_crc_errors = rxstats->rx_fcs_error;
+	net_stats->rx_frame_errors = rxstats->rx_alignment_error;
+	/* receiver FIFO overrun */
+	net_stats->rx_fifo_errors = bnad->hw_stats->rxf_stats[0].frame_drops;
+
+	return net_stats;
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_set_rx_mode_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (netdev->flags & IFF_PROMISC) {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+		bnad->config |= BNAD_CF_PROMISC;
+	} else {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_DISABLE);
+		bnad->config &= ~BNAD_CF_PROMISC;
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		if (!(bnad->config & BNAD_CF_ALLMULTI)) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_DISABLE);
+			bnad->config |= BNAD_CF_ALLMULTI;
+		}
+	} else {
+		if (bnad->config & BNAD_CF_ALLMULTI) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_ENABLE);
+			bnad->config &= ~BNAD_CF_ALLMULTI;
+		}
+	}
+
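+	/* Entry 0 of the multicast list is always the broadcast address. */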
+	if (netdev->mc_count) {
+		struct mac *mcaddr_list;
+		struct dev_mc_list *mc;
+		int i;
+
+		mcaddr_list =
+			kzalloc((netdev->mc_count + 1) * sizeof(struct mac),
+				GFP_ATOMIC);
+		if (!mcaddr_list)
+			return;
+
+		mcaddr_list[0] = bna_bcast_addr;
+
+		mc = netdev->mc_list;
+		for (i = 1; mc && i < netdev->mc_count + 1; i++, mc = mc->next)
+			memcpy(&mcaddr_list[i], mc->dmi_addr,
+				sizeof(struct mac));
+
+		err = bna_rxf_mcast_mac_set_list(bnad->priv, BNAD_RX_FUNC_ID,
+			(const struct mac *)mcaddr_list,
+				 netdev->mc_count + 1);
+
+		/* XXX Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+
+		kfree(mcaddr_list);
+	}
+}
+
+static void bnad_set_rx_mode(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bnad_set_rx_mode_locked(netdev);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held. */
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 *mac_ptr,
+	unsigned int cmd)
+{
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+	enum bna_status (*ucast_mac_func)(struct bna_dev *bna_dev,
+		unsigned int rxf_id, const struct mac *mac_addr_ptr) = NULL;
+
+	WARN_ON(in_interrupt());
+	if (!is_valid_ether_addr(mac_ptr))
+		return -EINVAL;
+
+	switch (cmd) {
+	case BNAD_UCAST_MAC_SET:
+		ucast_mac_func = bna_rxf_ucast_mac_set;
+		break;
+	case BNAD_UCAST_MAC_ADD:
+		ucast_mac_func = bna_rxf_ucast_mac_add;
+		break;
+	case BNAD_UCAST_MAC_DEL:
+		ucast_mac_func = bna_rxf_ucast_mac_del;
+		break;
+	}
+
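+	/* Post the mailbox command and wait for its completion callback. */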
+	init_completion(&bnad->ucast_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = ucast_mac_func(bnad->priv, rxf_id, (const struct mac *)mac_ptr);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto ucast_mac_exit;
+	}
+	wait_for_completion(&bnad->ucast_comp);
+	err = bnad->ucast_comp_status;
+	if (err == BFI_LL_CMD_NOT_EXEC)
+		err = 0;
+
+ucast_mac_exit:
+	if (err) {
+		sprintf(message, "%s unicast MAC address command %d failed: %d",
+			bnad->netdev->name, cmd, err);
+		DPRINTK(INFO, "%s",
+			message);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int err;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	err = bnad_ucast_mac(bnad, BNAD_RX_FUNC_ID, (u8 *) sa->sa_data,
+			     BNAD_UCAST_MAC_SET);
+	if (err)
+		return err;
+
+	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+	return 0;
+}
+
+static int bnad_set_mac_address(struct net_device *netdev, void *addr)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	err = bnad_set_mac_address_locked(netdev, addr);
+	bnad_conf_unlock();
+	return err;
+
+}
+
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	WARN_ON(in_interrupt());
+
+	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+		return -EINVAL;
+
+	bnad_conf_lock();
+	netdev->mtu = new_mtu;
+	err = bnad_sw_reset_locked(netdev);
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static void bnad_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *grp)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	bnad->vlangrp = grp;
+	bnad_conf_unlock();
+}
+
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+static void bnad_vlan_rx_kill_vid(struct net_device *netdev,
+	unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_del(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_reconfig_vlans(struct bnad *bnad)
+{
+	u16 vlan_id;
+
+	bna_rxf_vlan_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	if (bnad->vlangrp) {
+		for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
+			if (vlan_group_get_device(bnad->vlangrp, vlan_id))
+				bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+						 (unsigned int)vlan_id);
+		}
+	}
+}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnad_netpoll(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_cq_info *cqinfo;
+	int i;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		disable_irq(bnad->pcidev->irq);
+		bnad_isr(bnad->pcidev->irq, netdev);
+		enable_irq(bnad->pcidev->irq);
+	} else {
+		for (i = 0; i < bnad->cq_num; i++) {
+			cqinfo = &bnad->cq_table[i];
+			if (likely(napi_schedule_prep(&cqinfo->napi))) {
+				bnad_disable_rx_irq(bnad, cqinfo);
+				__napi_schedule(&cqinfo->napi);
+			}
+		}
+	}
+}
+#endif
+
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets)
+{
+	bnad->txq_num = BNAD_TXQ_NUM;
+	bnad->txf_num = 1;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (rxqsets) {
+			bnad->cq_num = rxqsets;
+			if (bnad->cq_num > BNAD_MAX_CQS)
+				bnad->cq_num = BNAD_MAX_CQS;
+		} else
+			bnad->cq_num =
+				min((uint) num_online_cpus(),
+				    (uint) BNAD_MAX_RXQSETS_USED);
+		/* Round the CQ count down to a power of 2 for RSS. */
+		if (!BNA_POWER_OF_2(bnad->cq_num))
+			BNA_TO_POWER_OF_2(bnad->cq_num);
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+
+		bnad->rxf_num = 1;
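+		/* One vector per TxQ and CQ, plus one for errors/mailbox. */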
+		bnad->msix_num =
+			bnad->txq_num + bnad->cq_num +
+			BNAD_MSIX_ERR_MAILBOX_NUM;
+	} else {
+		bnad->cq_num = 1;
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+		bnad->rxf_num = 1;
+		bnad->msix_num = 0;
+	}
+}
+
+static void bnad_enable_msix(struct bnad *bnad)
+{
+	int i, ret;
+
+	if (!(bnad->config & BNAD_CF_MSIX) || bnad->msix_table)
+		return;
+
+	bnad->msix_table =
+		kzalloc(bnad->msix_num * sizeof(struct msix_entry), GFP_KERNEL);
+	if (!bnad->msix_table)
+		goto intx_mode;
+
+	for (i = 0; i < bnad->msix_num; i++)
+		bnad->msix_table[i].entry = i;
+
+	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+	if (ret > 0) {
+		/* Not enough MSI-X vectors. */
+		int rxqsets = ret;
+
+		dev_err(&bnad->pcidev->dev,
+			"Tried to get %d MSI-X vectors, only got %d\n",
+			bnad->msix_num, ret);
+		BNA_TO_POWER_OF_2(rxqsets);
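+		/* Shrink the RxQ set until it fits the granted vectors. */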
+		while (bnad->msix_num > ret && rxqsets) {
+			bnad_q_num_init(bnad, rxqsets);
+			rxqsets >>= 1;
+		}
+		if (bnad->msix_num <= ret) {
+			ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+					      bnad->msix_num);
+			if (ret) {
+				dev_err(&bnad->pcidev->dev,
+					"Enabling MSI-X failed: %d\n", ret);
+				goto intx_mode;
+			}
+		} else {
+			dev_err(&bnad->pcidev->dev,
+				"Enabling MSI-X failed: limited (%d) vectors\n",
+				ret);
+			goto intx_mode;
+		}
+	} else if (ret < 0) {
+		dev_err(&bnad->pcidev->dev, "Enabling MSI-X failed: %d\n", ret);
+		goto intx_mode;
+	}
+
+	dev_info(&bnad->pcidev->dev,
+		 "Enabling MSI-X succeeded with %d vectors, %s\n",
+		 bnad->msix_num,
+		 (bnad->cq_num > 1) ? "RSS is enabled" : "RSS is not enabled");
+	return;
+
+intx_mode:
+	dev_warn(&bnad->pcidev->dev, "Switching to INTx mode with no RSS\n");
+
+	kfree(bnad->msix_table);
+	bnad->msix_table = NULL;
+
+	bnad->config &= ~BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, 0);
+}
+
+static void bnad_disable_msix(struct bnad *bnad)
+{
+	if (bnad->config & BNAD_CF_MSIX) {
+		pci_disable_msix(bnad->pcidev);
+		kfree(bnad->msix_table);
+		bnad->msix_table = NULL;
+		bnad->config &= ~BNAD_CF_MSIX;
+	}
+}
+
+static void bnad_error(struct bnad *bnad)
+{
+
+	spin_lock_irq(&bnad->priv_lock);
+
+	if (!test_and_clear_bit(BNAD_F_HWERROR, &bnad->flags)) {
+		spin_unlock_irq(&bnad->priv_lock);
+		return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		bnad_stop_data_path(bnad, 1);
+		bnad_cleanup(bnad);
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BNA_ASSERT(0);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
+static void bnad_resume_after_reset(struct bnad *bnad)
+{
+	int err;
+	struct net_device *netdev = bnad->netdev;
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT;
+
+		bna_port_mac_get(bnad->priv, (struct mac *)bnad->perm_addr);
+		BNA_ASSERT(netdev->addr_len == sizeof(bnad->perm_addr));
+		memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+		if (is_zero_ether_addr(netdev->dev_addr))
+			memcpy(netdev->dev_addr, bnad->perm_addr,
+			       netdev->addr_len);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		err = bnad_enable_locked(bnad);
+		if (err) {
+			sprintf(message,
+				"%s bnad_enable failed after reset: %d",
+				bnad->netdev->name, err);
+		DPRINTK(INFO, "%s",
+				message);
+		} else {
+			bnad_port_admin_locked(bnad, BNA_ENABLE);
+		}
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BNA_ASSERT(0);
+		/* fall through */
+	default:
+		break;
+	}
+
+}
+
+static void bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+	struct bnad *bnad = (struct bnad *)bnad_ptr;
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	unsigned int acked;
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+
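+	/* Reclaim Tx buffers if the hardware consumer index has advanced. */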
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index &&
+	    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+		acked = bnad_free_txbufs(txqinfo,
+					 (u16)(*txqinfo->hw_consumer_index));
+		bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+	}
+}
+
+static void bnad_cee_reconfig_prio(struct bnad *bnad, u8 cee_linkup,
+	unsigned int prio)
+{
+
+	if (prio != bnad->curr_priority) {
+		bnad_sw_reset_locked_internal(bnad->netdev);
+	} else {
+		spin_lock_irq(&bnad->priv_lock);
+		if (!cee_linkup)
+			clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		else
+			set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		spin_unlock_irq(&bnad->priv_lock);
+	}
+}
+
+static void bnad_link_state_notify(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+	unsigned int prio = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->state != BNAD_S_OPEN) {
+		sprintf(message, "%s link up in state %d", netdev->name,
+			bnad->state);
+		DPRINTK(INFO, "%s", message);
+		return;
+	}
+
+	spin_lock_irq(&bnad->priv_lock);
+	link_state = bnad->link_state;
+	cee_linkup = bnad->cee_linkup;
+	if (cee_linkup)
+		prio = bnad->priority;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (link_state == BNAD_LS_UP) {
+		bnad_cee_reconfig_prio(bnad, cee_linkup, prio);
+		if (!netif_carrier_ok(netdev)) {
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			netif_carrier_off(netdev);
+			bnad->stats.netif_queue_stop++;
+		}
+	}
+}
+
+static void bnad_work(struct work_struct *work)
+{
+	struct bnad *bnad = container_of(work, struct bnad, work);
+	unsigned long work_flags;
+
+	bnad_conf_lock();
+
+	spin_lock_irq(&bnad->priv_lock);
+	work_flags = bnad->work_flags;
+	bnad->work_flags = 0;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (work_flags & BNAD_WF_ERROR)
+		bnad_error(bnad);
+	if (work_flags & BNAD_WF_RESETDONE)
+		bnad_resume_after_reset(bnad);
+
+	if (work_flags & BNAD_WF_LS_NOTIFY)
+		bnad_link_state_notify(bnad);
+
+	bnad_conf_unlock();
+}
+
+static void bnad_stats_timeo(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	int i;
+	struct bnad_rxq_info *rxqinfo;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_stats_get(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
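+	/* Adapt the Rx coalescing timer to the observed packet rate. */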
+	if (bnad->rx_dyn_coalesce_on) {
+		u8 cls_timer;
+		struct bnad_cq_info *cq;
+		for (i = 0; i < bnad->cq_num; i++) {
+			cq = &bnad->cq_table[i];
+
+			if ((cq->pkt_rate.small_pkt_cnt == 0) &&
+			    (cq->pkt_rate.large_pkt_cnt == 0))
+				continue;
+
+			cls_timer =
+				bna_calc_coalescing_timer(bnad->priv,
+							  &cq->pkt_rate);
+
+			/* For NAPI, store the coalescing timer per CQ. */
+			cq->rx_coalescing_timeo = cls_timer;
+
+			bna_ib_coalescing_timer_set(bnad->priv, &cq->ib,
+						    cls_timer);
+		}
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		if (!(BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+					rxqinfo->skb_unmap_q.q_depth) >>
+		      BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) {
+			if (test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags))
+				continue;
+			bnad_alloc_rxbufs(rxqinfo);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+		}
+	}
+}
+
+static void bnad_free_ioc_mem(struct bnad *bnad)
+{
+	enum bna_dma_mem_type i;
+
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		if (bnad->ioc_meminfo[i].kva && bnad->ioc_meminfo[i].dma)
+			pci_free_consistent(bnad->pcidev,
+					    bnad->ioc_meminfo[i].len,
+					    bnad->ioc_meminfo[i].kva,
+					    *(dma_addr_t *) &bnad->
+					    ioc_meminfo[i].dma);
+		else if (bnad->ioc_meminfo[i].kva)
+			vfree(bnad->ioc_meminfo[i].kva);
+		bnad->ioc_meminfo[i].kva = NULL;
+	}
+}
+
+/* The following IOC callback functions are called with priv_lock held. */
+
+void bna_iocll_enable_cbfn(void *arg, enum bfa_status error)
+{
+	struct bnad *bnad = arg;
+
+	if (!error) {
+		bnad->work_flags &= ~BNAD_WF_LS_NOTIFY;
+		bnad->work_flags |= BNAD_WF_RESETDONE;
+
+		if (bnad->state != BNAD_S_UNLOADING)
+			schedule_work(&bnad->work);
+	}
+
+	bnad->ioc_comp_status = error;
+	complete(&bnad->ioc_comp);
+}
+
+void bna_iocll_disable_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	complete(&bnad->ioc_comp);
+}
+
+void bna_iocll_hbfail_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	bnad_hw_error(bnad, BFA_STATUS_IOC_FAILURE);
+}
+
+void bna_iocll_reset_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+	u32 int_status, int_mask;
+	unsigned int irq;
+
+	/* Clear the status */
+	bna_intr_status_get(bnad->priv, &int_status);
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (test_and_clear_bit(BNAD_F_MBOX_IRQ_DISABLED,
+		    &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			enable_irq(irq);
+		}
+	}
+
+	int_mask = ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS);
+	bna_intx_enable(bnad->priv, int_mask);
+}
+
+static void bnad_ioc_timeout(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_timer(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->state != BNAD_S_UNLOADING)
+		mod_timer(&bnad->ioc_timer,
+			  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+}
+
+s32 bnad_cee_attach(struct bnad *bnad)
+{
+	u8 *dma_kva;
+	dma_addr_t dma_pa;
+	struct bfa_cee *cee = &bnad->cee;
+
+	memset(cee, 0, sizeof(struct bfa_cee));
+
+	/* Allocate memory for dma */
+	dma_kva =
+		pci_alloc_consistent(bnad->pcidev, bfa_cee_meminfo(), &dma_pa);
+	if (dma_kva == NULL)
+		return -ENOMEM;
+
+	/* Ugly... need to remove once CAL is fixed. */
+	((struct bna_dev *) bnad->priv)->cee = cee;
+
+	bnad->cee_cbfn.get_attr_cbfn = bnad_cee_get_attr_cb;
+	bnad->cee_cbfn.get_stats_cbfn = bnad_cee_get_stats_cb;
+	/* The stats-reset completion callback is not used. */
+	bnad->cee_cbfn.reset_stats_cbfn = NULL;
+
+	/* Invoke cee attach function */
+	bfa_cee_attach(cee, &bnad->priv->ioc, bnad, bnad->trcmod, bnad->logmod);
+	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
+	return 0;
+}
+
+static void bnad_cee_detach(struct bnad *bnad)
+{
+	struct bfa_cee *cee = &bnad->cee;
+
+	if (cee->attr_dma.kva) {
+		pci_free_consistent(bnad->pcidev, bfa_cee_meminfo(),
+				    cee->attr_dma.kva, cee->attr_dma.pa);
+	}
+}
+
+static int bnad_priv_init(struct bnad *bnad)
+{
+	dma_addr_t dma_addr;
+	struct bna_dma_addr bna_dma_addr;
+	int err = 0, i;
+	struct bfa_pcidev pcidev_info;
+	u32 intr_mask;
+
+	if (bnad_msix)
+		bnad->config |= BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, bnad_rxqsets_used);
+
+	bnad->work_flags = 0;
+	INIT_WORK(&bnad->work, bnad_work);
+
+	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+		     (unsigned long)bnad);
+
+	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
+		    (unsigned long)bnad);
+
+	bnad->tx_coalescing_timeo = BNAD_TX_COALESCING_TIMEO;
+	bnad->tx_interpkt_count = BNAD_TX_INTERPKT_COUNT;
+
+	bnad->rx_coalescing_timeo = BNAD_RX_COALESCING_TIMEO;
+	bnad->rx_interpkt_count = BNAD_RX_INTERPKT_COUNT;
+	bnad->rx_interpkt_timeo = BNAD_RX_INTERPKT_TIMEO;
+
+	bnad->rx_dyn_coalesce_on = BNA_TRUE;
+
+	bnad->rx_csum = 1;
+	bnad->pause_config.tx_pause = 0;
+	bnad->pause_config.rx_pause = 0;
+
+	/* XXX could be vmalloc? */
+	bnad->trcmod = kzalloc(sizeof(struct bfa_trc_mod), GFP_KERNEL);
+	if (!bnad->trcmod) {
+		printk(KERN_ERR "port %u failed allocating trace buffer!\n",
+		       bnad->bna_id);
+		return -ENOMEM;
+	}
+
+	bfa_trc_init(bnad->trcmod);
+
+	bnad->logmod = NULL;
+
+	bnad->priv = kzalloc(bna_get_handle_size(), GFP_KERNEL);
+	if (!bnad->priv) {
+		printk(KERN_ERR "port %u failed allocating memory for bna\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_trcmod;
+	}
+	bnad->priv_stats =
+		pci_alloc_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+				     &dma_addr);
+	if (!bnad->priv_stats) {
+		printk(KERN_ERR
+		       "port %u failed allocating memory for bna stats\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_priv_mem;
+	}
+	pci_unmap_addr_set(bnad, priv_stats_dma, dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &bna_dma_addr);
+	bna_init(bnad->priv, (void *)bnad->bar0, bnad->priv_stats, bna_dma_addr,
+		 bnad->trcmod, bnad->logmod);
+	bna_all_stats_get(bnad->priv, &bnad->hw_stats);
+
+	spin_lock_init(&bnad->priv_lock);
+	sema_init(&bnad->conf_sem, 1);
+	bnad->priv_cbfn.ucast_set_cb = bnad_ucast_set_cb;
+	bnad->priv_cbfn.txq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.rxq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.link_up_cb = bnad_link_up_cb;
+	bnad->priv_cbfn.link_down_cb = bnad_link_down_cb;
+	bnad->priv_cbfn.stats_get_cb = bnad_stats_get_cb;
+	bnad->priv_cbfn.hw_error_cb = bnad_hw_error_cb;
+	bnad->priv_cbfn.lldp_get_cfg_cb = bnad_lldp_get_cfg_cb;
+
+	bna_register_callback(bnad->priv, &bnad->priv_cbfn, bnad);
+
+	bna_iocll_meminfo(bnad->priv, bnad->ioc_meminfo);
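+	/* FW trace memory comes from vmalloc; the rest is DMA-coherent. */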
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		switch (i) {
+		case BNA_KVA_MEM_T_FWTRC:
+			bnad->ioc_meminfo[i].kva =
+				vmalloc(bnad->ioc_meminfo[i].len);
+			break;
+		default:
+			bnad->ioc_meminfo[i].kva =
+				pci_alloc_consistent(bnad->pcidev,
+						     bnad->ioc_meminfo[i].len,
+						     (dma_addr_t *) &bnad->
+						     ioc_meminfo[i].dma);
+
+			break;
+		}
+		if (!bnad->ioc_meminfo[i].kva) {
+			printk(KERN_ERR
+			       "port %u failed allocating %u "
+			       "bytes memory for IOC\n",
+			       bnad->bna_id, bnad->ioc_meminfo[i].len);
+			err = -ENOMEM;
+			goto free_ioc_mem;
+		} else
+			memset(bnad->ioc_meminfo[i].kva, 0,
+			       bnad->ioc_meminfo[i].len);
+	}
+
+	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+	pcidev_info.device_id = bnad->pcidev->device;
+	pcidev_info.pci_bar_kva = bnad->bar0;
+	bna_iocll_attach(bnad->priv, bnad, bnad->ioc_meminfo, &pcidev_info,
+			 bnad->trcmod, NULL, bnad->logmod);
+
+	err = bnad_cee_attach(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u cee_attach failed: %d\n", bnad->bna_id,
+		       err);
+		goto iocll_detach;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bnad_enable_msix(bnad);
+	else
+		dev_info(&bnad->pcidev->dev, "Working in INTx mode, no RSS\n");
+	bna_intx_disable(bnad->priv, &intr_mask);
+	err = bnad_request_mbox_irq(bnad);
+	if (err)
+		goto disable_msix;
+
+	setup_timer(&bnad->ioc_timer, bnad_ioc_timeout,
+		    (unsigned long)bnad);
+	mod_timer(&bnad->ioc_timer, jiffies +
+		  msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+
+	bnad_conf_lock();
+	bnad->state = BNAD_S_START;
+
+	init_completion(&bnad->ioc_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_enable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	wait_for_completion(&bnad->ioc_comp);
+
+	if (!bnad->ioc_comp_status) {
+		bnad->state = BNAD_S_INIT;
+		bna_port_mac_get(bnad->priv, (struct mac *)bnad->perm_addr);
+	} else {
+		bnad->state = BNAD_S_INIT_DOWN;
+	}
+	bnad_conf_unlock();
+
+	return 0;
+
+disable_msix:
+	bnad_disable_msix(bnad);
+	bnad_cee_detach(bnad);
+iocll_detach:
+	bna_iocll_detach(bnad->priv);
+free_ioc_mem:
+	bnad_free_ioc_mem(bnad);
+	pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE, bnad->priv_stats,
+			    pci_unmap_addr(bnad, priv_stats_dma));
+	bnad->priv_stats = NULL;
+free_priv_mem:
+	kfree(bnad->priv);
+	bnad->priv = NULL;
+free_trcmod:
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+
+	return err;
+}
+
+static void bnad_priv_uninit(struct bnad *bnad)
+{
+	int i;
+	enum bna_status err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->priv) {
+
+		init_completion(&bnad->ioc_comp);
+
+		for (i = 0; i < 10; i++) {
+			spin_lock_irq(&bnad->priv_lock);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err || err == BNA_BUSY);
+			if (!err)
+				break;
+			msleep(1000);
+		}
+		if (err) {
+			/* Probably firmware crashed. */
+			sprintf(message,
+				"bna_iocll_disable failed, "
+				"clean up and try again");
+		DPRINTK(INFO, "%s", message);
+			spin_lock_irq(&bnad->priv_lock);
+			bna_cleanup(bnad->priv);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err);
+		}
+		wait_for_completion(&bnad->ioc_comp);
+
+		sprintf(message, "port %u IOC is disabled", bnad->bna_id);
+		DPRINTK(INFO, "%s", message);
+
+		bnad->state = BNAD_S_UNLOADING;
+
+		/* Stop the timer after disabling IOC. */
+		del_timer_sync(&bnad->ioc_timer);
+		bnad_free_ioc_mem(bnad);
+		bna_iocll_detach(bnad->priv);
+
+		flush_scheduled_work();
+		bnad_free_mbox_irq(bnad);
+
+		bnad_disable_msix(bnad);
+
+		if (bnad->priv_stats) {
+			pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+					    bnad->priv_stats,
+					    pci_unmap_addr(bnad,
+							   priv_stats_dma));
+			bnad->priv_stats = NULL;
+		}
+		kfree(bnad->priv);
+		bnad->priv = NULL;
+	}
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+}
+
+static struct pci_device_id bnad_pci_id_table[] = {
+	{
+	 .vendor = PCI_VENDOR_ID_BROCADE,
+	 .device = PCI_DEVICE_ID_BROCADE_CATAPULT,
+	 .subvendor = PCI_ANY_ID,
+	 .subdevice = PCI_ANY_ID,
+	 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+	 .class_mask = 0xffff00},
+	{0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static int __devinit bnad_pci_probe(struct pci_dev *pdev,
+	const struct pci_device_id *pcidev_id)
+{
+	int err, using_dac;
+	struct net_device *netdev;
+	struct bnad *bnad;
+	unsigned long mmio_start, mmio_len;
+	static u32 bna_id;
+
+	printk(KERN_INFO "bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+	if (!bfad_get_firmware_buf(pdev)) {
+		printk(KERN_WARNING "Failed to load Firmware Image!\n");
+		return -ENODEV;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, BNAD_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed: %d\n", err);
+		goto disable_device;
+	}
+
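+	/* Prefer 64-bit DMA; fall back to a 32-bit mask if that fails. */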
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev,
+				DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev,
+					"set 32bit consistent DMA mask failed: "
+					"%d\n", err);
+				goto release_regions;
+			}
+		}
+		using_dac = 0;
+	}
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct bnad));
+	if (!netdev) {
+		dev_err(&pdev->dev, "alloc_etherdev failed\n");
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	pci_set_drvdata(pdev, netdev);
+
+	bnad = netdev_priv(netdev);
+
+	memset(bnad, 0, sizeof(struct bnad));
+
+	bnad->netdev = netdev;
+	bnad->pcidev = pdev;
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bnad->bar0 = ioremap_nocache(mmio_start, mmio_len);
+	if (!bnad->bar0) {
+		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+		err = -ENOMEM;
+		goto free_devices;
+	}
+	printk(KERN_INFO "bar0 mapped to %p, len %lu\n", bnad->bar0, mmio_len);
+
+	netdev->netdev_ops = &bnad_netdev_ops;
+
+	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_IPV6_CSUM;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+
+	netdev->vlan_features = netdev->features;
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |=
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len - 1;
+
+	bnad_set_ethtool_ops(netdev);
+
+	bnad->bna_id = bna_id;
+	err = bnad_priv_init(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
+		goto unmap_bar0;
+	}
+
+	BNA_ASSERT(netdev->addr_len == ETH_ALEN);
+	memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+	memcpy(netdev->dev_addr, bnad->perm_addr, netdev->addr_len);
+
+	netif_carrier_off(netdev);
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR "port %u register_netdev failed: %d\n",
+		       bnad->bna_id, err);
+		goto bnad_device_uninit;
+	}
+	bna_id++;
+	return 0;
+
+bnad_device_uninit:
+	bnad_priv_uninit(bnad);
+unmap_bar0:
+	iounmap(bnad->bar0);
+free_devices:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+release_regions:
+	pci_release_regions(pdev);
+disable_device:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void __devexit bnad_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct bnad *bnad;
+
+	if (!netdev)
+		return;
+
+	printk(KERN_INFO "%s bnad_pci_remove\n", netdev->name);
+	bnad = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	bnad_priv_uninit(bnad);
+	iounmap(bnad->bar0);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver bnad_pci_driver = {
+	.name = BNAD_NAME,
+	.id_table = bnad_pci_id_table,
+	.probe = bnad_pci_probe,
+	.remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init bnad_module_init(void)
+{
+
+	printk(KERN_INFO "Brocade 10G Ethernet driver\n");
+
+	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+	return pci_register_driver(&bnad_pci_driver);
+}
+
+static void __exit bnad_module_exit(void)
+{
+	pci_unregister_driver(&bnad_pci_driver);
+
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.h net-next-2.6-mod/drivers/net/bna/bnad.h
--- net-next-2.6-orig/drivers/net/bna/bnad.h	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.h	2009-11-26 00:07:07.000000000 -0800
@@ -0,0 +1,347 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef _BNAD_H_
+#define _BNAD_H_
+
+#include "cee/bfa_cee.h"
+#include "bna.h"
+
+#define BNAD_MAX_Q_DEPTH	0x10000
+#define BNAD_MIN_Q_DEPTH	0x200
+
+#define BNAD_TXQ_NUM		1
+#define BNAD_TX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_TXQ	2048
+
+#define BNAD_MAX_RXQS		64
+#define BNAD_MAX_RXQSETS_USED	16
+#define BNAD_RX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_RXQ	2048
+
+#define BNAD_MAX_CQS		64
+#define BNAD_MAX_RXQS_PER_CQ	2
+
+#define BNAD_MSIX_ERR_MAILBOX_NUM	1
+
+#define BNAD_INTX_MAX_IB_NUM	16
+#define BNAD_INTX_IB_NUM	2	/* 1 for Tx, 1 for Rx */
+#define BNAD_INTX_TX_IB_ID	0
+#define BNAD_INTX_RX_IB_ID	1
+
+#define BNAD_QUEUE_NAME_SIZE	16
+
+#define BNAD_JUMBO_MTU		9000
+
+#define BNAD_COALESCING_TIMER_UNIT	5	/* 5us */
+#define BNAD_MAX_COALESCING_TIMEO	0xFF	/* in 5us units */
+#define BNAD_MAX_INTERPKT_COUNT		0xFF
+#define BNAD_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
+
+#define BNAD_TX_COALESCING_TIMEO	20	/* 20 * 5 = 100us */
+#define BNAD_TX_INTERPKT_COUNT		32
+
+#define BNAD_RX_COALESCING_TIMEO	12	/* 12 * 5 = 60us */
+#define BNAD_RX_INTERPKT_COUNT		6
+#define BNAD_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
+
+#define BNAD_SMALL_RXBUF_SIZE	128
+
+#define BNAD_RIT_OFFSET		0
+#define BNAD_MULTICAST_RXQ_ID	0
+
+#define BNAD_NETIF_WAKE_THRESHOLD	8
+
+#define BNAD_TX_MAX_VECTORS		255
+#define BNAD_TX_MAX_VECTORS_PER_WI	4
+#define BNAD_TX_MAX_DATA_PER_WI		0xFFFFFF	/* 24 bits */
+#define BNAD_TX_MAX_DATA_PER_VECTOR	0x3FFF	/* 14 bits */
+#define BNAD_TX_MAX_WRR_QUOTA		0xFFF	/* 12 bits */
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
+
+#define BNAD_NOT_READY(_bnad)	test_bit(BNAD_F_HWERROR, &(_bnad)->flags)
+#define BNAD_ADMIN_DOWN(_bnad)	(!netif_running((_bnad)->netdev) ||	\
+	test_bit(BNAD_F_BCU_DISABLED, &(_bnad)->flags))
+
+#define BNAD_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)	\
+	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define bnad_conf_lock()	down(&bnad->conf_sem)
+#define bnad_conf_unlock()	up(&bnad->conf_sem)
+
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_ct;
+extern u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+
+struct bnad_skb_unmap {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+	u32 producer_index;
+	u32 consumer_index;
+	struct bnad_skb_unmap *unmap_array;
+	u32 q_depth;
+};
+
+struct bnad_ib_entry {
+	struct bna_ib *ib;
+	void *ib_seg_addr;
+	struct bna_ib_config ib_config;
+};
+
+struct bnad_txq_info {
+	unsigned long flags;
+#define BNAD_TXQ_FREE_SENT	0
+	struct bna_txq txq;
+	struct bna_ib ib;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct bnad *bnad;
+	volatile u32 *hw_consumer_index;
+	struct bna_txq_config txq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_rxq_info {
+	unsigned long flags;
+#define BNAD_RXQ_REFILL		0
+	struct bna_rxq rxq;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_packets_with_error;
+	u64 rxbuf_alloc_failed;
+	struct bnad *bnad;
+	u32 rxq_id;
+	struct bna_rxq_config rxq_config;
+} ____cacheline_aligned;
+
+struct bnad_cq_info {
+	struct bna_cq cq;
+	struct bna_ib ib;
+	struct bnad *bnad;
+	struct bna_pkt_rate pkt_rate;
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	volatile u32 *hw_producer_index;
+	struct napi_struct napi;
+	u32 cq_id;
+	struct bna_cq_config cq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_txf_info {
+	u32 txf_id;
+	struct bna_txf_config txf_config;
+};
+
+struct bnad_rxf_info {
+	u32 rxf_id;
+	struct bna_rxf_config rxf_config;
+};
+
+enum bnad_ucast_cmd {
+	BNAD_UCAST_MAC_SET,
+	BNAD_UCAST_MAC_ADD,
+	BNAD_UCAST_MAC_DEL
+};
+
+enum bnad_state {
+	BNAD_S_START = 0,
+	BNAD_S_INIT = 1,
+	BNAD_S_INIT_DOWN = 2,
+	BNAD_S_INIT_DISABLING = 3,
+	BNAD_S_INIT_DISABLED = 4,
+	BNAD_S_OPENING = 5,
+	BNAD_S_OPEN = 6,
+	BNAD_S_OPEN_DOWN = 7,
+	BNAD_S_OPEN_DISABLING = 8,
+	BNAD_S_OPEN_DISABLED = 9,
+	BNAD_S_CLOSING = 10,
+	BNAD_S_UNLOADING = 11
+};
+
+enum bnad_link_state {
+	BNAD_LS_DOWN = 0,
+	BNAD_LS_UP = 1
+};
+struct bnad {
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	struct bna_dev *priv;
+
+	enum bnad_state state;
+	unsigned long flags;
+#define BNAD_F_BCU_DISABLED		0
+#define BNAD_F_HWERROR			1
+#define BNAD_F_MBOX_IRQ_DISABLED	2
+#define BNAD_F_CEE_RUNNING		3
+
+	unsigned int config;
+#define BNAD_CF_MSIX		0x01
+#define BNAD_CF_PROMISC		0x02
+#define BNAD_CF_ALLMULTI		0x04
+#define BNAD_CF_TXQ_DEPTH	0x10
+#define BNAD_CF_RXQ_DEPTH	0x20
+
+	unsigned int priority;
+	unsigned int curr_priority;	/* currently applied priority */
+
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+
+	uint txq_num;
+	uint txq_depth;
+	struct bnad_txq_info *txq_table;
+
+	struct tasklet_struct tx_free_tasklet;	/* For Tx cleanup */
+
+	uint rxq_num;
+	uint rxq_depth;
+	struct bnad_rxq_info *rxq_table;
+	uint cq_num;
+	struct bnad_cq_info *cq_table;
+
+	struct vlan_group *vlangrp;
+
+	u32 rx_csum;
+
+	uint msix_num;
+	struct msix_entry *msix_table;
+
+	uint ib_num;
+	struct bnad_ib_entry *ib_table;
+
+	struct bna_rit_entry *rit;	/* RxQ Indirection Table */
+
+	spinlock_t priv_lock ____cacheline_aligned;
+
+	uint txf_num;
+	struct bnad_txf_info *txf_table;
+	uint rxf_num;
+	struct bnad_rxf_info *rxf_table;
+
+	struct timer_list stats_timer;
+	struct net_device_stats net_stats;
+
+	u8 tx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 tx_interpkt_count;
+
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 rx_interpkt_count;
+	u8 rx_interpkt_timeo;	/* 4 bits, unit is 0.5usec. */
+
+	u8 rx_dyn_coalesce_on;	/* Rx Dynamic Intr Moderation Flag */
+
+	u8 ref_count;
+
+	u8 lldp_comp_status;
+	u8 cee_stats_comp_status;
+	u8 cee_reset_stats_status;
+	u8 ucast_comp_status;
+	u8 qstop_comp_status;
+
+	int ioc_comp_status;
+
+	struct bna_pause_config pause_config;
+
+	struct bna_stats *hw_stats;
+	struct bnad_drv_stats stats;
+
+	struct work_struct work;
+	unsigned int work_flags;
+#define BNAD_WF_ERROR		0x1
+#define BNAD_WF_RESETDONE	0x2
+#define BNAD_WF_CEE_PRIO	0x4
+#define BNAD_WF_LS_NOTIFY	0x8
+
+	struct completion lldp_comp;
+	struct completion cee_stats_comp;
+	struct completion cee_reset_stats_comp;
+	struct completion ucast_comp;
+	struct completion qstop_comp;
+	struct completion ioc_comp;
+
+	u32 bna_id;
+	u8 __iomem *bar0;	/* registers */
+	unsigned char perm_addr[ETH_ALEN];
+
+	void *priv_stats;
+	DECLARE_PCI_UNMAP_ADDR(priv_stats_dma)
+
+	struct bfa_trc_mod *trcmod;
+	struct bfa_log_mod *logmod;
+	struct bna_meminfo ioc_meminfo[BNA_MEM_T_MAX];
+	struct timer_list ioc_timer;
+	struct semaphore conf_sem;
+
+	struct bna_mbox_cbfn priv_cbfn;
+
+	char adapter_name[64];
+	char port_name[64];
+
+	/* CEE Stuff */
+	struct bfa_cee_cbfn cee_cbfn;
+	struct bfa_cee cee;
+
+	struct list_head list_entry;
+};
+
+extern uint bnad_rxqs_per_cq;
+
+extern struct semaphore bnad_list_sem;
+extern struct list_head bnad_list;
+
+int bnad_open(struct net_device *netdev);
+int bnad_stop(struct net_device *netdev);
+int bnad_stop_locked(struct net_device *netdev);
+int bnad_open_locked(struct net_device *netdev);
+int bnad_sw_reset_locked(struct net_device *netdev);
+int bnad_ioc_disabling_locked(struct bnad *bnad);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_ioctl_init(void);
+void bnad_ioctl_exit(void);
+struct net_device_stats *bnad_get_stats(struct net_device *netdev);
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 *mac_ptr,
+		   unsigned int cmd);
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss);
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id);
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask);
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_cq_init(struct bnad *bnad, uint cq_id);
+void bnad_setup_cq(struct bnad *bnad, uint cq_id);
+void bnad_setup_ib(struct bnad *bnad, uint ib_id);
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id);
+void bnad_free_ib(struct bnad *bnad, uint ib_id);
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id);
+u32 bnad_get_msglevel(struct net_device *netdev);
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel);
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth);
+void bnad_free_cq(struct bnad *bnad, uint cq_id);
+void bnad_add_to_list(struct bnad *bnad);
+void bnad_remove_from_list(struct bnad *bnad);
+struct bnad *get_bnadev(int bna_id);
+netdev_tx_t bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _BNAD_H_ */


* Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
@ 2009-11-24  3:51 Rasesh Mody
  0 siblings, 0 replies; 30+ messages in thread
From: Rasesh Mody @ 2009-11-24  3:51 UTC (permalink / raw)
  To: netdev; +Cc: adapter_linux_open_src_team

From: Rasesh Mody <rmody@brocade.com>

This is patch 1/6 which contains linux driver source for
Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
Source is based against net-next-2.6.

We wish this patch to be considered for inclusion in net-next-2.6

Signed-off-by: Rasesh Mody <rmody@brocade.com>
---
 bnad.c | 3548 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 bnad.h |  349 ++++++
 2 files changed, 3897 insertions(+)

diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.c net-next-2.6-mod/drivers/net/bna/bnad.c
--- net-next-2.6-orig/drivers/net/bna/bnad.c	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.c	2009-11-23 13:36:23.217870000 -0800
@@ -0,0 +1,3548 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ *  bnad.c  Brocade 10G PCIe Ethernet driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/workqueue.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pm.h>
+#include <linux/random.h>
+
+#include <net/checksum.h>
+
+#include "bnad.h"
+#include "cna.h"
+#include "bna_iocll.h"
+#include "bna_intr.h"
+#include "bnad_defs.h"
+
+#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
+static const bool bnad_msix = 1;
+static const bool bnad_small_large_rxbufs = 1;
+static uint bnad_rxqsets_used;
+static const bool bnad_ipid_mode;
+static const bool bnad_vlan_strip = 1;
+static const uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
+static const uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
+static uint bnad_log_level;
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");
+
+uint bnad_rxqs_per_cq = 2;
+
+const char *bnad_states[] = {
+	"START",
+	"INIT",
+	"INIT_DOWN",
+	"INIT_DISABLING",
+	"INIT_DISABLED",
+	"OPENING",
+	"OPEN",
+	"OPEN_DOWN",
+	"OPEN_DISABING",
+	"OPEN_DISABLED",
+	"CLOSING",
+	"UNLOADING"
+};
+
+static void bnad_disable_msix(struct bnad *bnad);
+static void bnad_free_ibs(struct bnad *bnad);
+static void bnad_set_rx_mode(struct net_device *netdev);
+static void bnad_set_rx_mode_locked(struct net_device *netdev);
+static void bnad_reconfig_vlans(struct bnad *bnad);
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets);
+static int bnad_set_mac_address(struct net_device *netdev, void *addr);
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr);
+static int bnad_disable_locked(struct bnad *bnad);
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu);
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid);
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid);
+static void bnad_netpoll(struct net_device *netdev);
+
+static const struct net_device_ops bnad_netdev_ops = {
+	.ndo_open			= bnad_open,
+	.ndo_stop			= bnad_stop,
+	.ndo_start_xmit			= bnad_start_xmit,
+	.ndo_get_stats			= bnad_get_stats,
+	.ndo_set_rx_mode		= bnad_set_rx_mode,
+	.ndo_set_multicast_list		= bnad_set_rx_mode,
+	.ndo_set_mac_address		= bnad_set_mac_address,
+	.ndo_change_mtu			= bnad_change_mtu,
+
+	.ndo_vlan_rx_register		= bnad_vlan_rx_register,
+	.ndo_vlan_rx_add_vid		= bnad_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid		= bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller		= bnad_netpoll,
+#endif
+};
+
+u32 bnad_get_msglevel(struct net_device *netdev)
+{
+	return bnad_log_level;
+}
+
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	bnad_conf_lock();
+	bnad_log_level = msglevel;
+	bnad_conf_unlock();
+}
+
+static unsigned int bnad_free_txbufs(struct bnad_txq_info *txqinfo,
+	u16 updated_txq_cons)
+{
+	struct bnad *bnad = txqinfo->bnad;
+	unsigned int sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	wis = BNAD_Q_INDEX_CHANGE(txqinfo->txq.q.consumer_index,
+				  updated_txq_cons, txqinfo->txq.q.q_depth);
+	BNA_ASSERT(wis <=
+		   BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth));
+	unmap_array = txqinfo->skb_unmap_q.unmap_array;
+	unmap_cons = txqinfo->skb_unmap_q.consumer_index;
+	prefetch(&unmap_array[unmap_cons + 1]);
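+	/* Unmap and free every skb covered by the consumed work items. */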
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+		BNA_ASSERT(skb);
+		unmap_array[unmap_cons].skb = NULL;
+		BNA_ASSERT(wis >=
+			   BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags));
+		BNA_ASSERT(((txqinfo->skb_unmap_q.producer_index -
+			     unmap_cons) & (txqinfo->skb_unmap_q.q_depth -
+					    1)) >=
+			   1 + skb_shinfo(skb)->nr_frags);
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr), skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, txqinfo->skb_unmap_q.q_depth);
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+				       pci_unmap_addr(&unmap_array[unmap_cons],
+						      dma_addr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					   0);
+			BNA_QE_INDX_ADD(unmap_cons, 1,
+					txqinfo->skb_unmap_q.q_depth);
+			prefetch(&unmap_array[unmap_cons + 1]);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	txqinfo->txq.q.consumer_index = updated_txq_cons;
+	txqinfo->skb_unmap_q.consumer_index = unmap_cons;
+	txqinfo->tx_packets += sent_packets;
+	txqinfo->tx_bytes += sent_bytes;
+	return sent_packets;
+}
+
+static inline void bnad_disable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+}
+
+static inline void bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    bnad->tx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+			bnad->cq_table[i].rx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static inline void bnad_disable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib, 0);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+}
+static inline void bnad_enable_rx_irq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib,
+				    cqinfo->rx_coalescing_timeo);
+
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static unsigned int bnad_tx(struct bnad *bnad, struct bnad_txq_info *txqinfo)
+{
+	struct net_device *netdev = bnad->netdev;
+	unsigned int sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags))
+		return 0;
+
+	sent = bnad_free_txbufs(txqinfo,
+				(u16) (*txqinfo->hw_consumer_index));
+	if (sent) {
+		if (netif_queue_stopped(netdev) &&
+		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
+		    BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);
+	} else {
+		bna_ib_ack(bnad->priv, &txqinfo->ib, 0);
+	}
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+
+	return sent;
+}
+
+static irqreturn_t bnad_msix_tx(int irq, void *data)
+{
+	struct bnad_txq_info *txqinfo = (struct bnad_txq_info *)data;
+	struct bnad *bnad = txqinfo->bnad;
+
+	bnad_tx(bnad, txqinfo);
+
+	return IRQ_HANDLED;
+}
+
+static void bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc =
+		BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				rxqinfo->skb_unmap_q.q_depth);
+
+	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
+	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= rxqinfo->rxq.q.q_depth);
+
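+	/* Refill the RxQ with freshly allocated, DMA-mapped buffers. */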
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent,
+					     wi_range);
+			BNA_ASSERT(wi_range &&
+				   wi_range <= rxqinfo->rxq.q.q_depth);
+		}
+		skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			rxqinfo->rxbuf_alloc_failed++;
+			goto finishing;
+		}
+		skb->dev = rxqinfo->bnad->netdev;
+		skb_reserve(skb, NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr =
+			pci_map_single(rxqinfo->bnad->pcidev, skb->data,
+				       rxqinfo->rxq_config.buffer_size,
+				       PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, rxqinfo->skb_unmap_q.q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		rxqinfo->skb_unmap_q.producer_index = unmap_prod;
+		rxqinfo->rxq.q.producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(&rxqinfo->rxq);
+	}
+}
+
+static inline void bnad_refill_rxq(struct bnad_rxq_info *rxqinfo)
+{
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags)) {
+		if (BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				    rxqinfo->skb_unmap_q.q_depth) >>
+		    BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_rxbufs(rxqinfo);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+	}
+}
+
+static unsigned int bnad_poll_cq(struct bnad *bnad,
+	struct bnad_cq_info *cqinfo, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_rxq_info *rxqinfo = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	struct bna_pkt_rate *pkt_rt = &cqinfo->pkt_rate;
+
+	prefetch(bnad->netdev);
+	cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+	BNA_ASSERT(wi_range && wi_range <= cqinfo->cq.q.q_depth);
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+		rxqinfo = &bnad->rxq_table[cmpl->rxq_id];
+		unmap_q = &rxqinfo->skb_unmap_q;
+
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		prefetch(skb->data - NET_IP_ALIGN);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		/* XXX May be bad for performance. */
+		/* CATAPULT_BRINGUP : Should we add all the packets ? */
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+
+		wis++;
+		if (likely(--wi_range))
+			next_cmpl = cmpl + 1;
+		else {
+			BNA_Q_PI_ADD(&cqinfo->cq, wis);
+			wis = 0;
+			next_cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+			BNA_ASSERT(wi_range &&
+				   wi_range <= cqinfo->cq.q.q_depth);
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+				      BNA_CQ_EF_FCS_ERROR |
+				      BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb(skb);
+			rxqinfo->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely
+		    (bnad->rx_csum &&
+		     (((flags & BNA_CQ_EF_IPV4) &&
+		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+		      (flags & BNA_CQ_EF_IPV6)) &&
+		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		rxqinfo->rx_packets++;
+		rxqinfo->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) &&
+		    bnad_vlan_strip) {
+			BNA_ASSERT(cmpl->vlan_tag);
+			vlan_hwaccel_receive_skb(skb, bnad->vlangrp,
+						 ntohs(cmpl->vlan_tag));
+		} else
+			netif_receive_skb(skb);
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	BNA_Q_PI_ADD(&cqinfo->cq, wis);
+
+	if (likely(rxqinfo)) {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, packets);
+		/* Check the current queue first. */
+		bnad_refill_rxq(rxqinfo);
+
+		/* XXX counters per queue for refill? */
+		if (likely(bnad_small_large_rxbufs)) {
+			/* There are 2 RxQs - small and large buffer queues */
+			unsigned int rxq_id = (rxqinfo->rxq_id ^ 1);
+			bnad_refill_rxq(&bnad->rxq_table[rxq_id]);
+		}
+	} else {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	}
+
+	return packets;
+}
+
+static irqreturn_t bnad_msix_rx(int irq, void *data)
+{
+	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
+	struct bnad *bnad = cqinfo->bnad;
+
+	if (likely(napi_schedule_prep(&cqinfo->napi))) {
+		bnad_disable_rx_irq(bnad, cqinfo);
+		__napi_schedule(&cqinfo->napi);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_msix_err_mbox(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+
+	bna_intr_status_get(bnad->priv, &intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+		bna_mbox_err_handler(bnad->priv, intr_status);
+
+	spin_unlock(&bnad->priv_lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_isr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+
+	if (!intr_status) {
+		spin_unlock(&bnad->priv_lock);
+		return IRQ_NONE;
+	}
+
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		bna_mbox_err_handler(bnad->priv, intr_status);
+		spin_unlock(&bnad->priv_lock);
+		if (BNA_IS_ERR_INTR(intr_status) ||
+		    !BNA_IS_INTX_DATA_INTR(intr_status))
+			goto exit_isr;
+	} else
+		spin_unlock(&bnad->priv_lock);
+
+	if (likely(napi_schedule_prep(&bnad->cq_table[0].napi))) {
+		bnad_disable_txrx_irqs(bnad);
+		__napi_schedule(&bnad->cq_table[0].napi);
+	}
+
+exit_isr:
+	return IRQ_HANDLED;
+}
+
+static int bnad_request_mbox_irq(struct bnad *bnad)
+{
+	int err;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		err = request_irq(bnad->msix_table[bnad->msix_num - 1].vector,
+				  &bnad_msix_err_mbox, 0,
+				  bnad->netdev->name, bnad->netdev);
+	} else {
+		err = request_irq(bnad->pcidev->irq, &bnad_isr,
+				  IRQF_SHARED, bnad->netdev->name,
+				  bnad->netdev);
+	}
+
+	if (err) {
+		dev_err(&bnad->pcidev->dev,
+			"Request irq for mailbox failed: %d\n", err);
+		return err;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bna_mbox_msix_idx_set(bnad->priv, bnad->msix_num - 1);
+
+	bna_mbox_intr_enable(bnad->priv);
+	return 0;
+}
+
+static void bnad_sync_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+	synchronize_irq(irq);
+}
+
+static void bnad_free_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_mbox_intr_disable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	free_irq(irq, bnad->netdev);
+}
+
+static int bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
+{
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[txq_id].vector,
+			   &bnad_msix_tx, 0,
+			   bnad->txq_table[txq_id].name,
+			   &bnad->txq_table[txq_id]);
+}
+
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
+{
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
+			   &bnad_msix_rx, 0,
+			   bnad->cq_table[cq_id].name, &bnad->cq_table[cq_id]);
+}
+
+static int bnad_request_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	int i;
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		u32 mask;
+		bna_intx_disable(bnad->priv, &mask);
+		mask &= ~0xffff;
+		bna_intx_enable(bnad->priv, mask);
+		for (i = 0; i < bnad->ib_num; i++)
+			bna_ib_ack(bnad->priv, bnad->ib_table[i].ib, 0);
+		return 0;
+	}
+
+	entries = bnad->msix_table;
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_request_txq_irq(bnad, i);
+		if (err) {
+			sprintf(message, "%s request irq for TxQ %d failed %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			while (--i >= 0) {
+				free_irq(entries[i].vector,
+					 &bnad->txq_table[i]);
+			}
+			return err;
+		}
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_request_cq_irq(bnad, i);
+		if (err) {
+			sprintf(message, "%s request irq for CQ %u failed %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			while (--i >= 0) {
+				free_irq(entries[bnad->txq_num + i].vector,
+					 &bnad->cq_table[i]);
+			}
+			goto free_txq_irqs;
+		}
+	}
+
+	return 0;
+
+free_txq_irqs:
+	for (i = 0; i < bnad->txq_num; i++)
+		free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+	bnad_disable_msix(bnad);
+
+	return err;
+}
+
+static void bnad_free_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	uint i;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		entries = bnad->msix_table;
+		for (i = 0; i < bnad->txq_num; i++)
+			free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+		for (i = 0; i < bnad->cq_num; i++) {
+			free_irq(entries[bnad->txq_num + i].vector,
+				 &bnad->cq_table[i]);
+		}
+	} else
+		synchronize_irq(bnad->pcidev->irq);
+}
+
+void bnad_setup_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+
+	BNA_ASSERT(ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_ib_config_set(bnad->priv, ib_entry->ib, ib_id,
+			  &ib_entry->ib_config);
+	/* Start the IB */
+	bna_ib_ack(bnad->priv, ib_entry->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_ibs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_ib(bnad, bnad->txq_table[i].txq_config.ib_id);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_ib(bnad, bnad->cq_table[i].cq_config.ib_id);
+}
+
+/* These functions are called back with priv_lock held. */
+
+static void bnad_lldp_get_cfg_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_attr_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_stats_comp_status = status;
+	complete(&bnad->cee_stats_comp);
+}
+
+static void bnad_cee_reset_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_reset_stats_status = status;
+	complete(&bnad->cee_reset_stats_comp);
+}
+
+static void bnad_ucast_set_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->ucast_comp_status = status;
+	complete(&bnad->ucast_comp);
+}
+
+static void bnad_q_stop_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+
+	bnad->qstop_comp_status = status;
+	complete(&bnad->qstop_comp);
+}
+
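+/* Return the lowest priority set in the CEE priority bitmap, or 0 if none. */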
+static unsigned int bnad_get_priority(struct bnad *bnad, u8 prio_map)
+{
+	unsigned int i;
+
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		return i;
+	}
+	return 0;
+}
+
+static void bnad_link_up_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct bfi_ll_aen *up_aen = (struct bfi_ll_aen *)
+		(&bnad->priv->mb_msg);
+
+	bnad->cee_linkup = up_aen->cee_linkup;
+	bnad->priority = bnad_get_priority(bnad, up_aen->prio_map);
+
+	bnad->link_state = BNAD_LS_UP;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_link_down_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->link_state = BNAD_LS_DOWN;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void bnad_stats_get_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->stats.hw_stats_updates++;
+	if (bnad->state == BNAD_S_OPEN)
+		mod_timer(&bnad->stats_timer, jiffies + HZ);
+}
+
+/* Called with bnad priv_lock held. */
+static void bnad_hw_error(struct bnad *bnad, u8 status)
+{
+	unsigned int irq;
+	char message[BNA_MESSAGE_SIZE];
+
+	set_bit(BNAD_F_HWERROR, &bnad->flags);
+
+	bna_mbox_intr_disable(bnad->priv);
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (!test_and_set_bit(BNAD_F_MBOX_IRQ_DISABLED, &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			sprintf(message, "Disabling Mbox IRQ %d for port %d",
+				irq, bnad->bna_id);
+			DPRINTK(INFO, "%s", message);
+			disable_irq_nosync(irq);
+		}
+	}
+
+	bna_cleanup(bnad->priv);
+
+	bnad->work_flags = BNAD_WF_ERROR;
+	if (bnad->state != BNAD_S_UNLOADING)
+		schedule_work(&bnad->work);
+}
+
+static void bnad_hw_error_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad_hw_error(bnad, status);
+}
+
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth)
+{
+	/* Q_depth must be power of 2 for macros to work. */
+	BNA_ASSERT(BNA_POWER_OF_2(q_depth));
+	unmap_q->q_depth = q_depth;
+	unmap_q->unmap_array = vmalloc(q_depth * sizeof(struct bnad_skb_unmap));
+	if (!unmap_q->unmap_array)
+		return -ENOMEM;
+	memset(unmap_q->unmap_array, 0,
+	       q_depth * sizeof(struct bnad_skb_unmap));
+	return 0;
+}
+
+static int bnad_alloc_unmap_queues(struct bnad *bnad)
+{
+	int i, err = 0;
+	struct bnad_txq_info *txqinfo;
+	struct bnad_rxq_info *rxqinfo;
+	char message[BNA_MESSAGE_SIZE];
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		err = bnad_alloc_unmap_q(&txqinfo->skb_unmap_q,
+					 txqinfo->txq.q.q_depth * 4);
+		if (err) {
+			sprintf(message,
+				"%s allocating Tx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			return err;
+		}
+	}
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		err = bnad_alloc_unmap_q(&rxqinfo->skb_unmap_q,
+					 rxqinfo->rxq.q.q_depth);
+		if (err) {
+			sprintf(message,
+				"%s allocating Rx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void bnad_reset_q(struct bnad *bnad, struct bna_q *q,
+	struct bnad_unmap_q *unmap_q)
+{
+	u32 _ui;
+
+	BNA_ASSERT(q->producer_index == q->consumer_index);
+	BNA_ASSERT(unmap_q->producer_index == unmap_q->consumer_index);
+
+	q->producer_index = 0;
+	q->consumer_index = 0;
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	for (_ui = 0; _ui < unmap_q->q_depth; _ui++)
+		BNA_ASSERT(!unmap_q->unmap_array[_ui].skb);
+}
+
+static void bnad_flush_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	struct bnad *bnad = rxqinfo->bnad;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 cq_id;
+
+	unmap_q = &rxqinfo->skb_unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+	}
+
+	bnad_reset_q(bnad, &rxqinfo->rxq.q, &rxqinfo->skb_unmap_q);
+	cq_id = rxqinfo->rxq_id / bnad_rxqs_per_cq;
+	*bnad->cq_table[cq_id].hw_producer_index = 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_disable_txq(struct bnad *bnad, u32 txq_id)
+{
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_txq_stop(bnad->priv, txq_id);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto txq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto txq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+txq_stop_exit:
+	if (err) {
+		sprintf(message, "%s stop TxQ %u failed %d", bnad->netdev->name,
+			txq_id, err);
+		DPRINTK(INFO, "%s", message);
+	}
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask)
+{
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_multi_rxq_stop(bnad->priv, rxq_id_mask);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto rxq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto rxq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+rxq_stop_exit:
+	if (err) {
+		sprintf(message, "%s stop RxQs(0x%llx) failed %d",
+			bnad->netdev->name, rxq_id_mask, err);
+		DPRINTK(INFO, "%s", message);
+	}
+
+	return err;
+}
+
+static int bnad_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_rx_irq(bnad, cqinfo);
+	return rcvd;
+}
+
+static int bnad_poll_txrx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	bnad_tx(bnad, &bnad->txq_table[0]);
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_txrx_irqs(bnad);
+	return rcvd;
+}
+
+static void bnad_napi_init(struct bnad *bnad)
+{
+	int (*napi_poll) (struct napi_struct *, int);
+	int i;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		napi_poll = bnad_poll_rx;
+	else
+		napi_poll = bnad_poll_txrx;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_add(bnad->netdev, &bnad->cq_table[i].napi, napi_poll,
+			       64);
+}
+
+static void bnad_napi_enable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_enable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_disable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_disable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_del(&bnad->cq_table[i].napi);
+}
+
+static void bnad_stop_data_path(struct bnad *bnad, int on_error)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!on_error && !BNAD_NOT_READY(bnad)) {
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Wait to make sure Tx and Rx are stopped. */
+	msleep(1000);
+	bnad_free_txrx_irqs(bnad);
+	bnad_sync_mbox_irq(bnad);
+
+	bnad_napi_disable(bnad);
+	bnad_napi_uninit(bnad);
+	/* Delete the stats timer after synchronize with mbox irq. */
+	del_timer_sync(&bnad->stats_timer);
+
+	netif_tx_disable(bnad->netdev);
+	netif_carrier_off(bnad->netdev);
+
+	/*
+	 * Remove tasklets if scheduled
+	 */
+	tasklet_kill(&bnad->tx_free_tasklet);
+}
+
+static void bnad_port_admin_locked(struct bnad *bnad, u8 up)
+{
+	spin_lock_irq(&bnad->priv_lock);
+	if (!BNAD_NOT_READY(bnad)) {
+		bna_port_admin(bnad->priv, up);
+		if (up)
+			mod_timer(&bnad->stats_timer, jiffies + HZ);
+		else
+			bnad->link_state = BNAD_LS_DOWN;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held */
+static int bnad_stop_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_CLOSING;
+		bnad_disable_locked(bnad);
+		bnad->state = BNAD_S_INIT;
+		sprintf(message, "%s is stopped", bnad->netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN_DISABLED:
+		bnad->state = BNAD_S_INIT_DISABLED;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held */
+int bnad_ioc_disabling_locked(struct bnad *bnad)
+{
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT_DISABLING;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+		bnad_disable_locked(bnad);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int bnad_alloc_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	ib_entry->ib_seg_addr =
+		pci_alloc_consistent(bnad->pcidev, L1_CACHE_BYTES, &dma_addr);
+	if (!ib_entry->ib_seg_addr)
+		return -ENOMEM;
+
+	BNA_SET_DMA_ADDR(dma_addr, &ib_entry->ib_config.ib_seg_addr);
+	return 0;
+}
+
+static int bnad_alloc_ibs(struct bnad *bnad)
+{
+	uint i;
+	int err;
+
+	bnad->ib_num = bnad->txq_num + bnad->cq_num;
+	bnad->ib_table =
+		kzalloc(bnad->ib_num * sizeof(struct bnad_ib_entry),
+			GFP_KERNEL);
+	if (!bnad->ib_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->ib_num; i++) {
+		err = bnad_alloc_ib(bnad, i);
+		if (err)
+			goto free_ibs;
+	}
+	return 0;
+
+free_ibs:
+	bnad_free_ibs(bnad);
+	return err;
+}
+
+void bnad_free_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	if (ib_entry->ib_seg_addr) {
+		BNA_GET_DMA_ADDR(&ib_entry->ib_config.ib_seg_addr, dma_addr);
+		pci_free_consistent(bnad->pcidev, L1_CACHE_BYTES,
+				    ib_entry->ib_seg_addr, dma_addr);
+		ib_entry->ib_seg_addr = NULL;
+	}
+}
+
+static void bnad_free_ibs(struct bnad *bnad)
+{
+	uint i;
+
+	if (!bnad->ib_table)
+		return;
+	for (i = 0; i < bnad->ib_num; i++)
+		bnad_free_ib(bnad, i);
+	kfree(bnad->ib_table);
+	bnad->ib_table = NULL;
+}
+
+/* Let the caller deal with error - free memory. */
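+/*
+ * Allocate the queue pages and the queue page table (QPT): one DMA-coherent
+ * page per PAGE_SIZE chunk of the queue, with each page's DMA address
+ * recorded in the QPT handed to the hardware.
+ */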
+static int bnad_alloc_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q, size_t qsize)
+{
+	size_t i;
+	dma_addr_t dma_addr;
+
+	qsize = ALIGN(qsize, PAGE_SIZE);
+	qpt->page_count = qsize >> PAGE_SHIFT;
+	qpt->page_size = PAGE_SIZE;
+
+	qpt->kv_qpt_ptr =
+		pci_alloc_consistent(bnad->pcidev,
+				     qpt->page_count *
+				     sizeof(struct bna_dma_addr), &dma_addr);
+	if (!qpt->kv_qpt_ptr)
+		return -ENOMEM;
+	BNA_SET_DMA_ADDR(dma_addr, &qpt->hw_qpt_ptr);
+
+	q->qpt_ptr = kzalloc(qpt->page_count * sizeof(void *), GFP_KERNEL);
+	if (!q->qpt_ptr)
+		return -ENOMEM;
+	qpt->qpt_ptr = q->qpt_ptr;
+	for (i = 0; i < qpt->page_count; i++) {
+		q->qpt_ptr[i] =
+			pci_alloc_consistent(bnad->pcidev, PAGE_SIZE,
+					     &dma_addr);
+		if (!q->qpt_ptr[i])
+			return -ENOMEM;
+		BNA_SET_DMA_ADDR(dma_addr,
+				 &((struct bna_dma_addr *)qpt->kv_qpt_ptr)[i]);
+
+	}
+
+	return 0;
+}
+
+static void bnad_free_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q)
+{
+	int i;
+	dma_addr_t dma_addr;
+
+	if (qpt->kv_qpt_ptr && q->qpt_ptr) {
+		for (i = 0; i < qpt->page_count; i++) {
+			if (q->qpt_ptr[i]) {
+				BNA_GET_DMA_ADDR(&
+						 ((struct bna_dma_addr *)qpt->
+						  kv_qpt_ptr)[i], dma_addr);
+				pci_free_consistent(bnad->pcidev, PAGE_SIZE,
+						    q->qpt_ptr[i], dma_addr);
+			}
+		}
+	}
+
+	kfree(q->qpt_ptr);
+	qpt->qpt_ptr = q->qpt_ptr = NULL;
+
+	if (qpt->kv_qpt_ptr) {
+		BNA_GET_DMA_ADDR(&qpt->hw_qpt_ptr, dma_addr);
+		pci_free_consistent(bnad->pcidev,
+				    qpt->page_count *
+				    sizeof(struct bna_dma_addr),
+				    qpt->kv_qpt_ptr, dma_addr);
+		qpt->kv_qpt_ptr = NULL;
+	}
+}
+
+static void bnad_free_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+	if (txqinfo->skb_unmap_q.unmap_array) {
+		bnad_free_txbufs(txqinfo, txqinfo->txq.q.producer_index);
+		vfree(txqinfo->skb_unmap_q.unmap_array);
+		txqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+	if (rxqinfo->skb_unmap_q.unmap_array) {
+		bnad_flush_rxbufs(rxqinfo);
+		vfree(rxqinfo->skb_unmap_q.unmap_array);
+		rxqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void bnad_free_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo = &bnad->cq_table[cq_id];
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+}
+
+static void bnad_free_queues(struct bnad *bnad)
+{
+	uint i;
+
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++)
+			bnad_free_txq(bnad, i);
+		kfree(bnad->txq_table);
+		bnad->txq_table = NULL;
+	}
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++)
+			bnad_free_rxq(bnad, i);
+		kfree(bnad->rxq_table);
+		bnad->rxq_table = NULL;
+	}
+
+	if (bnad->cq_table) {
+		for (i = 0; i < bnad->cq_num; i++)
+			bnad_free_cq(bnad, i);
+		kfree(bnad->cq_table);
+		bnad->cq_table = NULL;
+	}
+}
+
+static int bnad_txq_init(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	err = bnad_alloc_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q,
+			   bnad->txq_depth * sizeof(struct bna_txq_entry));
+	if (err) {
+		bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+		return err;
+	}
+	txqinfo->txq.q.q_depth = bnad->txq_depth;
+	txqinfo->bnad = bnad;
+	txqinfo->txq_config.txf_id = BNAD_TX_FUNC_ID;
+	snprintf(txqinfo->name, sizeof(txqinfo->name), "%s TxQ %d",
+		 bnad->netdev->name, txq_id);
+	return 0;
+}
+
+static int bnad_txqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->txq_table =
+		kzalloc(bnad->txq_num * sizeof(struct bnad_txq_info),
+			GFP_KERNEL);
+	if (!bnad->txq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_txq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	err = bnad_alloc_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q,
+			   bnad->rxq_depth * sizeof(struct bna_rxq_entry));
+	if (err) {
+		bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+		return err;
+	}
+	rxqinfo->rxq.q.q_depth = bnad->rxq_depth;
+	rxqinfo->bnad = bnad;
+	rxqinfo->rxq_id = rxq_id;
+	rxqinfo->rxq_config.cq_id = rxq_id / bnad_rxqs_per_cq;
+
+	return 0;
+}
+
+static int bnad_rxqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->rxq_table =
+		kzalloc(bnad->rxq_num * sizeof(struct bnad_rxq_info),
+			GFP_KERNEL);
+	if (!bnad->rxq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		err = bnad_rxq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int bnad_cq_init(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	err = bnad_alloc_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q,
+			   bnad->rxq_depth * bnad_rxqs_per_cq *
+			   sizeof(struct bna_cq_entry));
+	if (err) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return err;
+	}
+
+	cqinfo->cq.q.q_depth = bnad->rxq_depth * bnad_rxqs_per_cq;
+	cqinfo->bnad = bnad;
+
+	cqinfo->rx_coalescing_timeo = bnad->rx_coalescing_timeo;
+
+	cqinfo->cq_id = cq_id;
+	snprintf(cqinfo->name, sizeof(cqinfo->name), "%s CQ %d",
+		 bnad->netdev->name, cq_id);
+
+	return 0;
+}
+
+static int bnad_cqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->cq_table =
+		kzalloc(bnad->cq_num * sizeof(struct bnad_cq_info), GFP_KERNEL);
+	if (!bnad->cq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_cq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
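+/*
+ * Scale the configured queue depth down for MTUs larger than the standard
+ * Ethernet payload, keeping the result a power of 2 and no smaller than
+ * BNAD_MIN_Q_DEPTH.
+ */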
+static uint bnad_get_qsize(uint qsize_conf, uint mtu)
+{
+	uint qsize;
+
+	if (mtu >= ETH_DATA_LEN) {
+		qsize = qsize_conf / (mtu / ETH_DATA_LEN);
+		if (!BNA_POWER_OF_2(qsize))
+			BNA_TO_POWER_OF_2_HIGH(qsize);
+		if (qsize < BNAD_MIN_Q_DEPTH)
+			qsize = BNAD_MIN_Q_DEPTH;
+	} else
+		qsize = qsize_conf;
+
+	return qsize;
+}
+
+static int bnad_init_queues(struct bnad *bnad)
+{
+	int err;
+
+	if (!(bnad->config & BNAD_CF_TXQ_DEPTH))
+		bnad->txq_depth =
+			bnad_get_qsize(bnad_txq_depth, bnad->netdev->mtu);
+	if (!(bnad->config & BNAD_CF_RXQ_DEPTH))
+		bnad->rxq_depth =
+			bnad_get_qsize(bnad_rxq_depth, bnad->netdev->mtu);
+
+	err = bnad_txqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_rxqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_cqs_init(bnad);
+
+	return err;
+}
+
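+/*
+ * Bind a CQ to its interrupt block (IB): point the CQ's shadow producer
+ * index at the IB segment memory and set up the Rx coalescing timer and
+ * MSI-X/INTx routing for that IB.
+ */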
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
+{
+	struct bnad_cq_info *cqinfo;
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+
+	BNA_ASSERT(cq_id < bnad->cq_num && ib_id < bnad->ib_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	ib_entry = &bnad->ib_table[ib_id];
+
+	cqinfo->hw_producer_index = (u32 *) (ib_entry->ib_seg_addr);
+	cqinfo->cq_config.ib_id = ib_id;
+	cqinfo->cq_config.ib_seg_index = 0;
+
+	ib_entry->ib = &cqinfo->ib;
+	ib_config = &ib_entry->ib_config;
+	ib_config->coalescing_timer = bnad->rx_coalescing_timeo;
+	ib_config->control_flags =
+		BNA_IB_CF_INT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
+	if (bnad->config & BNAD_CF_MSIX) {
+		ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+		ib_config->msix_vector = ib_id;
+	} else
+		ib_config->msix_vector = 1 << ib_id;
+
+	/* Every CQ has its own IB. */
+	ib_config->seg_size = 1;
+	ib_config->index_table_offset = ib_id;
+}
+
+static void bnad_ibs_init(struct bnad *bnad)
+{
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+	struct bnad_txq_info *txqinfo;
+
+	int ib_id, i;
+
+	ib_id = 0;
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		ib_entry = &bnad->ib_table[ib_id];
+
+		txqinfo->hw_consumer_index = ib_entry->ib_seg_addr;
+		txqinfo->txq_config.ib_id = ib_id;
+		txqinfo->txq_config.ib_seg_index = 0;
+
+		ib_entry->ib = &txqinfo->ib;
+		ib_config = &ib_entry->ib_config;
+		ib_config->coalescing_timer = bnad->tx_coalescing_timeo;
+		ib_config->control_flags =
+			BNA_IB_CF_INTER_PKT_DMA | BNA_IB_CF_INT_ENABLE |
+			BNA_IB_CF_COALESCING_MODE | BNA_IB_CF_MASTER_ENABLE;
+		if (bnad->config & BNAD_CF_MSIX) {
+			ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+			ib_config->msix_vector = ib_id;
+		} else
+			ib_config->msix_vector = 1 << ib_id;
+		ib_config->interpkt_count = bnad->tx_interpkt_count;
+
+		/* Every TxQ has its own IB. */
+		ib_config->seg_size = 1;
+		ib_config->index_table_offset = ib_id;
+		ib_id++;
+	}
+
+	for (i = 0; i < bnad->cq_num; i++, ib_id++)
+		bnad_rxib_init(bnad, i, ib_id);
+}
+
+static void bnad_txf_init(struct bnad *bnad, uint txf_id)
+{
+	struct bnad_txf_info *txf_info;
+
+	BNA_ASSERT(bnad->txf_table && txf_id < bnad->txf_num);
+	txf_info = &bnad->txf_table[txf_id];
+	txf_info->txf_id = txf_id;
+	txf_info->txf_config.flags =
+		BNA_TXF_CF_VLAN_WI_BASED | BNA_TXF_CF_ENABLE;
+}
+
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss)
+{
+	struct bnad_rxf_info *rxf_info;
+
+	BNA_ASSERT(bnad->rxf_table && rxf_id < bnad->rxf_num);
+	rxf_info = &bnad->rxf_table[rxf_id];
+	rxf_info->rxf_id = rxf_id;
+	rxf_info->rxf_config.rit_offset = rit_offset;
+	rxf_info->rxf_config.mcast_rxq_id = BNAD_MULTICAST_RXQ_ID;
+	if (bnad_small_large_rxbufs)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_SM_LG_RXQ;
+	if (bnad_vlan_strip)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_VLAN_STRIP;
+	if (rss) {
+		struct bna_rxf_rss *rxf_rss;
+
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_RSS_ENABLE;
+		rxf_rss = &rxf_info->rxf_config.rss;
+		rxf_rss->type =
+			BNA_RSS_V4_TCP | BNA_RSS_V4_IP | BNA_RSS_V6_TCP |
+			BNA_RSS_V6_IP;
+		rxf_rss->hash_mask = bnad->cq_num - 1;
+		get_random_bytes(rxf_rss->toeplitz_hash_key,
+				 sizeof(rxf_rss->toeplitz_hash_key));
+	}
+}
+
+static int bnad_init_funcs(struct bnad *bnad)
+{
+	bnad->txf_table =
+		kzalloc(sizeof(struct bnad_txf_info) * bnad->txf_num,
+			GFP_KERNEL);
+	if (!bnad->txf_table)
+		return -ENOMEM;
+	bnad_txf_init(bnad, BNAD_TX_FUNC_ID);
+
+	bnad->rxf_table =
+		kzalloc(sizeof(struct bnad_rxf_info) * bnad->rxf_num,
+			GFP_KERNEL);
+	if (!bnad->rxf_table)
+		return -ENOMEM;
+	bnad_rxf_init(bnad, BNAD_RX_FUNC_ID, BNAD_RIT_OFFSET,
+		      (bnad->cq_num > 1) ? 1 : 0);
+	return 0;
+}
+
+static void bnad_setup_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+
+	/* CEE state should not change while we do this */
+	spin_lock_irq(&bnad->priv_lock);
+	if (!bnad->cee_linkup) {
+		txqinfo->txq_config.priority = bnad->curr_priority = txq_id;
+		clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	} else {
+		txqinfo->txq_config.priority = bnad->curr_priority =
+			bnad->priority;
+		set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	}
+	/*  Set wrr_quota properly if multiple priorities/TxQs are enabled. */
+	txqinfo->txq_config.wrr_quota = BNAD_TX_MAX_WRR_QUOTA;
+	bna_txq_config(bnad->priv, &txqinfo->txq, txq_id, &txqinfo->txq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+
+	/*
+	 * Every RxQ set has 2 RxQs: the first is large buffer RxQ,
+	 * the second is small buffer RxQ.
+	 */
+	if ((rxq_id % bnad_rxqs_per_cq) == 0)
+		rxqinfo->rxq_config.buffer_size =
+			(bnad_vlan_strip ? VLAN_ETH_HLEN : ETH_HLEN) +
+			bnad->netdev->mtu + ETH_FCS_LEN;
+	else
+		rxqinfo->rxq_config.buffer_size = BNAD_SMALL_RXBUF_SIZE;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxq_config(bnad->priv, &rxqinfo->rxq, rxq_id, &rxqinfo->rxq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_setup_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_cq_config(bnad->priv, &cqinfo->cq, cq_id, &cqinfo->cq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_queues(struct bnad *bnad)
+{
+	uint i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_txq(bnad, i);
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_setup_rxq(bnad, i);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_cq(bnad, i);
+}
+
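+/*
+ * Program the RX indirection table: each entry maps a CQ to its large
+ * buffer RxQ and, when small/large buffers are used, its small buffer RxQ.
+ */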
+static void bnad_setup_rit(struct bnad *bnad)
+{
+	int i, size;
+
+	size = bnad->cq_num;
+	for (i = 0; i < size; i++) {
+		if (bnad_small_large_rxbufs) {
+			bnad->rit[i].large_rxq_id = (i << 1);
+			bnad->rit[i].small_rxq_id = (i << 1) + 1;
+		} else {
+			bnad->rit[i].large_rxq_id = i;
+		}
+	}
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rit_config_set(bnad->priv, BNAD_RIT_OFFSET, bnad->rit, size);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo = &bnad->rxq_table[rxq_id];
+	u16 rxbufs;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	bnad_alloc_rxbufs(rxqinfo);
+	rxbufs = BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+				   rxqinfo->skb_unmap_q.q_depth);
+}
+
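+/*
+ * Push the software configuration to the hardware: quiesce any existing
+ * queues, program the TxQ/RxQ/CQ and RIT entries, restore the MAC address,
+ * MTU, pause settings and VLAN/multicast filters, then set up the IBs.
+ */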
+static int bnad_config_hw(struct bnad *bnad)
+{
+	int i, err = 0;
+	u64 rxq_id_mask = 0;
+	struct sockaddr sa;
+	struct net_device *netdev = bnad->netdev;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Disable the RxF until later bringing port up. */
+	bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+	for (i = 0; i < bnad->txq_num; i++) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_txq(bnad, i);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_queues(bnad);
+
+	bnad_setup_rit(bnad);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txf_config_set(bnad->priv, BNAD_TX_FUNC_ID,
+			   &bnad->txf_table->txf_config);
+	for (i = 0; i < bnad->rxf_num; i++) {
+		bna_rxf_config_set(bnad->priv, i,
+				   &bnad->rxf_table[i].rxf_config);
+		bna_rxf_vlan_filter(bnad->priv, i, BNA_ENABLE);
+	}
+
+	spin_unlock_irq(&bnad->priv_lock);
+	/* Mailbox should be enabled before this! */
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	err = bnad_set_mac_address_locked(netdev, &sa);
+	spin_lock_irq(&bnad->priv_lock);
+	if (err || BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Receive broadcasts */
+	bna_rxf_broadcast(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+
+	bna_mtu_info(bnad->priv, netdev->mtu, bnad);
+	bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+
+	bna_rxf_mcast_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	bna_mcast_mac_reset_list(bnad->priv);
+
+	bnad_set_rx_mode_locked(bnad->netdev);
+
+	bnad_reconfig_vlans(bnad);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_ibs(bnad);
+	return 0;
+
+unlock_and_return:
+	if (BNAD_NOT_READY(bnad))
+		err = BNA_FAIL;
+	spin_unlock_irq(&bnad->priv_lock);
+	return err;
+}
+
+/* Note: bnad_cleanup doesn't free irqs */
+static void bnad_cleanup(struct bnad *bnad)
+{
+
+	kfree(bnad->rit);
+	bnad->rit = NULL;
+
+	kfree(bnad->txf_table);
+	bnad->txf_table = NULL;
+
+	kfree(bnad->rxf_table);
+	bnad->rxf_table = NULL;
+
+	bnad_free_ibs(bnad);
+	bnad_free_queues(bnad);
+}
+
+/* Should be called with rtnl_lock held. */
+static int bnad_init(struct bnad *bnad)
+{
+	int err;
+
+	err = bnad_alloc_ibs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_init_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad_ibs_init(bnad);
+
+	err = bnad_init_funcs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_alloc_unmap_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad->rit =
+		kzalloc(bnad->cq_num * sizeof(struct bna_rit_entry),
+			GFP_KERNEL);
+	if (!bnad->rit) {
+		err = -ENOMEM;
+		goto finished;
+	}
+
+	return 0;
+
+finished:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+static int bnad_enable_locked(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int err = 0;
+	uint i;
+	char message[BNA_MESSAGE_SIZE];
+
+	bnad->state = BNAD_S_OPENING;
+
+	err = bnad_init(bnad);
+	if (err) {
+		sprintf(message, "%s init failed %d", netdev->name, err);
+		DPRINTK(INFO, "%s", message);
+		bnad->state = BNAD_S_INIT;
+		return err;
+	}
+
+	err = bnad_config_hw(bnad);
+	if (err) {
+		sprintf(message, "%s config HW failed %d", netdev->name, err);
+		DPRINTK(INFO, "%s", message);
+		goto init_failed;
+	}
+
+	err = bnad_request_txrx_irqs(bnad);
+	if (err) {
+		sprintf(message, "%s requests Tx/Rx irqs failed: %d",
+			bnad->netdev->name, err);
+		DPRINTK(INFO, "%s", message);
+		goto init_failed;
+	}
+	bnad_napi_init(bnad);
+	bnad_napi_enable(bnad);
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_alloc_for_rxq(bnad, i);
+
+	bnad->state = BNAD_S_OPEN;
+	sprintf(message, "%s is opened", bnad->netdev->name);
+	DPRINTK(INFO, "%s", message);
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad)) {
+		/* Let bnad_error take care of the error. */
+		spin_unlock_irq(&bnad->priv_lock);
+		return 0;
+	}
+
+	/* RxF was disabled earlier. */
+	bna_rxf_enable(bnad->priv, BNAD_RX_FUNC_ID);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	return 0;
+
+init_failed:
+	bnad_cleanup(bnad);
+	bnad->state = BNAD_S_INIT;
+	return err;
+}
+
+/* Should be called with conf_lock held */
+static int bnad_open_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		err = bnad_enable_locked(bnad);
+		break;
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		sprintf(message, "%s is not ready yet: IOC down", netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	case BNAD_S_INIT_DISABLED:
+		bnad->state = BNAD_S_OPEN_DISABLED;
+		sprintf(message, "%s is not ready yet: IOC disabled",
+			netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	default:
+		BNA_ASSERT(0);
+		break;
+	}
+	return err;
+}
+
+int bnad_open_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (!err && (bnad->state == BNAD_S_OPEN))
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return err;
+}
+
+int bnad_open(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	sprintf(message, "%s open", netdev->name);
+	DPRINTK(INFO, "%s", message);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags)) {
+		sprintf(message, "%s is disabled", netdev->name);
+		DPRINTK(INFO, "%s", message);
+	} else
+		err = bnad_open_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static int bnad_disable_locked(struct bnad *bnad)
+{
+	int err = 0, i;
+	u64 rxq_id_mask = 0;
+
+	bnad_stop_data_path(bnad, 0);
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			goto cleanup;
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			goto cleanup;
+	}
+
+cleanup:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+int bnad_stop_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	return bnad_stop_locked_internal(netdev);
+}
+
+int bnad_stop(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	sprintf(message, "%s stop", netdev->name);
+	DPRINTK(INFO, "%s", message);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags)) {
+		sprintf(message, "%s port is disabled", netdev->name);
+		DPRINTK(INFO, "%s", message);
+	} else
+		err = bnad_stop_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int bnad_sw_reset_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	err = bnad_stop_locked_internal(netdev);
+	if (err) {
+		sprintf(message, "%s sw reset internal: stop failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (err) {
+		sprintf(message, "%s sw reset internal: open failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+	return 0;
+done:
+	DPRINTK(INFO, "%s", message);
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int bnad_sw_reset_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->state != BNAD_S_OPEN)
+		return 0;
+
+	bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	err = bnad_sw_reset_locked_internal(netdev);
+
+	if (err) {
+		sprintf(message, "%s sw reset: failed %d", bnad->netdev->name,
+			err);
+		DPRINTK(INFO, "%s", message);
+		return err;
+	}
+
+	/* After the reset, make sure we are in the OPEN state. */
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return 0;
+}
+
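+/*
+ * Prepare a GSO skb for hardware TSO: unclone the header if necessary and
+ * seed the TCP checksum with the pseudo-header sum (length excluded) for
+ * either IPv4 or IPv6.
+ */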
+static int bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+	int err;
+
+	BNA_ASSERT(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+		   skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6);
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err) {
+			bnad->stats.tso_err++;
+			return err;
+		}
+	}
+
+	/*
+	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
+	 * excluding the length field.
+	 */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		/* Do we really need these? */
+		iph->tot_len = 0;
+		iph->check = 0;
+
+		tcp_hdr(skb)->check =
+			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+					   IPPROTO_TCP, 0);
+		bnad->stats.tso4++;
+	} else {
+		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+		BNA_ASSERT(skb->protocol == htons(ETH_P_IPV6));
+		ipv6h->payload_len = 0;
+		tcp_hdr(skb)->check =
+			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+					 IPPROTO_TCP, 0);
+		bnad->stats.tso6++;
+	}
+
+	return 0;
+}
+
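+/*
+ * Transmit path: build TxQ work items (up to 4 Tx vectors each) for the skb
+ * header and its fragments, set the VLAN/priority, TSO and checksum-offload
+ * flags, DMA-map every piece and ring the TxQ producer doorbell.
+ */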
+netdev_tx_t bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	struct bnad_unmap_q *unmap_q;
+	u16 txq_prod, vlan_tag = 0;
+	unsigned int unmap_prod, wis, wis_used, wi_range;
+	unsigned int vectors, vect_id, i, acked;
+	int err;
+	dma_addr_t dma_addr;
+	struct bna_txq_entry *txqent;
+	bna_txq_wi_ctrl_flag_t flags;
+
+	if (unlikely
+	    (skb->len <= ETH_HLEN || skb->len > BNAD_TX_MAX_DATA_PER_WI)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+	unmap_q = &txqinfo->skb_unmap_q;
+
+	vectors = 1 + skb_shinfo(skb)->nr_frags;
+	if (vectors > BNAD_TX_MAX_VECTORS) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	wis = BNAD_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
+	acked = 0;
+	if (unlikely
+	    (wis > BNA_Q_FREE_COUNT(txq) ||
+	     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+		if ((u16) (*txqinfo->hw_consumer_index) !=
+		    txq->q.consumer_index &&
+		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+			acked = bnad_free_txbufs(txqinfo,
+						 (u16)(*txqinfo->
+							    hw_consumer_index));
+			bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+		} else
+			netif_stop_queue(netdev);
+
+		smp_mb();
+		/*
+		 * Check again to deal with race condition between
+		 * netif_stop_queue here, and netif_wake_queue in
+		 * interrupt handler which is not inside netif tx lock.
+		 */
+		if (likely
+		    (wis > BNA_Q_FREE_COUNT(txq) ||
+		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+			bnad->stats.netif_queue_stop++;
+			return NETDEV_TX_BUSY;
+		} else
+			netif_wake_queue(netdev);
+	}
+
+	unmap_prod = unmap_q->producer_index;
+	wis_used = 1;
+	vect_id = 0;
+	flags = 0;
+
+	txq_prod = txq->q.producer_index;
+	BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= txq->q.q_depth);
+	txqent->hdr.wi.reserved = 0;
+	txqent->hdr.wi.num_vectors = vectors;
+	txqent->hdr.wi.opcode =
+		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+		       BNA_TXQ_WI_SEND));
+
+	if (bnad_ipid_mode)
+		flags |= BNA_TXQ_WI_CF_IPID_MODE;
+
+	if (bnad->vlangrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = (u16) vlan_tx_tag_get(skb);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+	if (test_bit(BNAD_F_CEE_RUNNING, &bnad->flags)) {
+		vlan_tag =
+			(bnad->curr_priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+
+	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+	if (skb_is_gso(skb)) {
+		err = bnad_tso_prepare(bnad, skb);
+		if (err) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
+		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+		txqent->hdr.wi.l4_hdr_size_n_offset =
+			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+			      (tcp_hdrlen(skb) >> 2,
+			       skb_transport_offset(skb)));
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = 0;
+
+		txqent->hdr.wi.lso_mss = 0;
+
+		if (skb->protocol == htons(ETH_P_IP))
+			proto = ip_hdr(skb)->protocol;
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* XXX the nexthdr may not be TCP immediately. */
+			proto = ipv6_hdr(skb)->nexthdr;
+		}
+		if (proto == IPPROTO_TCP) {
+			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.tcpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+				   skb_transport_offset(skb) + tcp_hdrlen(skb));
+		} else if (proto == IPPROTO_UDP) {
+			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.udpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+				   skb_transport_offset(skb) +
+				   sizeof(struct udphdr));
+		} else {
+			err = skb_checksum_help(skb);
+			bnad->stats.csum_help++;
+			if (err) {
+				dev_kfree_skb(skb);
+				bnad->stats.csum_help_err++;
+				return NETDEV_TX_OK;
+			}
+		}
+	} else {
+		txqent->hdr.wi.lso_mss = 0;
+		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+	}
+
+	txqent->hdr.wi.flags = htons(flags);
+
+	txqent->hdr.wi.frame_length = htonl(skb->len);
+
+	unmap_q->unmap_array[unmap_prod].skb = skb;
+	BNA_ASSERT(skb_headlen(skb) <= BNAD_TX_MAX_DATA_PER_VECTOR);
+	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	dma_addr =
+		pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+			       PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+			   dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		if (++vect_id == BNAD_TX_MAX_VECTORS_PER_WI) {
+			vect_id = 0;
+			if (--wi_range)
+				txqent++;
+			else {
+				BNA_QE_INDX_ADD(txq_prod, wis_used,
+						txq->q.q_depth);
+				wis_used = 0;
+				BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent,
+						     wi_range);
+				BNA_ASSERT(wi_range &&
+					   wi_range <= txq->q.q_depth);
+			}
+			wis_used++;
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+		}
+
+		BNA_ASSERT(frag->size <= BNAD_TX_MAX_DATA_PER_VECTOR);
+		txqent->vector[vect_id].length = htons(frag->size);
+		BNA_ASSERT(unmap_q->unmap_array[unmap_prod].skb == NULL);
+		dma_addr =
+			pci_map_page(bnad->pcidev, frag->page,
+				     frag->page_offset, frag->size,
+				     PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+	}
+
+	unmap_q->producer_index = unmap_prod;
+	BNA_QE_INDX_ADD(txq_prod, wis_used, txq->q.q_depth);
+	txq->q.producer_index = txq_prod;
+
+	smp_mb();
+	bna_txq_prod_indx_doorbell(txq);
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index)
+		tasklet_schedule(&bnad->tx_free_tasklet);
+
+	return NETDEV_TX_OK;
+}
+
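+/*
+ * Aggregate the per-queue software counters and the MAC hardware statistics
+ * into the netdev statistics structure.
+ */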
+struct net_device_stats *bnad_get_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &bnad->net_stats;
+	struct cna_stats_mac_rx *rxstats = &bnad->hw_stats->mac_rx_stats;
+	struct cna_stats_mac_tx *txstats = &bnad->hw_stats->mac_tx_stats;
+	int i;
+
+	memset(net_stats, 0, sizeof(*net_stats));
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			net_stats->rx_packets += bnad->rxq_table[i].rx_packets;
+			net_stats->rx_bytes += bnad->rxq_table[i].rx_bytes;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			net_stats->tx_packets += bnad->txq_table[i].tx_packets;
+			net_stats->tx_bytes += bnad->txq_table[i].tx_bytes;
+		}
+	}
+	net_stats->rx_errors =
+		rxstats->rx_fcs_error + rxstats->rx_alignment_error +
+		rxstats->rx_frame_length_error + rxstats->rx_code_error +
+		rxstats->rx_undersize;
+	net_stats->tx_errors = txstats->tx_fcs_error + txstats->tx_undersize;
+	net_stats->rx_dropped = rxstats->rx_drop;
+	net_stats->tx_dropped = txstats->tx_drop;
+	net_stats->multicast = rxstats->rx_multicast;
+	net_stats->collisions = txstats->tx_total_collision;
+
+	net_stats->rx_length_errors = rxstats->rx_frame_length_error;
+	net_stats->rx_crc_errors = rxstats->rx_fcs_error;
+	net_stats->rx_frame_errors = rxstats->rx_alignment_error;
+	/* recv'r fifo overrun */
+	net_stats->rx_fifo_errors = bnad->hw_stats->rxf_stats[0].frame_drops;
+
+	return net_stats;
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_set_rx_mode_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (netdev->flags & IFF_PROMISC) {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+		bnad->config |= BNAD_CF_PROMISC;
+	} else {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_DISABLE);
+		bnad->config &= ~BNAD_CF_PROMISC;
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		if (!(bnad->config & BNAD_CF_ALLMULTI)) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_DISABLE);
+			bnad->config |= BNAD_CF_ALLMULTI;
+		}
+	} else {
+		if (bnad->config & BNAD_CF_ALLMULTI) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_ENABLE);
+			bnad->config &= ~BNAD_CF_ALLMULTI;
+		}
+	}
+
+	if (netdev->mc_count) {
+		struct mac *mcaddr_list;
+		struct dev_mc_list *mc;
+		int i;
+
+		mcaddr_list =
+			kzalloc((netdev->mc_count + 1) * sizeof(struct mac),
+				GFP_ATOMIC);
+		if (!mcaddr_list)
+			return;
+
+		mcaddr_list[0] = bna_bcast_addr;
+
+		mc = netdev->mc_list;
+		for (i = 1; mc && i < netdev->mc_count + 1; i++, mc = mc->next)
+			memcpy(&mcaddr_list[i], mc->dmi_addr,
+				sizeof(struct mac));
+
+		err = bna_rxf_mcast_mac_set_list(bnad->priv, BNAD_RX_FUNC_ID,
+			(const struct mac *)mcaddr_list,
+				 netdev->mc_count + 1);
+
+		/* XXX Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+
+		kfree(mcaddr_list);
+	}
+}
+
+static void bnad_set_rx_mode(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bnad_set_rx_mode_locked(netdev);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held. */
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 * mac_ptr,
+	unsigned int cmd)
+{
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+	enum bna_status_e(*ucast_mac_func) (struct bna_dev *bna_dev,
+		unsigned int rxf_id, const struct mac *mac_addr_ptr) = NULL;
+
+	WARN_ON(in_interrupt());
+	if (!is_valid_ether_addr(mac_ptr))
+		return -EINVAL;
+
+	switch (cmd) {
+	case BNAD_UCAST_MAC_SET:
+		ucast_mac_func = bna_rxf_ucast_mac_set;
+		break;
+	case BNAD_UCAST_MAC_ADD:
+		ucast_mac_func = bna_rxf_ucast_mac_add;
+		break;
+	case BNAD_UCAST_MAC_DEL:
+		ucast_mac_func = bna_rxf_ucast_mac_del;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	init_completion(&bnad->ucast_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = ucast_mac_func(bnad->priv, rxf_id, (const struct mac *)mac_ptr);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto ucast_mac_exit;
+	}
+	wait_for_completion(&bnad->ucast_comp);
+	err = bnad->ucast_comp_status;
+	if (err == BFI_LL_CMD_NOT_EXEC)
+		err = 0;
+
+ucast_mac_exit:
+	if (err) {
+		sprintf(message, "%s unicast MAC address command %d failed: %d",
+			bnad->netdev->name, cmd, err);
+		DPRINTK(INFO, "%s", message);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held. */
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int err;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	err = bnad_ucast_mac(bnad, BNAD_RX_FUNC_ID, (u8 *) sa->sa_data,
+			     BNAD_UCAST_MAC_SET);
+	if (err)
+		return err;
+
+	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+	return 0;
+}
+
+static int bnad_set_mac_address(struct net_device *netdev, void *addr)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	err = bnad_set_mac_address_locked(netdev, addr);
+	bnad_conf_unlock();
+	return err;
+
+}
+
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	WARN_ON(in_interrupt());
+
+	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+		return -EINVAL;
+
+	bnad_conf_lock();
+	netdev->mtu = new_mtu;
+	err = bnad_sw_reset_locked(netdev);
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static void bnad_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *grp)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	bnad->vlangrp = grp;
+	bnad_conf_unlock();
+}
+
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+static void bnad_vlan_rx_kill_vid(struct net_device *netdev,
+	unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_del(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+/* Should be called with priv_lock held. */
+static void bnad_reconfig_vlans(struct bnad *bnad)
+{
+	u16 vlan_id;
+
+	bna_rxf_vlan_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	if (bnad->vlangrp) {
+		for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
+			if (vlan_group_get_device(bnad->vlangrp, vlan_id))
+				bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+						 (unsigned int)vlan_id);
+		}
+	}
+}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnad_netpoll(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_cq_info *cqinfo;
+	int i;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		disable_irq(bnad->pcidev->irq);
+		bnad_isr(bnad->pcidev->irq, netdev);
+		enable_irq(bnad->pcidev->irq);
+	} else {
+		for (i = 0; i < bnad->cq_num; i++) {
+			cqinfo = &bnad->cq_table[i];
+			if (likely(napi_schedule_prep(&cqinfo->napi))) {
+				bnad_disable_rx_irq(bnad, cqinfo);
+				__napi_schedule(&cqinfo->napi);
+			}
+		}
+	}
+}
+#endif
+
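+/*
+ * Derive the queue and interrupt vector counts.  With MSI-X, one CQ is used
+ * per requested RxQ set (capped at BNAD_MAX_CQS), defaulting to one per
+ * online CPU (capped at BNAD_MAX_RXQSETS_USED), rounded down to a power of
+ * 2; the vector count is TxQs + CQs + 1 for the error/mailbox interrupt.
+ * In INTx mode a single CQ is used and no MSI-X vectors are reserved.
+ */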
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets)
+{
+	bnad->txq_num = BNAD_TXQ_NUM;
+	bnad->txf_num = 1;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (rxqsets) {
+			bnad->cq_num = rxqsets;
+			if (bnad->cq_num > BNAD_MAX_CQS)
+				bnad->cq_num = BNAD_MAX_CQS;
+		} else
+			bnad->cq_num =
+				min((uint) num_online_cpus(),
+				    (uint) BNAD_MAX_RXQSETS_USED);
+		/* Linux uses RSS (unlike VMware), so round the CQ count
+		 * down to a power of 2. */
+		if (!BNA_POWER_OF_2(bnad->cq_num))
+			BNA_TO_POWER_OF_2(bnad->cq_num);
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+
+		bnad->rxf_num = 1;
+		bnad->msix_num =
+			bnad->txq_num + bnad->cq_num +
+			BNAD_MSIX_ERR_MAILBOX_NUM;
+	} else {
+		bnad->cq_num = 1;
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+		bnad->rxf_num = 1;
+		bnad->msix_num = 0;
+	}
+}
+
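+/*
+ * Allocate the MSI-X table and enable MSI-X.  If fewer vectors are granted
+ * than requested, shrink the number of RxQ sets (keeping a power of 2) until
+ * the request fits and retry; on any other failure fall back to INTx mode
+ * with a single queue set.
+ */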
+static void bnad_enable_msix(struct bnad *bnad)
+{
+	int i, ret;
+
+	if (!(bnad->config & BNAD_CF_MSIX) || bnad->msix_table)
+		return;
+
+	bnad->msix_table =
+		kzalloc(bnad->msix_num * sizeof(struct msix_entry), GFP_KERNEL);
+	if (!bnad->msix_table)
+		goto intx_mode;
+
+	for (i = 0; i < bnad->msix_num; i++)
+		bnad->msix_table[i].entry = i;
+
+	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+	if (ret > 0) {
+		/* Not enough MSI-X vectors. */
+		int rxqsets = ret;
+
+		dev_err(&bnad->pcidev->dev,
+			"Tried to get %d MSI-X vectors, only got %d\n",
+			bnad->msix_num, ret);
+		BNA_TO_POWER_OF_2(rxqsets);
+		while (bnad->msix_num > ret && rxqsets) {
+			bnad_q_num_init(bnad, rxqsets);
+			rxqsets >>= 1;
+		}
+		if (bnad->msix_num <= ret) {
+			ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+					      bnad->msix_num);
+			if (ret) {
+				dev_err(&bnad->pcidev->dev,
+					"Enabling MSI-X failed: %d\n", ret);
+				goto intx_mode;
+			}
+		} else {
+			dev_err(&bnad->pcidev->dev,
+				"Enabling MSI-X failed: limited (%d) vectors\n",
+				ret);
+			goto intx_mode;
+		}
+	} else if (ret < 0) {
+		dev_err(&bnad->pcidev->dev, "Enabling MSI-X failed: %d\n", ret);
+		goto intx_mode;
+	}
+
+	dev_info(&bnad->pcidev->dev,
+		 "Enabling MSI-X succeeded with %d vectors, %s\n",
+		 bnad->msix_num,
+		 (bnad->cq_num > 1) ? "RSS is enabled" : "RSS is not enabled");
+	return;
+
+intx_mode:
+	dev_warn(&bnad->pcidev->dev, "Switching to INTx mode with no RSS\n");
+
+	kfree(bnad->msix_table);
+	bnad->msix_table = NULL;
+
+	bnad->config &= ~BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, 0);
+}
+
+static void bnad_disable_msix(struct bnad *bnad)
+{
+	if (bnad->config & BNAD_CF_MSIX) {
+		pci_disable_msix(bnad->pcidev);
+		kfree(bnad->msix_table);
+		bnad->msix_table = NULL;
+		bnad->config &= ~BNAD_CF_MSIX;
+	}
+}
+
+static void bnad_error(struct bnad *bnad)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	if (!test_and_clear_bit(BNAD_F_HWERROR, &bnad->flags)) {
+		spin_unlock_irq(&bnad->priv_lock);
+		return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		bnad_stop_data_path(bnad, 1);
+		bnad_cleanup(bnad);
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BNA_ASSERT(0);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
+static void bnad_resume_after_reset(struct bnad *bnad)
+{
+	int err;
+	struct net_device *netdev = bnad->netdev;
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT;
+
+		bna_port_mac_get(bnad->priv, (struct mac *)bnad->perm_addr);
+		BNA_ASSERT(netdev->addr_len == sizeof(bnad->perm_addr));
+		memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+		if (is_zero_ether_addr(netdev->dev_addr))
+			memcpy(netdev->dev_addr, bnad->perm_addr,
+			       netdev->addr_len);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		err = bnad_enable_locked(bnad);
+		if (err) {
+			sprintf(message,
+				"%s bnad_enable failed after reset: %d",
+				bnad->netdev->name, err);
+			DPRINTK(INFO, "%s", message);
+		} else {
+			bnad_port_admin_locked(bnad, BNA_ENABLE);
+		}
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BNA_ASSERT(0);
+		/* fall through */
+	default:
+		break;
+	}
+
+}
+
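+/*
+ * Tasklet that reclaims completed Tx buffers outside of hard interrupt
+ * context.  The BNAD_TXQ_FREE_SENT bit serializes it against the Tx
+ * completion path in bnad_tx().
+ */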
+static void bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+	struct bnad *bnad = (struct bnad *)bnad_ptr;
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	unsigned int acked;
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index &&
+	    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+		acked = bnad_free_txbufs(txqinfo,
+					 (u16)(*txqinfo->hw_consumer_index));
+		bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+	}
+}
+
+static void bnad_cee_reconfig_prio(struct bnad *bnad, u8 cee_linkup,
+	unsigned int prio)
+
+	if (prio != bnad->curr_priority) {
+		bnad_sw_reset_locked_internal(bnad->netdev);
+	} else {
+		spin_lock_irq(&bnad->priv_lock);
+		if (!cee_linkup)
+			clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		else
+			set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		spin_unlock_irq(&bnad->priv_lock);
+	}
+}
+
+static void bnad_link_state_notify(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+	unsigned int prio = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->state != BNAD_S_OPEN) {
+		sprintf(message, "%s link state notification in state %d",
+			netdev->name, bnad->state);
+		DPRINTK(INFO, "%s", message);
+		return;
+	}
+
+	spin_lock_irq(&bnad->priv_lock);
+	link_state = bnad->link_state;
+	cee_linkup = bnad->cee_linkup;
+	if (cee_linkup)
+		prio = bnad->priority;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (link_state == BNAD_LS_UP) {
+		bnad_cee_reconfig_prio(bnad, cee_linkup, prio);
+		if (!netif_carrier_ok(netdev)) {
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			netif_carrier_off(netdev);
+			bnad->stats.netif_queue_stop++;
+		}
+	}
+}
+
+static void bnad_work(struct work_struct *work)
+{
+	struct bnad *bnad = container_of(work, struct bnad, work);
+	unsigned long work_flags;
+
+	bnad_conf_lock();
+
+	spin_lock_irq(&bnad->priv_lock);
+	work_flags = bnad->work_flags;
+	bnad->work_flags = 0;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (work_flags & BNAD_WF_ERROR)
+		bnad_error(bnad);
+	if (work_flags & BNAD_WF_RESETDONE)
+		bnad_resume_after_reset(bnad);
+
+	if (work_flags & BNAD_WF_LS_NOTIFY)
+		bnad_link_state_notify(bnad);
+
+	bnad_conf_unlock();
+}
+
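+/*
+ * Periodic statistics timer: requests a hardware statistics update, adjusts
+ * the Rx interrupt coalescing timer from the observed packet rate when
+ * dynamic coalescing is enabled, and refills any RxQ that has drained below
+ * the refill threshold.
+ */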
+static void bnad_stats_timeo(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	int i;
+	struct bnad_rxq_info *rxqinfo;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_stats_get(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->rx_dyn_coalesce_on) {
+		u8 cls_timer;
+		struct bnad_cq_info *cq;
+		for (i = 0; i < bnad->cq_num; i++) {
+			cq = &bnad->cq_table[i];
+
+			if ((cq->pkt_rate.small_pkt_cnt == 0) &&
+			    (cq->pkt_rate.large_pkt_cnt == 0))
+				continue;
+
+			cls_timer =
+				bna_calc_coalescing_timer(bnad->priv,
+							  &cq->pkt_rate);
+
+			/* For the NAPI version, the coalescing timer needs
+			 * to be stored. */
+			cq->rx_coalescing_timeo = cls_timer;
+
+			bna_ib_coalescing_timer_set(bnad->priv, &cq->ib,
+						    cls_timer);
+		}
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		if (!(BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+					rxqinfo->skb_unmap_q.q_depth) >>
+		      BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) {
+			if (test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags))
+				continue;
+			bnad_alloc_rxbufs(rxqinfo);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+		}
+	}
+}
+
+static void bnad_free_ioc_mem(struct bnad *bnad)
+{
+	enum bna_dma_mem_type i;
+
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		if (bnad->ioc_meminfo[i].kva && bnad->ioc_meminfo[i].dma)
+			pci_free_consistent(bnad->pcidev,
+					    bnad->ioc_meminfo[i].len,
+					    bnad->ioc_meminfo[i].kva,
+					    *(dma_addr_t *)
+					    &bnad->ioc_meminfo[i].dma);
+		else if (bnad->ioc_meminfo[i].kva)
+			vfree(bnad->ioc_meminfo[i].kva);
+		bnad->ioc_meminfo[i].kva = NULL;
+	}
+}
+
+/* The following IOC callback functions are called with priv_lock held. */
+
+void bna_iocll_enable_cbfn(void *arg, enum bfa_status error)
+{
+	struct bnad *bnad = arg;
+
+	if (!error) {
+		bnad->work_flags &= ~BNAD_WF_LS_NOTIFY;
+		bnad->work_flags |= BNAD_WF_RESETDONE;
+
+		if (bnad->state != BNAD_S_UNLOADING)
+			schedule_work(&bnad->work);
+	}
+
+	bnad->ioc_comp_status = error;
+	complete(&bnad->ioc_comp);
+}
+
+void bna_iocll_disable_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	complete(&bnad->ioc_comp);
+}
+
+void bna_iocll_hbfail_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	bnad_hw_error(bnad, BFA_STATUS_IOC_FAILURE);
+}
+
+void bna_iocll_reset_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+	u32 int_status, int_mask;
+	unsigned int irq;
+
+	/* Clear the status */
+	bna_intr_status_get(bnad->priv, &int_status);
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (test_and_clear_bit(BNAD_F_MBOX_IRQ_DISABLED,
+		    &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			enable_irq(irq);
+		}
+	}
+
+	int_mask = ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS);
+	bna_intx_enable(bnad->priv, int_mask);
+}
+
+static void bnad_ioc_timeout(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_timer(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->state != BNAD_S_UNLOADING)
+		mod_timer(&bnad->ioc_timer,
+			  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+}
+
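+/* Attach the CEE module: allocate its DMA area and register the callbacks. */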
+s32 bnad_cee_attach(struct bnad *bnad)
+{
+	u8 *dma_kva;
+	dma_addr_t dma_pa;
+	struct bfa_cee *cee = &bnad->cee;
+
+	memset(cee, 0, sizeof(struct bfa_cee));
+
+	/* Allocate memory for dma */
+	dma_kva =
+		pci_alloc_consistent(bnad->pcidev, bfa_cee_meminfo(), &dma_pa);
+	if (dma_kva == NULL)
+		return -ENOMEM;
+
+	/* Ugly... need to remove once CAL is fixed. */
+	((struct bna_dev *) bnad->priv)->cee = cee;
+
+	bnad->cee_cbfn.get_attr_cbfn = bnad_cee_get_attr_cb;
+	bnad->cee_cbfn.get_stats_cbfn = bnad_cee_get_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = bnad_cee_reset_stats_cb;
+
+	/* Invoke cee attach function */
+	bfa_cee_attach(cee, &bnad->priv->ioc, bnad, bnad->trcmod, bnad->logmod);
+	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
+	return 0;
+}
+
+static void bnad_cee_detach(struct bnad *bnad)
+{
+	struct bfa_cee *cee = &bnad->cee;
+
+	if (cee->attr_dma.kva) {
+		pci_free_consistent(bnad->pcidev, bfa_cee_meminfo(),
+				    cee->attr_dma.kva, cee->attr_dma.pa);
+	}
+	bfa_cee_detach(&bnad->cee);
+}
+
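+/*
+ * Per-port initialization: set up queue counts, deferred work, timers and
+ * the BNA/trace handles, allocate DMA-able statistics and IOC memory,
+ * attach CEE, set up interrupts, then enable the IOC and wait for it to
+ * come up.
+ */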
+static int bnad_priv_init(struct bnad *bnad)
+{
+	dma_addr_t dma_addr;
+	struct bna_dma_addr bna_dma_addr;
+	int err = 0, i;
+	struct bfa_pcidev pcidev_info;
+	u32 intr_mask;
+
+	if (bnad_msix)
+		bnad->config |= BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, bnad_rxqsets_used);
+
+	bnad->work_flags = 0;
+	INIT_WORK(&bnad->work, bnad_work);
+
+	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+		     (unsigned long)bnad);
+
+	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
+		    (unsigned long)bnad);
+
+	bnad->tx_coalescing_timeo = BNAD_TX_COALESCING_TIMEO;
+	bnad->tx_interpkt_count = BNAD_TX_INTERPKT_COUNT;
+
+	bnad->rx_coalescing_timeo = BNAD_RX_COALESCING_TIMEO;
+	bnad->rx_interpkt_count = BNAD_RX_INTERPKT_COUNT;
+	bnad->rx_interpkt_timeo = BNAD_RX_INTERPKT_TIMEO;
+
+	bnad->rx_dyn_coalesce_on = BNA_TRUE;
+
+	bnad->rx_csum = 1;
+	bnad->pause_config.tx_pause = 0;
+	bnad->pause_config.rx_pause = 0;
+
+	/* XXX could be vmalloc? */
+	bnad->trcmod = kzalloc(sizeof(struct bfa_trc_mod), GFP_KERNEL);
+	if (!bnad->trcmod) {
+		printk(KERN_ERR "port %u failed allocating trace buffer!\n",
+		       bnad->bna_id);
+		return -ENOMEM;
+	}
+
+	bfa_trc_init(bnad->trcmod);
+
+	bnad->logmod = NULL;
+
+	bnad->priv = kzalloc(bna_get_handle_size(), GFP_KERNEL);
+	if (!bnad->priv) {
+		printk(KERN_ERR "port %u failed allocating memory for bna\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_trcmod;
+	}
+	bnad->priv_stats =
+		pci_alloc_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+				     &dma_addr);
+	if (!bnad->priv_stats) {
+		printk(KERN_ERR
+		       "port %u failed allocating memory for bna stats\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_priv_mem;
+	}
+	pci_unmap_addr_set(bnad, priv_stats_dma, dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &bna_dma_addr);
+	bna_init(bnad->priv, (void *)bnad->bar0, bnad->priv_stats, bna_dma_addr,
+		 bnad->trcmod, bnad->logmod);
+	bna_all_stats_get(bnad->priv, &bnad->hw_stats);
+
+	spin_lock_init(&bnad->priv_lock);
+	init_MUTEX(&bnad->conf_sem);
+	bnad->priv_cbfn.ucast_set_cb = bnad_ucast_set_cb;
+	bnad->priv_cbfn.txq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.rxq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.link_up_cb = bnad_link_up_cb;
+	bnad->priv_cbfn.link_down_cb = bnad_link_down_cb;
+	bnad->priv_cbfn.stats_get_cb = bnad_stats_get_cb;
+	bnad->priv_cbfn.hw_error_cb = bnad_hw_error_cb;
+	bnad->priv_cbfn.lldp_get_cfg_cb = bnad_lldp_get_cfg_cb;
+
+	bna_register_callback(bnad->priv, &bnad->priv_cbfn, bnad);
+
+	bna_iocll_meminfo(bnad->priv, bnad->ioc_meminfo);
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		switch (i) {
+		case BNA_KVA_MEM_T_FWTRC:
+			bnad->ioc_meminfo[i].kva =
+				vmalloc(bnad->ioc_meminfo[i].len);
+			break;
+		default:
+			bnad->ioc_meminfo[i].kva =
+				pci_alloc_consistent(bnad->pcidev,
+						     bnad->ioc_meminfo[i].len,
+						     (dma_addr_t *)
+						     &bnad->ioc_meminfo[i].dma);
+			break;
+		}
+		if (!bnad->ioc_meminfo[i].kva) {
+			printk(KERN_ERR
+			       "port %u failed allocating %u "
+			       "bytes memory for IOC\n",
+			       bnad->bna_id, bnad->ioc_meminfo[i].len);
+			err = -ENOMEM;
+			goto free_ioc_mem;
+		} else
+			memset(bnad->ioc_meminfo[i].kva, 0,
+			       bnad->ioc_meminfo[i].len);
+	}
+
+	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+	pcidev_info.device_id = bnad->pcidev->device;
+	pcidev_info.pci_bar_kva = bnad->bar0;
+	bna_iocll_attach(bnad->priv, bnad, bnad->ioc_meminfo, &pcidev_info,
+			 bnad->trcmod, NULL, bnad->logmod);
+
+	err = bnad_cee_attach(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u cee_attach failed: %d\n", bnad->bna_id,
+		       err);
+		goto iocll_detach;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bnad_enable_msix(bnad);
+	else
+		dev_info(&bnad->pcidev->dev, "Working in INTx mode, no RSS\n");
+	bna_intx_disable(bnad->priv, &intr_mask);
+	err = bnad_request_mbox_irq(bnad);
+	if (err)
+		goto disable_msix;
+
+	setup_timer(&bnad->ioc_timer, bnad_ioc_timeout,
+		    (unsigned long)bnad);
+	mod_timer(&bnad->ioc_timer, jiffies +
+		  msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+
+	bnad_conf_lock();
+	bnad->state = BNAD_S_START;
+
+	init_completion(&bnad->ioc_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_enable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	wait_for_completion(&bnad->ioc_comp);
+
+	if (!bnad->ioc_comp_status) {
+		bnad->state = BNAD_S_INIT;
+		bna_port_mac_get(bnad->priv, (struct mac *)bnad->perm_addr);
+	} else {
+		bnad->state = BNAD_S_INIT_DOWN;
+	}
+	bnad_conf_unlock();
+
+	return 0;
+
+disable_msix:
+	bnad_disable_msix(bnad);
+	bnad_cee_detach(bnad);
+iocll_detach:
+	bna_iocll_detach(bnad->priv);
+free_ioc_mem:
+	bnad_free_ioc_mem(bnad);
+	bna_uninit(bnad->priv);
+	pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE, bnad->priv_stats,
+			    pci_unmap_addr(bnad, priv_stats_dma));
+	bnad->priv_stats = NULL;
+free_priv_mem:
+	kfree(bnad->priv);
+	bnad->priv = NULL;
+free_trcmod:
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+
+	return err;
+}
+
+static void bnad_priv_uninit(struct bnad *bnad)
+{
+	int i;
+	enum bna_status_e err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->priv) {
+
+		init_completion(&bnad->ioc_comp);
+
+		for (i = 0; i < 10; i++) {
+			spin_lock_irq(&bnad->priv_lock);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err || err == BNA_BUSY);
+			if (!err)
+				break;
+			msleep(1000);
+		}
+		if (err) {
+			/* Probably firmware crashed. */
+			sprintf(message,
+				"bna_iocll_disable failed, "
+				"clean up and try again");
+			DPRINTK(INFO, "%s", message);
+			spin_lock_irq(&bnad->priv_lock);
+			bna_cleanup(bnad->priv);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err);
+		}
+		wait_for_completion(&bnad->ioc_comp);
+
+		sprintf(message, "port %u IOC is disabled", bnad->bna_id);
+		DPRINTK(INFO, "%s", message);
+
+		bnad->state = BNAD_S_UNLOADING;
+
+		/* Stop the timer after disabling IOC. */
+		del_timer_sync(&bnad->ioc_timer);
+		bnad_free_ioc_mem(bnad);
+		bna_iocll_detach(bnad->priv);
+
+		flush_scheduled_work();
+		bnad_free_mbox_irq(bnad);
+
+		bnad_disable_msix(bnad);
+
+		bnad_cee_detach(bnad);
+
+		bna_uninit(bnad->priv);
+		if (bnad->priv_stats) {
+			pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+					    bnad->priv_stats,
+					    pci_unmap_addr(bnad,
+							   priv_stats_dma));
+			bnad->priv_stats = NULL;
+		}
+		kfree(bnad->priv);
+		bnad->priv = NULL;
+	}
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+}
+
+static struct pci_device_id bnad_pci_id_table[] = {
+	{
+	 .vendor = PCI_VENDOR_ID_BROCADE,
+	 .device = PCI_DEVICE_ID_BROCADE_CATAPULT,
+	 .subvendor = PCI_ANY_ID,
+	 .subdevice = PCI_ANY_ID,
+	 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+	 .class_mask = 0xffff00},
+	{0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static int __devinit bnad_pci_probe(struct pci_dev *pdev,
+	const struct pci_device_id *pcidev_id)
+{
+	int err, using_dac;
+	struct net_device *netdev;
+	struct bnad *bnad;
+	unsigned long mmio_start, mmio_len;
+	static u32 bna_id;
+
+	printk(KERN_INFO "bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+	if (!bfad_get_firmware_buf(pdev)) {
+		printk(KERN_WARNING "Failed to load Firmware Image!\n");
+		return -ENODEV;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, BNAD_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed: %d\n", err);
+		goto disable_device;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (!err)
+			err = pci_set_consistent_dma_mask(pdev,
+							  DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"setting 32bit DMA masks failed: %d\n", err);
+			goto release_regions;
+		}
+		using_dac = 0;
+	}
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct bnad));
+	if (!netdev) {
+		dev_err(&pdev->dev, "alloc_etherdev failed\n");
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	pci_set_drvdata(pdev, netdev);
+
+	bnad = netdev_priv(netdev);
+
+	memset(bnad, 0, sizeof(struct bnad));
+
+	bnad->netdev = netdev;
+	bnad->pcidev = pdev;
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bnad->bar0 = ioremap_nocache(mmio_start, mmio_len);
+	if (!bnad->bar0) {
+		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+		err = -ENOMEM;
+		goto free_devices;
+	}
+	printk(KERN_INFO "bar0 mapped to %p, len %lu\n", bnad->bar0, mmio_len);
+
+	netdev->netdev_ops = &bnad_netdev_ops;
+
+	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_IPV6_CSUM;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+
+#ifdef BNAD_VLAN_FEATURES
+	netdev->vlan_features = netdev->features;
+#endif
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |=
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len - 1;
+
+	bnad_set_ethtool_ops(netdev);
+
+	bnad->bna_id = bna_id;
+	err = bnad_priv_init(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
+		goto unmap_bar0;
+	}
+
+	BNA_ASSERT(netdev->addr_len == ETH_ALEN);
+	memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+	memcpy(netdev->dev_addr, bnad->perm_addr, netdev->addr_len);
+
+	netif_carrier_off(netdev);
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR "port %u register_netdev failed: %d\n",
+		       bnad->bna_id, err);
+		goto bnad_device_uninit;
+	}
+	bna_id++;
+	return 0;
+
+bnad_device_uninit:
+	bnad_priv_uninit(bnad);
+unmap_bar0:
+	iounmap(bnad->bar0);
+free_devices:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+release_regions:
+	pci_release_regions(pdev);
+disable_device:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void __devexit bnad_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct bnad *bnad;
+
+	if (!netdev)
+		return;
+
+	printk(KERN_INFO "%s bnad_pci_remove\n", netdev->name);
+	bnad = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	bnad_priv_uninit(bnad);
+	iounmap(bnad->bar0);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver bnad_pci_driver = {
+	.name = BNAD_NAME,
+	.id_table = bnad_pci_id_table,
+	.probe = bnad_pci_probe,
+	.remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init bnad_module_init(void)
+{
+
+	printk(KERN_INFO "Brocade 10G Ethernet driver\n");
+
+	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+	return pci_register_driver(&bnad_pci_driver);
+}
+
+static void __exit bnad_module_exit(void)
+{
+	pci_unregister_driver(&bnad_pci_driver);
+
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.h net-next-2.6-mod/drivers/net/bna/bnad.h
--- net-next-2.6-orig/drivers/net/bna/bnad.h	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.h	2009-11-23 13:36:23.225864000 -0800
@@ -0,0 +1,349 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef _BNAD_H_
+#define _BNAD_H_
+
+#include "cee/bfa_cee.h"
+#include "bna.h"
+
+#include "bnad_compat.h"
+
+#define BNAD_MAX_Q_DEPTH	0x10000
+#define BNAD_MIN_Q_DEPTH	0x200
+
+#define BNAD_TXQ_NUM		1
+#define BNAD_TX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_TXQ	2048
+
+#define BNAD_MAX_RXQS		64
+#define BNAD_MAX_RXQSETS_USED	16
+#define BNAD_RX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_RXQ	2048
+
+#define BNAD_MAX_CQS		64
+#define BNAD_MAX_RXQS_PER_CQ	2
+
+#define BNAD_MSIX_ERR_MAILBOX_NUM	1
+
+#define BNAD_INTX_MAX_IB_NUM	16
+#define BNAD_INTX_IB_NUM	2	/* 1 for Tx, 1 for Rx */
+#define BNAD_INTX_TX_IB_ID	0
+#define BNAD_INTX_RX_IB_ID	1
+
+#define BNAD_QUEUE_NAME_SIZE	16
+
+#define BNAD_JUMBO_MTU		9000
+
+#define BNAD_COALESCING_TIMER_UNIT	5	/* 5us */
+#define BNAD_MAX_COALESCING_TIMEO	0xFF	/* in 5us units */
+#define BNAD_MAX_INTERPKT_COUNT		0xFF
+#define BNAD_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
+
+#define BNAD_TX_COALESCING_TIMEO	20	/* 20 * 5 = 100us */
+#define BNAD_TX_INTERPKT_COUNT		32
+
+#define BNAD_RX_COALESCING_TIMEO	12	/* 12 * 5 = 60us */
+#define BNAD_RX_INTERPKT_COUNT		6
+#define BNAD_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
+
+#define BNAD_SMALL_RXBUF_SIZE	128
+
+#define BNAD_RIT_OFFSET		0
+#define BNAD_MULTICAST_RXQ_ID	0
+
+#define BNAD_NETIF_WAKE_THRESHOLD	8
+
+#define BNAD_TX_MAX_VECTORS		255
+#define BNAD_TX_MAX_VECTORS_PER_WI	4
+#define BNAD_TX_MAX_DATA_PER_WI		0xFFFFFF	/* 24 bits */
+#define BNAD_TX_MAX_DATA_PER_VECTOR	0x3FFF	/* 14 bits */
+#define BNAD_TX_MAX_WRR_QUOTA		0xFFF	/* 12 bits */
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
+
+#define BNAD_NOT_READY(_bnad)	test_bit(BNAD_F_HWERROR, &(_bnad)->flags)
+#define BNAD_ADMIN_DOWN(_bnad)	(!netif_running((_bnad)->netdev) ||	\
+	test_bit(BNAD_F_BCU_DISABLED, &(_bnad)->flags))
+
+#define BNAD_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)	\
+	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define bnad_conf_lock()	down(&bnad->conf_sem)
+#define bnad_conf_unlock()	up(&bnad->conf_sem)
+
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_ct;
+extern u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+
+struct bnad_skb_unmap {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+	u32 producer_index;
+	u32 consumer_index;
+	struct bnad_skb_unmap *unmap_array;
+	u32 q_depth;
+};
+
+struct bnad_ib_entry {
+	struct bna_ib *ib;
+	void *ib_seg_addr;
+	struct bna_ib_config ib_config;
+};
+
+struct bnad_txq_info {
+	unsigned long flags;
+#define BNAD_TXQ_FREE_SENT	0
+	struct bna_txq txq;
+	struct bna_ib ib;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct bnad *bnad;
+	volatile u32 *hw_consumer_index;
+	struct bna_txq_config txq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_rxq_info {
+	unsigned long flags;
+#define BNAD_RXQ_REFILL		0
+	struct bna_rxq rxq;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_packets_with_error;
+	u64 rxbuf_alloc_failed;
+	struct bnad *bnad;
+	u32 rxq_id;
+	struct bna_rxq_config rxq_config;
+} ____cacheline_aligned;
+
+struct bnad_cq_info {
+	struct bna_cq cq;
+	struct bna_ib ib;
+	struct bnad *bnad;
+	struct bna_pkt_rate pkt_rate;
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	volatile u32 *hw_producer_index;
+	struct napi_struct napi;
+	u32 cq_id;
+	struct bna_cq_config cq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_txf_info {
+	u32 txf_id;
+	struct bna_txf_config txf_config;
+};
+
+struct bnad_rxf_info {
+	u32 rxf_id;
+	struct bna_rxf_config rxf_config;
+};
+
+enum bnad_ucast_cmd {
+	BNAD_UCAST_MAC_SET,
+	BNAD_UCAST_MAC_ADD,
+	BNAD_UCAST_MAC_DEL
+};
+
+enum bnad_state {
+	BNAD_S_START = 0,
+	BNAD_S_INIT = 1,
+	BNAD_S_INIT_DOWN = 2,
+	BNAD_S_INIT_DISABLING = 3,
+	BNAD_S_INIT_DISABLED = 4,
+	BNAD_S_OPENING = 5,
+	BNAD_S_OPEN = 6,
+	BNAD_S_OPEN_DOWN = 7,
+	BNAD_S_OPEN_DISABLING = 8,
+	BNAD_S_OPEN_DISABLED = 9,
+	BNAD_S_CLOSING = 10,
+	BNAD_S_UNLOADING = 11
+};
+
+enum bnad_link_state {
+	BNAD_LS_DOWN = 0,
+	BNAD_LS_UP = 1
+};
+struct bnad {
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	struct bna_dev *priv;
+
+	enum bnad_state state;
+	unsigned long flags;
+#define BNAD_F_BCU_DISABLED		0
+#define BNAD_F_HWERROR			1
+#define BNAD_F_MBOX_IRQ_DISABLED	2
+#define BNAD_F_CEE_RUNNING		3
+
+	unsigned int config;
+#define BNAD_CF_MSIX		0x01
+#define BNAD_CF_PROMISC		0x02
+#define BNAD_CF_ALLMULTI		0x04
+#define BNAD_CF_TXQ_DEPTH	0x10
+#define BNAD_CF_RXQ_DEPTH	0x20
+
+	unsigned int priority;
+	unsigned int curr_priority;	/* currently applied priority */
+
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+
+	uint txq_num;
+	uint txq_depth;
+	struct bnad_txq_info *txq_table;
+
+	struct tasklet_struct tx_free_tasklet;	/* For Tx cleanup */
+
+	uint rxq_num;
+	uint rxq_depth;
+	struct bnad_rxq_info *rxq_table;
+	uint cq_num;
+	struct bnad_cq_info *cq_table;
+
+	struct vlan_group *vlangrp;
+
+	u32 rx_csum;
+
+	uint msix_num;
+	struct msix_entry *msix_table;
+
+	uint ib_num;
+	struct bnad_ib_entry *ib_table;
+
+	struct bna_rit_entry *rit;	/* RxQ Indirection Table */
+
+	spinlock_t priv_lock ____cacheline_aligned;
+
+	uint txf_num;
+	struct bnad_txf_info *txf_table;
+	uint rxf_num;
+	struct bnad_rxf_info *rxf_table;
+
+	struct timer_list stats_timer;
+	struct net_device_stats net_stats;
+
+	u8 tx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 tx_interpkt_count;
+
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 rx_interpkt_count;
+	u8 rx_interpkt_timeo;	/* 4 bits, unit is 0.5usec. */
+
+	u8 rx_dyn_coalesce_on;	/* Rx Dynamic Intr Moderation Flag */
+
+	u8 ref_count;
+
+	u8 lldp_comp_status;
+	u8 cee_stats_comp_status;
+	u8 cee_reset_stats_status;
+	u8 ucast_comp_status;
+	u8 qstop_comp_status;
+
+	int ioc_comp_status;
+
+	struct bna_pause_config pause_config;
+
+	struct bna_stats *hw_stats;
+	struct bnad_drv_stats stats;
+
+	struct work_struct work;
+	unsigned int work_flags;
+#define BNAD_WF_ERROR		0x1
+#define BNAD_WF_RESETDONE	0x2
+#define BNAD_WF_CEE_PRIO	0x4
+#define BNAD_WF_LS_NOTIFY	0x8
+
+	struct completion lldp_comp;
+	struct completion cee_stats_comp;
+	struct completion cee_reset_stats_comp;
+	struct completion ucast_comp;
+	struct completion qstop_comp;
+	struct completion ioc_comp;
+
+	u32 bna_id;
+	u8 __iomem *bar0;	/* registers */
+	unsigned char perm_addr[ETH_ALEN];
+
+	void *priv_stats;
+	DECLARE_PCI_UNMAP_ADDR(priv_stats_dma)
+
+	struct bfa_trc_mod *trcmod;
+	struct bfa_log_mod *logmod;
+	struct bna_meminfo ioc_meminfo[BNA_MEM_T_MAX];
+	struct timer_list ioc_timer;
+	struct semaphore    conf_sem;
+
+	struct bna_mbox_cbfn priv_cbfn;
+
+	char adapter_name[64];
+	char port_name[64];
+
+	/* CEE Stuff */
+	struct bfa_cee_cbfn cee_cbfn;
+	struct bfa_cee cee;
+
+	struct list_head list_entry;
+};
+
+extern uint bnad_rxqs_per_cq;
+
+extern struct semaphore bnad_list_sem;
+extern struct list_head bnad_list;
+
+int bnad_open(struct net_device *netdev);
+int bnad_stop(struct net_device *netdev);
+int bnad_stop_locked(struct net_device *netdev);
+int bnad_open_locked(struct net_device *netdev);
+int bnad_sw_reset_locked(struct net_device *netdev);
+int bnad_ioc_disabling_locked(struct bnad *bnad);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_ioctl_init(void);
+void bnad_ioctl_exit(void);
+struct net_device_stats *bnad_get_stats(struct net_device *netdev);
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 *mac_ptr,
+		   unsigned int cmd);
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss);
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id);
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask);
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_cq_init(struct bnad *bnad, uint cq_id);
+void bnad_setup_cq(struct bnad *bnad, uint cq_id);
+void bnad_setup_ib(struct bnad *bnad, uint ib_id);
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id);
+void bnad_free_ib(struct bnad *bnad, uint ib_id);
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id);
+u32 bnad_get_msglevel(struct net_device *netdev);
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel);
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth);
+void bnad_free_cq(struct bnad *bnad, uint cq_id);
+void bnad_add_to_list(struct bnad *bnad);
+void bnad_remove_from_list(struct bnad *bnad);
+struct bnad *get_bnadev(int bna_id);
+int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _BNAD_H_ */


* Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
@ 2009-11-13  3:46 Rasesh Mody
  0 siblings, 0 replies; 30+ messages in thread
From: Rasesh Mody @ 2009-11-13  3:46 UTC (permalink / raw)
  To: netdev; +Cc: adapter_linux_open_src_team

From: Rasesh Mody <rmody@brocade.com>

This is patch 1/6 which contains linux driver source for
Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
Source is based against net-next-2.6.

We wish this patch to be considered for inclusion in net-next-2.6

Signed-off-by: Rasesh Mody <rmody@brocade.com>
---
 bnad.c | 3710 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 bnad.h |  350 ++++++
 2 files changed, 4060 insertions(+)

diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.c net-next-2.6-mod/drivers/net/bna/bnad.c
--- net-next-2.6-orig/drivers/net/bna/bnad.c	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.c	2009-11-12 19:03:38.439405000 -0800
@@ -0,0 +1,3710 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ *  bnad.c  Brocade 10G PCIe Ethernet driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/workqueue.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pm.h>
+#include <linux/random.h>
+
+#include <net/checksum.h>
+
+#include "bnad.h"
+#include <cna.h>
+#include "bna_iocll.h"
+#include "bna_intr.h"
+#include "bnad_defs.h"
+
+
+
+
+#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
+static uint bnad_msix = 1;
+static uint bnad_small_large_rxbufs = 1;
+static uint bnad_rxqsets_used;
+static uint bnad_ipid_mode;
+static uint bnad_vlan_strip = 1;
+static uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
+static uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
+static uint bnad_log_level;
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");
+
+uint bnad_rxqs_per_cq = 2;
+
+const char *bnad_states[] = {
+	"START",
+	"INIT",
+	"INIT_DOWN",
+	"INIT_DISABLING",
+	"INIT_DISABLED",
+	"OPENING",
+	"OPEN",
+	"OPEN_DOWN",
+	"OPEN_DISABLING",
+	"OPEN_DISABLED",
+	"CLOSING",
+	"UNLOADING"
+};
+
+
+static void bnad_disable_msix(struct bnad *bnad);
+static void bnad_free_ibs(struct bnad *bnad);
+static void bnad_set_rx_mode(struct net_device *netdev);
+static void bnad_set_rx_mode_locked(struct net_device *netdev);
+static void bnad_reconfig_vlans(struct bnad *bnad);
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets);
+static int bnad_set_mac_address(struct net_device *netdev, void *addr);
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr);
+static int bnad_disable_locked(struct bnad *bnad);
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu);
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid);
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid);
+static void bnad_netpoll(struct net_device *netdev);
+
+static const struct net_device_ops bnad_netdev_ops = {
+	.ndo_open			= bnad_open,
+	.ndo_stop			= bnad_stop,
+	.ndo_start_xmit			= bnad_start_xmit,
+	.ndo_get_stats			= bnad_get_stats,
+	.ndo_set_rx_mode		= &bnad_set_rx_mode,
+	.ndo_set_multicast_list		= bnad_set_rx_mode,
+	.ndo_set_mac_address		= bnad_set_mac_address,
+	.ndo_change_mtu			= bnad_change_mtu,
+
+	.ndo_vlan_rx_register		= bnad_vlan_rx_register,
+	.ndo_vlan_rx_add_vid		= bnad_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid		= bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller		= bnad_netpoll,
+#endif
+};
+
+
+u32
+bnad_get_msglevel(struct net_device *netdev)
+{
+	return bnad_log_level;
+}
+
+void
+bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	bnad_conf_lock();
+	bnad_log_level = msglevel;
+	bnad_conf_unlock();
+}
+
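+/*
+ * Reclaim Tx buffers whose work items the hardware has consumed: unmap the
+ * head and all fragments, free the skbs and advance the TxQ and unmap-queue
+ * consumer indices.  Returns the number of packets freed.
+ */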
+static unsigned int
+bnad_free_txbufs(struct bnad_txq_info *txqinfo, u16 updated_txq_cons)
+{
+	struct bnad *bnad = txqinfo->bnad;
+	unsigned int sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	wis = BNAD_Q_INDEX_CHANGE(txqinfo->txq.q.consumer_index,
+				  updated_txq_cons, txqinfo->txq.q.q_depth);
+	BNA_ASSERT(wis <=
+		   BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth));
+	unmap_array = txqinfo->skb_unmap_q.unmap_array;
+	unmap_cons = txqinfo->skb_unmap_q.consumer_index;
+	prefetch(&unmap_array[unmap_cons + 1]);
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+		BNA_ASSERT(skb);
+		unmap_array[unmap_cons].skb = NULL;
+		BNA_ASSERT(wis >=
+			   BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags));
+		BNA_ASSERT(((txqinfo->skb_unmap_q.producer_index -
+			     unmap_cons) & (txqinfo->skb_unmap_q.q_depth -
+					    1)) >=
+			   1 + skb_shinfo(skb)->nr_frags);
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr), skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, txqinfo->skb_unmap_q.q_depth);
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+				       pci_unmap_addr(&unmap_array[unmap_cons],
+						      dma_addr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					   0);
+			BNA_QE_INDX_ADD(unmap_cons, 1,
+					txqinfo->skb_unmap_q.q_depth);
+			prefetch(&unmap_array[unmap_cons + 1]);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	txqinfo->txq.q.consumer_index = updated_txq_cons;
+	txqinfo->skb_unmap_q.consumer_index = unmap_cons;
+	txqinfo->tx_packets += sent_packets;
+	txqinfo->tx_bytes += sent_bytes;
+	return sent_packets;
+}
+
+static inline void
+bnad_disable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    0);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+}
+
+static inline void
+bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->txq_table[i].ib,
+					    bnad->tx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv, &bnad->cq_table[i].ib,
+					    bnad->cq_table[i].
+					    rx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static inline void
+bnad_disable_rx_irq(struct bnad *bnad, struct bnad_cq_info *cqinfo)
+{
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib, 0);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+}
+
+static inline void
+bnad_enable_rx_irq(struct bnad *bnad, struct bnad_cq_info *cqinfo)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib,
+				    cqinfo->rx_coalescing_timeo);
+
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
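+/*
+ * Process Tx completions for one TxQ, ack the IB and wake the netdev queue
+ * once enough descriptors are free again.
+ */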
+static unsigned int
+bnad_tx(struct bnad *bnad, struct bnad_txq_info *txqinfo)
+{
+	struct net_device *netdev = bnad->netdev;
+	unsigned int sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags))
+		return 0;
+
+	sent = bnad_free_txbufs(txqinfo,
+				(u16) (*txqinfo->hw_consumer_index));
+	if (sent) {
+		if (netif_queue_stopped(netdev) &&
+		    netif_carrier_ok(netdev) &&
+		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
+		    BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);
+	} else {
+		bna_ib_ack(bnad->priv, &txqinfo->ib, 0);
+	}
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+
+	return sent;
+}
+
+static irqreturn_t
+bnad_msix_tx(int irq, void *data)
+{
+	struct bnad_txq_info *txqinfo = (struct bnad_txq_info *)data;
+	struct bnad *bnad = txqinfo->bnad;
+
+	bnad_tx(bnad, txqinfo);
+
+	return IRQ_HANDLED;
+}
+
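+/*
+ * Replenish an RxQ: allocate and DMA-map receive skbs, post their addresses
+ * to the queue and ring the doorbell for whatever could be allocated.
+ */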
+static void
+bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc =
+		BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				rxqinfo->skb_unmap_q.q_depth);
+
+	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
+	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= rxqinfo->rxq.q.q_depth);
+
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent,
+					     wi_range);
+			BNA_ASSERT(wi_range &&
+				   wi_range <= rxqinfo->rxq.q.q_depth);
+		}
+		skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			rxqinfo->rxbuf_alloc_failed++;
+			goto finishing;
+		}
+		skb->dev = rxqinfo->bnad->netdev;
+		skb_reserve(skb, NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr =
+			pci_map_single(rxqinfo->bnad->pcidev, skb->data,
+				       rxqinfo->rxq_config.buffer_size,
+				       PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, rxqinfo->skb_unmap_q.q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		rxqinfo->skb_unmap_q.producer_index = unmap_prod;
+		rxqinfo->rxq.q.producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(&rxqinfo->rxq);
+	}
+}
+
+static inline void
+bnad_refill_rxq(struct bnad_rxq_info *rxqinfo)
+{
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags)) {
+		if (BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+				    rxqinfo->skb_unmap_q.q_depth) >>
+		    BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_rxbufs(rxqinfo);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+	}
+}
+
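+/*
+ * Consume up to 'budget' completions from a CQ: unmap each frame, drop
+ * errored packets, apply the checksum-offload and VLAN results, hand the
+ * rest to the stack, then ack the IB and trigger RxQ refills.
+ */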
+static unsigned int
+bnad_poll_cq(struct bnad *bnad, struct bnad_cq_info *cqinfo, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_rxq_info *rxqinfo = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	struct bna_pkt_rate *pkt_rt = &cqinfo->pkt_rate;
+
+	prefetch(bnad);
+	prefetch(bnad->netdev);
+	cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+	BNA_ASSERT(wi_range && wi_range <= cqinfo->cq.q.q_depth);
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+		rxqinfo = &bnad->rxq_table[cmpl->rxq_id];
+		unmap_q = &rxqinfo->skb_unmap_q;
+
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		prefetch(skb->data - NET_IP_ALIGN);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size,
+				 PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		/* XXX May be bad for performance. */
+		/* CATAPULT_BRINGUP : Should we add all the packets ? */
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+
+		wis++;
+		if (likely(--wi_range))
+			next_cmpl = cmpl + 1;
+		else {
+			BNA_Q_PI_ADD(&cqinfo->cq, wis);
+			wis = 0;
+			next_cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+			BNA_ASSERT(wi_range &&
+				   wi_range <= cqinfo->cq.q.q_depth);
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+				      BNA_CQ_EF_FCS_ERROR |
+				      BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb_any(skb);
+			rxqinfo->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely(bnad->rx_csum &&
+			   (((flags & BNA_CQ_EF_IPV4) &&
+			     (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+			    (flags & BNA_CQ_EF_IPV6)) &&
+			   (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+			   (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		rxqinfo->rx_packets++;
+		rxqinfo->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) &&
+		    bnad_vlan_strip) {
+			BNA_ASSERT(cmpl->vlan_tag);
+			vlan_hwaccel_receive_skb(skb, bnad->vlangrp,
+						 ntohs(cmpl->vlan_tag));
+		} else
+			netif_receive_skb(skb);
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	BNA_Q_PI_ADD(&cqinfo->cq, wis);
+
+	if (likely(rxqinfo)) {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, packets);
+		/* Check the current queue first. */
+		bnad_refill_rxq(rxqinfo);
+
+		/* XXX counters per queue for refill? */
+		if (likely(bnad_small_large_rxbufs)) {
+			/* There are 2 RxQs - small and large buffer queues */
+			unsigned int rxq_id = (rxqinfo->rxq_id ^ 1);
+			bnad_refill_rxq(&bnad->rxq_table[rxq_id]);
+		}
+	} else {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	}
+
+	return packets;
+}
+
+static irqreturn_t
+bnad_msix_rx(int irq, void *data)
+{
+	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
+	struct bnad *bnad = cqinfo->bnad;
+
+	if (likely(napi_schedule_prep(&cqinfo->napi))) {
+		bnad_disable_rx_irq(bnad, cqinfo);
+		__napi_schedule(&cqinfo->napi);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnad_msix_err_mbox(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+
+	bna_intr_status_get(bnad->priv, &intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+		bna_mbox_err_handler(bnad->priv, intr_status);
+
+	spin_unlock(&bnad->priv_lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnad_isr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+
+	if (!intr_status) {
+		spin_unlock(&bnad->priv_lock);
+		return IRQ_NONE;
+	}
+
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		bna_mbox_err_handler(bnad->priv, intr_status);
+		spin_unlock(&bnad->priv_lock);
+		if (BNA_IS_ERR_INTR(intr_status) ||
+		    !BNA_IS_INTX_DATA_INTR(intr_status))
+			goto exit_isr;
+	} else
+		spin_unlock(&bnad->priv_lock);
+
+	if (likely(napi_schedule_prep(&bnad->cq_table[0].napi))) {
+		bnad_disable_txrx_irqs(bnad);
+		__napi_schedule(&bnad->cq_table[0].napi);
+	}
+
+exit_isr:
+	return IRQ_HANDLED;
+}
+
+static int
+bnad_request_mbox_irq(struct bnad *bnad)
+{
+	int err;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		err = request_irq(bnad->msix_table[bnad->msix_num - 1].vector,
+				  &bnad_msix_err_mbox, 0,
+				  bnad->netdev->name, bnad->netdev);
+	} else {
+		err = request_irq(bnad->pcidev->irq, &bnad_isr,
+				  IRQF_SHARED, bnad->netdev->name,
+				  bnad->netdev);
+	}
+
+	if (err) {
+		dev_err(&bnad->pcidev->dev,
+			"Request irq for mailbox failed: %d\n", err);
+		return err;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bna_mbox_msix_idx_set(bnad->priv, bnad->msix_num - 1);
+
+	bna_mbox_intr_enable(bnad->priv);
+	return 0;
+}
+
+static void
+bnad_sync_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+	synchronize_irq(irq);
+}
+
+static void
+bnad_free_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_mbox_intr_disable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	free_irq(irq, bnad->netdev);
+}
+
+static int
+bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
+{
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[txq_id].vector,
+			   &bnad_msix_tx, 0,
+			   bnad->txq_table[txq_id].name,
+			   &bnad->txq_table[txq_id]);
+}
+
+int
+bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
+{
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	if (!(bnad->config & BNAD_CF_MSIX))
+		return 0;
+	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
+			   &bnad_msix_rx, 0,
+			   bnad->cq_table[cq_id].name, &bnad->cq_table[cq_id]);
+}
+
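+/*
+ * Request per-TxQ and per-CQ MSI-X vectors.  In INTx mode there is nothing
+ * to request; the data interrupt bits are unmasked and all IBs are acked.
+ * On failure, free the vectors requested so far.
+ */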
+static int
+bnad_request_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	int i;
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		u32 mask;
+		bna_intx_disable(bnad->priv, &mask);
+		mask &= ~0xffff;
+		bna_intx_enable(bnad->priv, mask);
+		for (i = 0; i < bnad->ib_num; i++)
+			bna_ib_ack(bnad->priv, bnad->ib_table[i].ib, 0);
+		return 0;
+	}
+
+	entries = bnad->msix_table;
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_request_txq_irq(bnad, i);
+		if (err) {
+			sprintf(message, "%s request irq for TxQ %d failed %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			while (--i >= 0) {
+				free_irq(entries[i].vector,
+					 &bnad->txq_table[i]);
+			}
+			return err;
+		}
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_request_cq_irq(bnad, i);
+		if (err) {
+			sprintf(message, "%s request irq for CQ %u failed %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			while (--i >= 0) {
+				free_irq(entries[bnad->txq_num + i].vector,
+					 &bnad->cq_table[i]);
+			}
+			goto free_txq_irqs;
+		}
+	}
+
+	return 0;
+
+free_txq_irqs:
+	for (i = 0; i < bnad->txq_num; i++)
+		free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+	bnad_disable_msix(bnad);
+
+	return err;
+}
+
+static void
+bnad_free_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	uint i;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		entries = bnad->msix_table;
+		for (i = 0; i < bnad->txq_num; i++)
+			free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+		for (i = 0; i < bnad->cq_num; i++) {
+			free_irq(entries[bnad->txq_num + i].vector,
+				 &bnad->cq_table[i]);
+		}
+	} else
+		synchronize_irq(bnad->pcidev->irq);
+}
+
+void
+bnad_setup_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+
+
+	BNA_ASSERT(ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_ib_config_set(bnad->priv, ib_entry->ib, ib_id,
+			  &ib_entry->ib_config);
+	/* Start the IB */
+	bna_ib_ack(bnad->priv, ib_entry->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void
+bnad_setup_ibs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_ib(bnad, bnad->txq_table[i].txq_config.ib_id);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_ib(bnad, bnad->cq_table[i].cq_config.ib_id);
+}
+
+/* These functions are called back with priv_lock held. */
+
+static void
+bnad_lldp_get_cfg_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void
+bnad_cee_get_attr_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void
+bnad_cee_get_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_stats_comp_status = status;
+	complete(&bnad->cee_stats_comp);
+}
+
+static void
+bnad_cee_reset_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_reset_stats_status = status;
+	complete(&bnad->cee_reset_stats_comp);
+}
+
+static void
+bnad_ucast_set_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->ucast_comp_status = status;
+	complete(&bnad->ucast_comp);
+}
+
+static void
+bnad_q_stop_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+
+	bnad->qstop_comp_status = status;
+	complete(&bnad->qstop_comp);
+}
+
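+/* Return the lowest priority set in the CEE priority bitmap, or 0 if none. */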
+static unsigned int
+bnad_get_priority(struct bnad *bnad, u8 prio_map)
+{
+	unsigned int i;
+
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		return i;
+	}
+	return 0;
+}
+
+static void
+bnad_link_up_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct bfi_ll_aen *up_aen = (struct bfi_ll_aen *)
+		(&bnad->priv->mb_msg);
+
+	bnad->cee_linkup = up_aen->cee_linkup;
+	bnad->priority = bnad_get_priority(bnad, up_aen->prio_map);
+
+	bnad->link_state = BNAD_LS_UP;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void
+bnad_link_down_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+
+	bnad->link_state = BNAD_LS_DOWN;
+	bnad->work_flags |= BNAD_WF_LS_NOTIFY;
+
+	schedule_work(&bnad->work);
+}
+
+static void
+bnad_stats_get_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->stats.hw_stats_updates++;
+	if (bnad->state == BNAD_S_OPEN)
+		mod_timer(&bnad->stats_timer, jiffies + HZ);
+}
+
+
+/* Called with bnad priv_lock held. */
+static void
+bnad_hw_error(struct bnad *bnad, u8 status)
+{
+	unsigned int irq;
+	char message[BNA_MESSAGE_SIZE];
+
+	set_bit(BNAD_F_HWERROR, &bnad->flags);
+
+	bna_mbox_intr_disable(bnad->priv);
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (!test_and_set_bit(BNAD_F_MBOX_IRQ_DISABLED, &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			sprintf(message, "Disabling Mbox IRQ %d for port %d",
+				irq, bnad->bna_id);
+			DPRINTK(INFO, "%s", message);
+			disable_irq_nosync(irq);
+		}
+	}
+
+	bna_cleanup(bnad->priv);
+
+	bnad->work_flags = BNAD_WF_ERROR;
+	if (bnad->state != BNAD_S_UNLOADING)
+		schedule_work(&bnad->work);
+}
+
+static void
+bnad_hw_error_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad_hw_error(bnad, status);
+}
+
+int
+bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth)
+{
+	/* Q_depth must be power of 2 for macros to work. */
+	BNA_ASSERT(BNA_POWER_OF_2(q_depth));
+	unmap_q->q_depth = q_depth;
+	unmap_q->unmap_array = vmalloc(q_depth * sizeof(struct bnad_skb_unmap));
+	if (!unmap_q->unmap_array)
+		return -ENOMEM;
+	memset(unmap_q->unmap_array, 0,
+	       q_depth * sizeof(struct bnad_skb_unmap));
+	return 0;
+}
+
+static int
+bnad_alloc_unmap_queues(struct bnad *bnad)
+{
+	int i, err = 0;
+	struct bnad_txq_info *txqinfo;
+	struct bnad_rxq_info *rxqinfo;
+	char message[BNA_MESSAGE_SIZE];
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		err = bnad_alloc_unmap_q(&txqinfo->skb_unmap_q,
+					 txqinfo->txq.q.q_depth * 4);
+		if (err) {
+			sprintf(message,
+				"%s allocating Tx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			return err;
+		}
+	}
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		err = bnad_alloc_unmap_q(&rxqinfo->skb_unmap_q,
+					 rxqinfo->rxq.q.q_depth);
+		if (err) {
+			sprintf(message,
+				"%s allocating Rx unmap Q %d failed: %d",
+				bnad->netdev->name, i, err);
+			DPRINTK(INFO, "%s", message);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void
+bnad_reset_q(struct bnad *bnad, struct bna_q *q, struct bnad_unmap_q *unmap_q)
+{
+	u32 _ui;
+
+	BNA_ASSERT(q->producer_index == q->consumer_index);
+	BNA_ASSERT(unmap_q->producer_index == unmap_q->consumer_index);
+
+	q->producer_index = 0;
+	q->consumer_index = 0;
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	for (_ui = 0; _ui < unmap_q->q_depth; _ui++)
+		BNA_ASSERT(!unmap_q->unmap_array[_ui].skb);
+}
+
+static void
+bnad_flush_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	struct bnad *bnad = rxqinfo->bnad;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 cq_id;
+
+	unmap_q = &rxqinfo->skb_unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+				 rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+	}
+
+	bnad_reset_q(bnad, &rxqinfo->rxq.q, &rxqinfo->skb_unmap_q);
+	cq_id = rxqinfo->rxq_id / bnad_rxqs_per_cq;
+	*bnad->cq_table[cq_id].hw_producer_index = 0;
+}
+
+/* Should be called with conf_lock held. */
+static int
+bnad_disable_txq(struct bnad *bnad, u32 txq_id)
+{
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_txq_stop(bnad->priv, txq_id);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto txq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto txq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+txq_stop_exit:
+	if (err) {
+		sprintf(message, "%s stop TxQ %u failed %d", bnad->netdev->name,
+			txq_id, err);
+		DPRINTK(INFO, "%s", message);
+	}
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask)
+{
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_multi_rxq_stop(bnad->priv, rxq_id_mask);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto rxq_stop_exit;
+	}
+
+	if (BNAD_NOT_READY(bnad)) {
+		err = BNA_FAIL;
+		goto rxq_stop_exit;
+	}
+	wait_for_completion(&bnad->qstop_comp);
+
+	err = bnad->qstop_comp_status;
+
+	if (err == BFI_LL_CMD_NOT_EXEC) {
+		if (bnad->state == BNAD_S_CLOSING)
+			err = 0;
+		else
+			err = BNA_FAIL;
+	}
+
+rxq_stop_exit:
+	if (err) {
+		sprintf(message, "%s stop RxQs(0x%llx) failed %d",
+			bnad->netdev->name, rxq_id_mask, err);
+		DPRINTK(INFO, "%s", message);
+	}
+
+	return err;
+}
+
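+/*
+ * NAPI poll handlers: bnad_poll_rx services a single CQ and is used in
+ * MSI-X mode (one vector per CQ); bnad_poll_txrx also reaps Tx
+ * completions and is used in INTx mode, where Tx and Rx share a vector.
+ */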
+static int
+bnad_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_rx_irq(bnad, cqinfo);
+	return rcvd;
+}
+
+static int
+bnad_poll_txrx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+		container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	bnad_tx(bnad, &bnad->txq_table[0]);
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.napi_complete++;
+	bnad_enable_txrx_irqs(bnad);
+	return rcvd;
+}
+
+static void
+bnad_napi_init(struct bnad *bnad)
+{
+	int (*napi_poll) (struct napi_struct *, int);
+	int i;
+
+	if (bnad->config & BNAD_CF_MSIX)
+		napi_poll = bnad_poll_rx;
+	else
+		napi_poll = bnad_poll_txrx;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_add(bnad->netdev, &bnad->cq_table[i].napi, napi_poll,
+			       64);
+}
+
+static void
+bnad_napi_enable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_enable(&bnad->cq_table[i].napi);
+}
+
+static void
+bnad_napi_disable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_disable(&bnad->cq_table[i].napi);
+}
+
+static void
+bnad_napi_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_del(&bnad->cq_table[i].napi);
+}
+
+static void
+bnad_stop_data_path(struct bnad *bnad, int on_error)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!on_error && !BNAD_NOT_READY(bnad)) {
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Wait to make sure Tx and Rx are stopped. */
+	msleep(1000);
+	bnad_free_txrx_irqs(bnad);
+	bnad_sync_mbox_irq(bnad);
+
+	bnad_napi_disable(bnad);
+	bnad_napi_uninit(bnad);
+	/* Delete the stats timer after synchronize with mbox irq. */
+	del_timer_sync(&bnad->stats_timer);
+
+	netif_tx_disable(bnad->netdev);
+	netif_carrier_off(bnad->netdev);
+
+	/*
+	 * Remove tasklets if scheduled
+	 */
+	tasklet_kill(&bnad->tx_free_tasklet);
+}
+
+static void
+bnad_port_admin_locked(struct bnad *bnad, u8 up)
+{
+	spin_lock_irq(&bnad->priv_lock);
+	if (!BNAD_NOT_READY(bnad)) {
+		bna_port_admin(bnad->priv, up);
+		if (up)
+			mod_timer(&bnad->stats_timer, jiffies + HZ);
+		else
+			bnad->link_state = BNAD_LS_DOWN;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held */
+static int
+bnad_stop_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_CLOSING;
+		bnad_disable_locked(bnad);
+		bnad->state = BNAD_S_INIT;
+		sprintf(message, "%s is stopped", bnad->netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN_DISABLED:
+		bnad->state = BNAD_S_INIT_DISABLED;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held */
+int
+bnad_ioc_disabling_locked(struct bnad *bnad)
+{
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT_DISABLING;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+		bnad_disable_locked(bnad);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		bnad->state = BNAD_S_OPEN_DISABLING;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int
+bnad_alloc_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	ib_entry->ib_seg_addr =
+		pci_alloc_consistent(bnad->pcidev, L1_CACHE_BYTES, &dma_addr);
+	if (!ib_entry->ib_seg_addr)
+		return -ENOMEM;
+
+	BNA_SET_DMA_ADDR(dma_addr, &ib_entry->ib_config.ib_seg_addr);
+	return 0;
+}
+
+static int
+bnad_alloc_ibs(struct bnad *bnad)
+{
+	uint i;
+	int err;
+
+	bnad->ib_num = bnad->txq_num + bnad->cq_num;
+	bnad->ib_table =
+		kzalloc(bnad->ib_num * sizeof(struct bnad_ib_entry),
+			GFP_KERNEL);
+	if (!bnad->ib_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->ib_num; i++) {
+		err = bnad_alloc_ib(bnad, i);
+		if (err)
+			goto free_ibs;
+	}
+	return 0;
+
+free_ibs:
+	bnad_free_ibs(bnad);
+	return err;
+}
+
+void
+bnad_free_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	if (ib_entry->ib_seg_addr) {
+		BNA_GET_DMA_ADDR(&ib_entry->ib_config.ib_seg_addr, dma_addr);
+		pci_free_consistent(bnad->pcidev, L1_CACHE_BYTES,
+				    ib_entry->ib_seg_addr, dma_addr);
+		ib_entry->ib_seg_addr = NULL;
+	}
+}
+
+static void
+bnad_free_ibs(struct bnad *bnad)
+{
+	uint i;
+
+	if (!bnad->ib_table)
+		return;
+	for (i = 0; i < bnad->ib_num; i++)
+		bnad_free_ib(bnad, i);
+	kfree(bnad->ib_table);
+	bnad->ib_table = NULL;
+}
+
+/* Let the caller deal with error - free memory. */
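+/*
+ * Queues are allocated in PAGE_SIZE chunks.  The queue page table (QPT)
+ * holds one DMA address per chunk (kv_qpt_ptr, handed to hardware via
+ * hw_qpt_ptr), while q->qpt_ptr keeps the kernel virtual address of
+ * each chunk.
+ */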
+static int
+bnad_alloc_q(struct bnad *bnad, struct bna_qpt *qpt, struct bna_q *q,
+	     size_t qsize)
+{
+	size_t i;
+	dma_addr_t dma_addr;
+
+	qsize = ALIGN(qsize, PAGE_SIZE);
+	qpt->page_count = qsize >> PAGE_SHIFT;
+	qpt->page_size = PAGE_SIZE;
+
+
+	qpt->kv_qpt_ptr =
+		pci_alloc_consistent(bnad->pcidev,
+				     qpt->page_count *
+				     sizeof(struct bna_dma_addr), &dma_addr);
+	if (!qpt->kv_qpt_ptr)
+		return -ENOMEM;
+	BNA_SET_DMA_ADDR(dma_addr, &qpt->hw_qpt_ptr);
+
+	q->qpt_ptr = kzalloc(qpt->page_count * sizeof(void *), GFP_KERNEL);
+	if (!q->qpt_ptr)
+		return -ENOMEM;
+	qpt->qpt_ptr = q->qpt_ptr;
+	for (i = 0; i < qpt->page_count; i++) {
+		q->qpt_ptr[i] =
+			pci_alloc_consistent(bnad->pcidev, PAGE_SIZE,
+					     &dma_addr);
+		if (!q->qpt_ptr[i])
+			return -ENOMEM;
+		BNA_SET_DMA_ADDR(dma_addr,
+				 &((struct bna_dma_addr *)qpt->kv_qpt_ptr)[i]);
+
+	}
+
+	return 0;
+}
+
+static void
+bnad_free_q(struct bnad *bnad, struct bna_qpt *qpt, struct bna_q *q)
+{
+	int i;
+	dma_addr_t dma_addr;
+
+	if (qpt->kv_qpt_ptr && q->qpt_ptr) {
+		for (i = 0; i < qpt->page_count; i++) {
+			if (q->qpt_ptr[i]) {
+				BNA_GET_DMA_ADDR(&
+						 ((struct bna_dma_addr *)qpt->
+						  kv_qpt_ptr)[i], dma_addr);
+				pci_free_consistent(bnad->pcidev, PAGE_SIZE,
+						    q->qpt_ptr[i], dma_addr);
+			}
+		}
+	}
+
+	kfree(q->qpt_ptr);
+	qpt->qpt_ptr = q->qpt_ptr = NULL;
+
+	if (qpt->kv_qpt_ptr) {
+		BNA_GET_DMA_ADDR(&qpt->hw_qpt_ptr, dma_addr);
+		pci_free_consistent(bnad->pcidev,
+				    qpt->page_count *
+				    sizeof(struct bna_dma_addr),
+				    qpt->kv_qpt_ptr, dma_addr);
+		qpt->kv_qpt_ptr = NULL;
+	}
+}
+
+static void
+bnad_free_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+	if (txqinfo->skb_unmap_q.unmap_array) {
+		bnad_free_txbufs(txqinfo, txqinfo->txq.q.producer_index);
+		vfree(txqinfo->skb_unmap_q.unmap_array);
+		txqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+	if (rxqinfo->skb_unmap_q.unmap_array) {
+		bnad_flush_rxbufs(rxqinfo);
+		vfree(rxqinfo->skb_unmap_q.unmap_array);
+		rxqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void
+bnad_free_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+}
+
+static void
+bnad_free_queues(struct bnad *bnad)
+{
+	uint i;
+
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++)
+			bnad_free_txq(bnad, i);
+		kfree(bnad->txq_table);
+		bnad->txq_table = NULL;
+	}
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++)
+			bnad_free_rxq(bnad, i);
+		kfree(bnad->rxq_table);
+		bnad->rxq_table = NULL;
+	}
+
+	if (bnad->cq_table) {
+		for (i = 0; i < bnad->cq_num; i++)
+			bnad_free_cq(bnad, i);
+		kfree(bnad->cq_table);
+		bnad->cq_table = NULL;
+	}
+}
+
+static int
+bnad_txq_init(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	err = bnad_alloc_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q,
+			   bnad->txq_depth * sizeof(struct bna_txq_entry));
+	if (err) {
+		bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+		return err;
+	}
+	txqinfo->txq.q.q_depth = bnad->txq_depth;
+	txqinfo->bnad = bnad;
+	txqinfo->txq_config.txf_id = BNAD_TX_FUNC_ID;
+	snprintf(txqinfo->name, sizeof(txqinfo->name), "%s TxQ %d",
+		 bnad->netdev->name, txq_id);
+	return 0;
+}
+
+static int
+bnad_txqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->txq_table =
+		kzalloc(bnad->txq_num * sizeof(struct bnad_txq_info),
+			GFP_KERNEL);
+	if (!bnad->txq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_txq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_rxq_init(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	err = bnad_alloc_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q,
+			   bnad->rxq_depth * sizeof(struct bna_rxq_entry));
+	if (err) {
+		bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+		return err;
+	}
+	rxqinfo->rxq.q.q_depth = bnad->rxq_depth;
+	rxqinfo->bnad = bnad;
+	rxqinfo->rxq_id = rxq_id;
+	rxqinfo->rxq_config.cq_id = rxq_id / bnad_rxqs_per_cq;
+
+	return 0;
+}
+
+static int
+bnad_rxqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->rxq_table =
+		kzalloc(bnad->rxq_num * sizeof(struct bnad_rxq_info),
+			GFP_KERNEL);
+	if (!bnad->rxq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		err = bnad_rxq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int
+bnad_cq_init(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	err = bnad_alloc_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q,
+			   bnad->rxq_depth * bnad_rxqs_per_cq *
+			   sizeof(struct bna_cq_entry));
+	if (err) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return err;
+	}
+
+	cqinfo->cq.q.q_depth = bnad->rxq_depth * bnad_rxqs_per_cq;
+	cqinfo->bnad = bnad;
+
+	cqinfo->rx_coalescing_timeo = bnad->rx_coalescing_timeo;
+
+	cqinfo->cq_id = cq_id;
+	snprintf(cqinfo->name, sizeof(cqinfo->name), "%s CQ %d",
+		 bnad->netdev->name, cq_id);
+
+	return 0;
+}
+
+static int
+bnad_cqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->cq_table =
+		kzalloc(bnad->cq_num * sizeof(struct bnad_cq_info), GFP_KERNEL);
+	if (!bnad->cq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_cq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
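+/*
+ * Scale the configured queue depth down for jumbo MTUs (larger buffers
+ * need fewer entries), rounding up to a power of 2 and clamping at
+ * BNAD_MIN_Q_DEPTH.
+ */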
+static uint
+bnad_get_qsize(uint qsize_conf, uint mtu)
+{
+	uint qsize;
+
+	if (mtu >= ETH_DATA_LEN) {
+		qsize = qsize_conf / (mtu / ETH_DATA_LEN);
+		if (!BNA_POWER_OF_2(qsize))
+			BNA_TO_POWER_OF_2_HIGH(qsize);
+		if (qsize < BNAD_MIN_Q_DEPTH)
+			qsize = BNAD_MIN_Q_DEPTH;
+	} else
+		qsize = qsize_conf;
+
+	return qsize;
+}
+
+static int
+bnad_init_queues(struct bnad *bnad)
+{
+	int err;
+
+	if (!(bnad->config & BNAD_CF_TXQ_DEPTH))
+		bnad->txq_depth =
+			bnad_get_qsize(bnad_txq_depth, bnad->netdev->mtu);
+	if (!(bnad->config & BNAD_CF_RXQ_DEPTH))
+		bnad->rxq_depth =
+			bnad_get_qsize(bnad_rxq_depth, bnad->netdev->mtu);
+
+	err = bnad_txqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_rxqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_cqs_init(bnad);
+
+	return err;
+}
+
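+/*
+ * Each CQ gets its own interrupt block (IB).  The IB segment is where
+ * hardware writes the producer index that the driver reads, and the IB
+ * config also carries the coalescing settings and MSI-X/INTx vector.
+ */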
+void
+bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
+{
+	struct bnad_cq_info *cqinfo;
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+
+	BNA_ASSERT(cq_id < bnad->cq_num && ib_id < bnad->ib_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	ib_entry = &bnad->ib_table[ib_id];
+
+	cqinfo->hw_producer_index = (u32 *) (ib_entry->ib_seg_addr);
+	cqinfo->cq_config.ib_id = ib_id;
+	cqinfo->cq_config.ib_seg_index = 0;
+
+	ib_entry->ib = &cqinfo->ib;
+	ib_config = &ib_entry->ib_config;
+	ib_config->coalescing_timer = bnad->rx_coalescing_timeo;
+	ib_config->control_flags =
+		BNA_IB_CF_INT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
+	if (bnad->config & BNAD_CF_MSIX) {
+		ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+		ib_config->msix_vector = ib_id;
+	} else
+		ib_config->msix_vector = 1 << ib_id;
+
+	/* Every CQ has its own IB. */
+	ib_config->seg_size = 1;
+	ib_config->index_table_offset = ib_id;
+}
+
+static void
+bnad_ibs_init(struct bnad *bnad)
+{
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+	struct bnad_txq_info *txqinfo;
+
+	int ib_id, i;
+
+	ib_id = 0;
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		ib_entry = &bnad->ib_table[ib_id];
+
+		txqinfo->hw_consumer_index = ib_entry->ib_seg_addr;
+		txqinfo->txq_config.ib_id = ib_id;
+		txqinfo->txq_config.ib_seg_index = 0;
+
+		ib_entry->ib = &txqinfo->ib;
+		ib_config = &ib_entry->ib_config;
+		ib_config->coalescing_timer = bnad->tx_coalescing_timeo;
+		ib_config->control_flags =
+			BNA_IB_CF_INTER_PKT_DMA | BNA_IB_CF_INT_ENABLE |
+			BNA_IB_CF_COALESCING_MODE | BNA_IB_CF_MASTER_ENABLE;
+		if (bnad->config & BNAD_CF_MSIX) {
+			ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+			ib_config->msix_vector = ib_id;
+		} else
+			ib_config->msix_vector = 1 << ib_id;
+		ib_config->interpkt_count = bnad->tx_interpkt_count;
+
+		/* Every TxQ has its own IB. */
+		ib_config->seg_size = 1;
+		ib_config->index_table_offset = ib_id;
+		ib_id++;
+	}
+
+	for (i = 0; i < bnad->cq_num; i++, ib_id++)
+		bnad_rxib_init(bnad, i, ib_id);
+}
+
+static void
+bnad_txf_init(struct bnad *bnad, uint txf_id)
+{
+	struct bnad_txf_info *txf_info;
+
+	BNA_ASSERT(bnad->txf_table && txf_id < bnad->txf_num);
+	txf_info = &bnad->txf_table[txf_id];
+	txf_info->txf_id = txf_id;
+	txf_info->txf_config.flags =
+		BNA_TXF_CF_VLAN_WI_BASED | BNA_TXF_CF_ENABLE;
+}
+
+void
+bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss)
+{
+	struct bnad_rxf_info *rxf_info;
+
+	BNA_ASSERT(bnad->rxf_table && rxf_id < bnad->rxf_num);
+	rxf_info = &bnad->rxf_table[rxf_id];
+	rxf_info->rxf_id = rxf_id;
+	rxf_info->rxf_config.rit_offset = rit_offset;
+	rxf_info->rxf_config.mcast_rxq_id = BNAD_MULTICAST_RXQ_ID;
+	if (bnad_small_large_rxbufs)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_SM_LG_RXQ;
+	if (bnad_vlan_strip)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_VLAN_STRIP;
+	if (rss) {
+		struct bna_rxf_rss *rxf_rss;
+
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_RSS_ENABLE;
+		rxf_rss = &rxf_info->rxf_config.rss;
+		rxf_rss->type =
+			BNA_RSS_V4_TCP | BNA_RSS_V4_IP | BNA_RSS_V6_TCP |
+			BNA_RSS_V6_IP;
+		rxf_rss->hash_mask = bnad->cq_num - 1;
+		get_random_bytes(rxf_rss->toeplitz_hash_key,
+				 sizeof(rxf_rss->toeplitz_hash_key));
+	}
+}
+
+static int
+bnad_init_funcs(struct bnad *bnad)
+{
+	bnad->txf_table =
+		kzalloc(sizeof(struct bnad_txf_info) * bnad->txf_num,
+			GFP_KERNEL);
+	if (!bnad->txf_table)
+		return -ENOMEM;
+	bnad_txf_init(bnad, BNAD_TX_FUNC_ID);
+
+	bnad->rxf_table =
+		kzalloc(sizeof(struct bnad_rxf_info) * bnad->rxf_num,
+			GFP_KERNEL);
+	if (!bnad->rxf_table)
+		return -ENOMEM;
+	bnad_rxf_init(bnad, BNAD_RX_FUNC_ID, BNAD_RIT_OFFSET,
+		      (bnad->cq_num > 1) ? 1 : 0);
+	return 0;
+}
+
+static void
+bnad_setup_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+
+	/* CEE state should not change while we do this */
+	spin_lock_irq(&bnad->priv_lock);
+	if (!bnad->cee_linkup) {
+		txqinfo->txq_config.priority = bnad->curr_priority = txq_id;
+		clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	} else {
+		txqinfo->txq_config.priority = bnad->curr_priority =
+			bnad->priority;
+		set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+	}
+	/*  Set wrr_quota properly if multiple priorities/TxQs are enabled. */
+	txqinfo->txq_config.wrr_quota = BNAD_TX_MAX_WRR_QUOTA;
+	bna_txq_config(bnad->priv, &txqinfo->txq, txq_id, &txqinfo->txq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+
+	/*
+	 * Every RxQ set has 2 RxQs: the first is large buffer RxQ,
+	 * the second is small buffer RxQ.
+	 */
+	if ((rxq_id % bnad_rxqs_per_cq) == 0)
+		rxqinfo->rxq_config.buffer_size =
+			(bnad_vlan_strip ? VLAN_ETH_HLEN : ETH_HLEN) +
+			bnad->netdev->mtu + ETH_FCS_LEN;
+	else
+		rxqinfo->rxq_config.buffer_size = BNAD_SMALL_RXBUF_SIZE;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxq_config(bnad->priv, &rxqinfo->rxq, rxq_id, &rxqinfo->rxq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_setup_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_cq_config(bnad->priv, &cqinfo->cq, cq_id, &cqinfo->cq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void
+bnad_setup_queues(struct bnad *bnad)
+{
+	uint i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_txq(bnad, i);
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_setup_rxq(bnad, i);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_cq(bnad, i);
+}
+
+
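+/*
+ * Program the RxQ indirection table (RIT): one entry per CQ, pointing
+ * at the large-buffer RxQ and, when small/large buffers are enabled,
+ * the small-buffer RxQ of that queue set.  RSS spreads Rx traffic
+ * across these entries when more than one CQ is configured.
+ */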
+static void
+bnad_setup_rit(struct bnad *bnad)
+{
+	int i, size;
+
+	size = bnad->cq_num;
+	for (i = 0; i < size; i++) {
+		if (bnad_small_large_rxbufs) {
+			bnad->rit[i].large_rxq_id = (i << 1);
+			bnad->rit[i].small_rxq_id = (i << 1) + 1;
+		} else {
+			bnad->rit[i].large_rxq_id = i;
+		}
+	}
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rit_config_set(bnad->priv, BNAD_RIT_OFFSET, bnad->rit, size);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void
+bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo = &bnad->rxq_table[rxq_id];
+	u16 rxbufs;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	bnad_alloc_rxbufs(rxqinfo);
+	rxbufs = BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+				   rxqinfo->skb_unmap_q.q_depth);
+}
+
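+/*
+ * Push the software configuration to the hardware: quiesce any
+ * previously active queues, program the TxQ/RxQ/CQ and RIT entries,
+ * set up the Tx/Rx functions, restore the MAC address, MTU, pause and
+ * VLAN/multicast settings, and finally set up the interrupt blocks.
+ */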
+static int
+bnad_config_hw(struct bnad *bnad)
+{
+	int i, err = 0;
+	u64 rxq_id_mask = 0;
+	struct sockaddr sa;
+	struct net_device *netdev = bnad->netdev;
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Disable the RxF until the port is brought up later. */
+	bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+	for (i = 0; i < bnad->txq_num; i++) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_txq(bnad, i);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		spin_unlock_irq(&bnad->priv_lock);
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		spin_lock_irq(&bnad->priv_lock);
+		if (err || BNAD_NOT_READY(bnad))
+			goto unlock_and_return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_queues(bnad);
+
+	bnad_setup_rit(bnad);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txf_config_set(bnad->priv, BNAD_TX_FUNC_ID,
+			   &bnad->txf_table->txf_config);
+	for (i = 0; i < bnad->rxf_num; i++) {
+		bna_rxf_config_set(bnad->priv, i,
+				   &bnad->rxf_table[i].rxf_config);
+		bna_rxf_vlan_filter(bnad->priv, i, BNA_ENABLE);
+	}
+
+	spin_unlock_irq(&bnad->priv_lock);
+	/* Mailbox should be enabled before this! */
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	err = bnad_set_mac_address_locked(netdev, &sa);
+	spin_lock_irq(&bnad->priv_lock);
+	if (err || BNAD_NOT_READY(bnad))
+		goto unlock_and_return;
+
+	/* Receive broadcasts */
+	bna_rxf_broadcast(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+
+	bna_mtu_info(bnad->priv, netdev->mtu, bnad);
+	bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+
+	bna_rxf_mcast_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	bna_mcast_mac_reset_list(bnad->priv);
+
+	bnad_set_rx_mode_locked(bnad->netdev);
+
+	bnad_reconfig_vlans(bnad);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_setup_ibs(bnad);
+	return 0;
+
+unlock_and_return:
+	if (BNAD_NOT_READY(bnad))
+		err = BNA_FAIL;
+	spin_unlock_irq(&bnad->priv_lock);
+	return err;
+}
+
+/* Note: bnad_cleanup doesn't free irqs */
+static void
+bnad_cleanup(struct bnad *bnad)
+{
+
+	kfree(bnad->rit);
+	bnad->rit = NULL;
+
+	kfree(bnad->txf_table);
+	bnad->txf_table = NULL;
+
+	kfree(bnad->rxf_table);
+	bnad->rxf_table = NULL;
+
+	bnad_free_ibs(bnad);
+	bnad_free_queues(bnad);
+}
+
+/* Should be called with rtnl_lock held. */
+static int
+bnad_init(struct bnad *bnad)
+{
+	int err;
+
+	err = bnad_alloc_ibs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_init_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad_ibs_init(bnad);
+
+	err = bnad_init_funcs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_alloc_unmap_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad->rit =
+		kzalloc(bnad->cq_num * sizeof(struct bna_rit_entry),
+			GFP_KERNEL);
+	if (!bnad->rit) {
+		err = -ENOMEM;
+		goto finished;
+	}
+
+	return 0;
+
+finished:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+static int
+bnad_enable_locked(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int err = 0;
+	uint i;
+	char message[BNA_MESSAGE_SIZE];
+
+	bnad->state = BNAD_S_OPENING;
+
+	err = bnad_init(bnad);
+	if (err) {
+		sprintf(message, "%s init failed %d", netdev->name, err);
+		DPRINTK(INFO, "%s", message);
+		bnad->state = BNAD_S_INIT;
+		return err;
+	}
+
+	err = bnad_config_hw(bnad);
+	if (err) {
+		sprintf(message, "%s config HW failed %d", netdev->name, err);
+		DPRINTK(INFO, "%s", message);
+		goto init_failed;
+	}
+
+	err = bnad_request_txrx_irqs(bnad);
+	if (err) {
+		sprintf(message, "%s requests Tx/Rx irqs failed: %d",
+			bnad->netdev->name, err);
+		DPRINTK(INFO, "%s", message);
+		goto init_failed;
+	}
+	bnad_napi_init(bnad);
+	bnad_napi_enable(bnad);
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_alloc_for_rxq(bnad, i);
+
+	bnad->state = BNAD_S_OPEN;
+	sprintf(message, "%s is opened", bnad->netdev->name);
+	DPRINTK(INFO, "%s", message);
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (BNAD_NOT_READY(bnad)) {
+		/* Let bnad_error take care of the error. */
+		spin_unlock_irq(&bnad->priv_lock);
+		return 0;
+	}
+
+	/* RxF was disabled earlier. */
+	bna_rxf_enable(bnad->priv, BNAD_RX_FUNC_ID);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	return 0;
+
+init_failed:
+	bnad_cleanup(bnad);
+	bnad->state = BNAD_S_INIT;
+	return err;
+}
+
+/* Should be called with conf_lock held */
+static int
+bnad_open_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		err = bnad_enable_locked(bnad);
+		break;
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		sprintf(message, "%s is not ready yet: IOC down", netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	case BNAD_S_INIT_DISABLED:
+		bnad->state = BNAD_S_OPEN_DISABLED;
+		sprintf(message, "%s is not ready yet: IOC disabled",
+			netdev->name);
+		DPRINTK(INFO, "%s", message);
+		break;
+	default:
+		BNA_ASSERT(0);
+		break;
+	}
+	return err;
+}
+
+int
+bnad_open_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (!err && (bnad->state == BNAD_S_OPEN))
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return err;
+}
+
+int
+bnad_open(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	sprintf(message, "%s open", netdev->name);
+	DPRINTK(INFO, "%s", message);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags)) {
+		sprintf(message, "%s is disabled", netdev->name);
+		DPRINTK(INFO, "%s", message);
+	} else
+		err = bnad_open_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static int
+bnad_disable_locked(struct bnad *bnad)
+{
+	int err = 0, i;
+	u64 rxq_id_mask = 0;
+
+	bnad_stop_data_path(bnad, 0);
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			goto cleanup;
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			goto cleanup;
+	}
+
+cleanup:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+int
+bnad_stop_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	return bnad_stop_locked_internal(netdev);
+}
+
+int
+bnad_stop(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	sprintf(message, "%s stop", netdev->name);
+	DPRINTK(INFO, "%s", message);
+
+	bnad_conf_lock();
+
+	if (test_bit(BNAD_F_BCU_DISABLED, &bnad->flags)) {
+		sprintf(message, "%s port is disabled", netdev->name);
+		DPRINTK(INFO, "%s", message);
+	} else
+		err = bnad_stop_locked(netdev);
+
+	bnad_conf_unlock();
+
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked_internal(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	err = bnad_stop_locked_internal(netdev);
+	if (err) {
+		sprintf(message, "%s sw reset internal: stop failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+
+	err = bnad_open_locked_internal(netdev);
+
+	if (err) {
+		sprintf(message, "%s sw reset internal: open failed %d",
+			bnad->netdev->name, err);
+		goto done;
+	}
+	return 0;
+done:
+	DPRINTK(INFO, "%s", message);
+	return err;
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_sw_reset_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->state != BNAD_S_OPEN)
+		return 0;
+
+	bnad_port_admin_locked(bnad, BNA_DISABLE);
+
+	err = bnad_sw_reset_locked_internal(netdev);
+
+	if (err) {
+		sprintf(message, "%s sw reset: failed %d", bnad->netdev->name,
+			err);
+		DPRINTK(INFO, "%s", message);
+		return err;
+	}
+
+	/* After the reset, make sure we are in the OPEN state. */
+	if (bnad->state == BNAD_S_OPEN)
+		bnad_port_admin_locked(bnad, BNA_ENABLE);
+
+	return 0;
+}
+
+static int
+bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+	int err;
+
+	BNA_ASSERT(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+		   skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6);
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err) {
+			bnad->stats.tso_err++;
+			return err;
+		}
+	}
+
+	/*
+	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
+	 * excluding the length field.
+	 */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		/* Do we really need these? */
+		iph->tot_len = 0;
+		iph->check = 0;
+
+		tcp_hdr(skb)->check =
+			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+					   IPPROTO_TCP, 0);
+		bnad->stats.tso4++;
+	} else {
+		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+		BNA_ASSERT(skb->protocol == htons(ETH_P_IPV6));
+		ipv6h->payload_len = 0;
+		tcp_hdr(skb)->check =
+			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+					 IPPROTO_TCP, 0);
+		bnad->stats.tso6++;
+	}
+
+	return 0;
+}
+
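+/*
+ * Transmit path: each skb is posted to TxQ 0 as one or more 4-vector
+ * work items.  The skb and its DMA mappings are recorded in the unmap
+ * queue for completion handling, TSO/checksum offload flags are taken
+ * from the skb, and the TxQ doorbell is rung once all vectors of the
+ * frame have been written.
+ */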
+netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	struct bnad_unmap_q *unmap_q;
+	u16 txq_prod, vlan_tag = 0;
+	unsigned int unmap_prod, wis, wis_used, wi_range;
+	unsigned int vectors, vect_id, i, acked;
+	int err;
+	dma_addr_t dma_addr;
+	struct bna_txq_entry *txqent;
+	bna_txq_wi_ctrl_flag_t flags;
+
+	if (unlikely
+	    (skb->len <= ETH_HLEN || skb->len > BNAD_TX_MAX_DATA_PER_WI)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+	unmap_q = &txqinfo->skb_unmap_q;
+
+	vectors = 1 + skb_shinfo(skb)->nr_frags;
+	if (vectors > BNAD_TX_MAX_VECTORS) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	wis = BNAD_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
+	acked = 0;
+	if (unlikely
+	    (wis > BNA_Q_FREE_COUNT(txq) ||
+	     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+		if ((u16) (*txqinfo->hw_consumer_index) !=
+		    txq->q.consumer_index &&
+		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+			acked = bnad_free_txbufs(txqinfo,
+					(u16)(*txqinfo->hw_consumer_index));
+			bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+		} else
+			netif_stop_queue(netdev);
+
+		smp_mb();
+		/*
+		 * Check again to deal with race condition between
+		 * netif_stop_queue here, and netif_wake_queue in
+		 * interrupt handler which is not inside netif tx lock.
+		 */
+		if (likely
+		    (wis > BNA_Q_FREE_COUNT(txq) ||
+		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+			bnad->stats.netif_queue_stop++;
+			return NETDEV_TX_BUSY;
+		} else
+			netif_wake_queue(netdev);
+	}
+
+	unmap_prod = unmap_q->producer_index;
+	wis_used = 1;
+	vect_id = 0;
+	flags = 0;
+
+	txq_prod = txq->q.producer_index;
+	BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= txq->q.q_depth);
+	txqent->hdr.wi.reserved = 0;
+	txqent->hdr.wi.num_vectors = vectors;
+	txqent->hdr.wi.opcode =
+		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+		       BNA_TXQ_WI_SEND));
+
+	if (bnad_ipid_mode)
+		flags |= BNA_TXQ_WI_CF_IPID_MODE;
+
+	if (bnad->vlangrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = (u16) vlan_tx_tag_get(skb);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+	if (test_bit(BNAD_F_CEE_RUNNING, &bnad->flags)) {
+		vlan_tag =
+			(bnad->curr_priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+	}
+
+	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+	if (skb_is_gso(skb)) {
+		err = bnad_tso_prepare(bnad, skb);
+		if (err) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
+		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+		txqent->hdr.wi.l4_hdr_size_n_offset =
+			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+			      (tcp_hdrlen(skb) >> 2,
+			       skb_transport_offset(skb)));
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = 0;
+
+		txqent->hdr.wi.lso_mss = 0;
+
+		if (skb->protocol == htons(ETH_P_IP))
+			proto = ip_hdr(skb)->protocol;
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* XXX the nexthdr may not be TCP immediately. */
+			proto = ipv6_hdr(skb)->nexthdr;
+		}
+		if (proto == IPPROTO_TCP) {
+			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.tcpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+				   skb_transport_offset(skb) + tcp_hdrlen(skb));
+		} else if (proto == IPPROTO_UDP) {
+			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset =
+				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+				      (0, skb_transport_offset(skb)));
+			bnad->stats.udpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+				   skb_transport_offset(skb) +
+				   sizeof(struct udphdr));
+		} else {
+			err = skb_checksum_help(skb);
+			bnad->stats.csum_help++;
+			if (err) {
+				dev_kfree_skb(skb);
+				bnad->stats.csum_help_err++;
+				return NETDEV_TX_OK;
+			}
+		}
+	} else {
+		txqent->hdr.wi.lso_mss = 0;
+		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+	}
+
+	txqent->hdr.wi.flags = htons(flags);
+
+	txqent->hdr.wi.frame_length = htonl(skb->len);
+
+	unmap_q->unmap_array[unmap_prod].skb = skb;
+	BNA_ASSERT(skb_headlen(skb) <= BNAD_TX_MAX_DATA_PER_VECTOR);
+	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	dma_addr =
+		pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+			       PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+			   dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		if (++vect_id == BNAD_TX_MAX_VECTORS_PER_WI) {
+			vect_id = 0;
+			if (--wi_range)
+				txqent++;
+			else {
+				BNA_QE_INDX_ADD(txq_prod, wis_used,
+						txq->q.q_depth);
+				wis_used = 0;
+				BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent,
+						     wi_range);
+				BNA_ASSERT(wi_range &&
+					   wi_range <= txq->q.q_depth);
+			}
+			wis_used++;
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+		}
+
+		BNA_ASSERT(frag->size <= BNAD_TX_MAX_DATA_PER_VECTOR);
+		txqent->vector[vect_id].length = htons(frag->size);
+		BNA_ASSERT(unmap_q->unmap_array[unmap_prod].skb == NULL);
+		dma_addr =
+			pci_map_page(bnad->pcidev, frag->page,
+				     frag->page_offset, frag->size,
+				     PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+	}
+
+	unmap_q->producer_index = unmap_prod;
+	BNA_QE_INDX_ADD(txq_prod, wis_used, txq->q.q_depth);
+	txq->q.producer_index = txq_prod;
+
+	smp_mb();
+	bna_txq_prod_indx_doorbell(txq);
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index)
+		tasklet_schedule(&bnad->tx_free_tasklet);
+
+	return NETDEV_TX_OK;
+}
+
+struct net_device_stats *
+bnad_get_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &bnad->net_stats;
+	struct cna_stats_mac_rx *rxstats = &bnad->hw_stats->mac_rx_stats;
+	struct cna_stats_mac_tx *txstats = &bnad->hw_stats->mac_tx_stats;
+	int i;
+
+	memset(net_stats, 0, sizeof(*net_stats));
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			net_stats->rx_packets += bnad->rxq_table[i].rx_packets;
+			net_stats->rx_bytes += bnad->rxq_table[i].rx_bytes;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			net_stats->tx_packets += bnad->txq_table[i].tx_packets;
+			net_stats->tx_bytes += bnad->txq_table[i].tx_bytes;
+		}
+	}
+	net_stats->rx_errors =
+		rxstats->rx_fcs_error + rxstats->rx_alignment_error +
+		rxstats->rx_frame_length_error + rxstats->rx_code_error +
+		rxstats->rx_undersize;
+	net_stats->tx_errors = txstats->tx_fcs_error + txstats->tx_undersize;
+	net_stats->rx_dropped = rxstats->rx_drop;
+	net_stats->tx_dropped = txstats->tx_drop;
+	net_stats->multicast = rxstats->rx_multicast;
+	net_stats->collisions = txstats->tx_total_collision;
+
+	net_stats->rx_length_errors = rxstats->rx_frame_length_error;
+	net_stats->rx_crc_errors = rxstats->rx_fcs_error;
+	net_stats->rx_frame_errors = rxstats->rx_alignment_error;
+	/* recv'r fifo overrun */
+	net_stats->rx_fifo_errors = bnad->hw_stats->rxf_stats[0].frame_drops;
+
+	return net_stats;
+}
+
+/* Should be called with priv_lock held. */
+static void
+bnad_set_rx_mode_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (netdev->flags & IFF_PROMISC) {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+		bnad->config |= BNAD_CF_PROMISC;
+	} else {
+		bna_rxf_promiscuous(bnad->priv, BNAD_RX_FUNC_ID, BNA_DISABLE);
+		bnad->config &= ~BNAD_CF_PROMISC;
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		if (!(bnad->config & BNAD_CF_ALLMULTI)) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_DISABLE);
+			bnad->config |= BNAD_CF_ALLMULTI;
+		}
+	} else {
+		if (bnad->config & BNAD_CF_ALLMULTI) {
+			bna_rxf_mcast_filter(bnad->priv, BNAD_RX_FUNC_ID,
+					     BNA_ENABLE);
+			bnad->config &= ~BNAD_CF_ALLMULTI;
+		}
+	}
+
+	if (netdev->mc_count) {
+		struct mac *mcaddr_list;
+		struct dev_mc_list *mc;
+		int i;
+
+		mcaddr_list =
+			kzalloc((netdev->mc_count + 1) * sizeof(struct mac),
+				GFP_ATOMIC);
+		if (!mcaddr_list)
+			return;
+
+		mcaddr_list[0] = bna_bcast_addr;
+
+		mc = netdev->mc_list;
+		for (i = 1; mc && i < netdev->mc_count + 1; i++, mc = mc->next)
+			memcpy(&mcaddr_list[i], mc->dmi_addr,
+				sizeof(struct mac));
+
+		err = bna_rxf_mcast_mac_set_list(bnad->priv, BNAD_RX_FUNC_ID,
+			(const struct mac *)mcaddr_list,
+				 netdev->mc_count + 1);
+
+		/* XXX Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+
+		kfree(mcaddr_list);
+	}
+}
+
+static void
+bnad_set_rx_mode(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bnad_set_rx_mode_locked(netdev);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+/* Should be called with conf_lock held. */
+int
+bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 *mac_ptr,
+	       unsigned int cmd)
+{
+	int err = 0;
+	char message[BNA_MESSAGE_SIZE];
+	enum bna_status_e (*ucast_mac_func)(struct bna_dev *bna_dev,
+		unsigned int rxf_id, const struct mac *mac_addr_ptr) = NULL;
+
+	WARN_ON(in_interrupt());
+	if (!is_valid_ether_addr(mac_ptr))
+		return -EINVAL;
+
+	switch (cmd) {
+	case BNAD_UCAST_MAC_SET:
+		ucast_mac_func = bna_rxf_ucast_mac_set;
+		break;
+	case BNAD_UCAST_MAC_ADD:
+		ucast_mac_func = bna_rxf_ucast_mac_add;
+		break;
+	case BNAD_UCAST_MAC_DEL:
+		ucast_mac_func = bna_rxf_ucast_mac_del;
+		break;
+	}
+
+	init_completion(&bnad->ucast_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = ucast_mac_func(bnad->priv, rxf_id, (const struct mac *)mac_ptr);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err) {
+		if (err == BNA_AGAIN)
+			err = 0;
+		goto ucast_mac_exit;
+	}
+	wait_for_completion(&bnad->ucast_comp);
+	err = bnad->ucast_comp_status;
+	if (err == BFI_LL_CMD_NOT_EXEC)
+		err = 0;
+
+ucast_mac_exit:
+	if (err) {
+		sprintf(message, "%s unicast MAC address command %d failed: %d",
+			bnad->netdev->name, cmd, err);
+		DPRINTK(INFO, "%s", message);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Should be called with conf_lock held. */
+static int
+bnad_set_mac_address_locked(struct net_device *netdev, void *addr)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int err;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	err = bnad_ucast_mac(bnad, BNAD_RX_FUNC_ID, (u8 *) sa->sa_data,
+			     BNAD_UCAST_MAC_SET);
+	if (err)
+		return err;
+
+	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+	return 0;
+}
+
+static int
+bnad_set_mac_address(struct net_device *netdev, void *addr)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	err = bnad_set_mac_address_locked(netdev, addr);
+	bnad_conf_unlock();
+	return err;
+}
+
+static int
+bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int err = 0;
+	struct bnad *bnad = netdev_priv(netdev);
+
+	WARN_ON(in_interrupt());
+
+	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+		return -EINVAL;
+
+	bnad_conf_lock();
+	netdev->mtu = new_mtu;
+	err = bnad_sw_reset_locked(netdev);
+	bnad_conf_unlock();
+
+	return err;
+}
+
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	bnad->vlangrp = grp;
+	bnad_conf_unlock();
+}
+
+static void
+bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_conf_lock();
+	spin_lock_irq(&bnad->priv_lock);
+	if (bnad->state == BNAD_S_OPEN && !BNAD_NOT_READY(bnad))
+		bna_rxf_vlan_del(bnad->priv, BNAD_RX_FUNC_ID,
+				 (unsigned int)vid);
+	spin_unlock_irq(&bnad->priv_lock);
+	bnad_conf_unlock();
+}
+
+/* Should be called with priv_lock held. */
+static void
+bnad_reconfig_vlans(struct bnad *bnad)
+{
+	u16 vlan_id;
+
+	bna_rxf_vlan_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	if (bnad->vlangrp) {
+		for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
+			if (vlan_group_get_device(bnad->vlangrp, vlan_id))
+				bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+						 (unsigned int)vlan_id);
+		}
+	}
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+bnad_netpoll(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_cq_info *cqinfo;
+	int i;
+
+	if (!(bnad->config & BNAD_CF_MSIX)) {
+		disable_irq(bnad->pcidev->irq);
+		bnad_isr(bnad->pcidev->irq, netdev);
+		enable_irq(bnad->pcidev->irq);
+	} else {
+		for (i = 0; i < bnad->cq_num; i++) {
+			cqinfo = &bnad->cq_table[i];
+			bnad_disable_rx_irq(bnad, cqinfo);
+			bnad_poll_cq(bnad, cqinfo, BNAD_MAX_Q_DEPTH);
+			bnad_enable_rx_irq(bnad, cqinfo);
+		}
+	}
+}
+#endif
+
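+/*
+ * Derive the TxQ/RxQ/CQ/RxF counts and the number of MSI-X vectors.
+ * In MSI-X mode the CQ count comes from the requested RxQ sets or the
+ * number of online CPUs, rounded to a power of 2; in INTx mode a
+ * single CQ is used and no MSI-X vectors are reserved.
+ */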
+static void
+bnad_q_num_init(struct bnad *bnad, uint rxqsets)
+{
+	bnad->txq_num = BNAD_TXQ_NUM;
+	bnad->txf_num = 1;
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (rxqsets) {
+			bnad->cq_num = rxqsets;
+			if (bnad->cq_num > BNAD_MAX_CQS)
+				bnad->cq_num = BNAD_MAX_CQS;
+		} else
+			bnad->cq_num =
+				min((uint) num_online_cpus(),
+				    (uint) BNAD_MAX_RXQSETS_USED);
+		/* VMware does not use RSS like Linux driver */
+		if (!BNA_POWER_OF_2(bnad->cq_num))
+			BNA_TO_POWER_OF_2(bnad->cq_num);
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+
+		bnad->rxf_num = 1;
+		bnad->msix_num =
+			bnad->txq_num + bnad->cq_num +
+			BNAD_MSIX_ERR_MAILBOX_NUM;
+	} else {
+		bnad->cq_num = 1;
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+		bnad->rxf_num = 1;
+		bnad->msix_num = 0;
+	}
+}
+
+static void
+bnad_enable_msix(struct bnad *bnad)
+{
+	int i, ret;
+
+	if (!(bnad->config & BNAD_CF_MSIX) || bnad->msix_table)
+		return;
+
+	bnad->msix_table =
+		kzalloc(bnad->msix_num * sizeof(struct msix_entry), GFP_KERNEL);
+	if (!bnad->msix_table)
+		goto intx_mode;
+
+	for (i = 0; i < bnad->msix_num; i++)
+		bnad->msix_table[i].entry = i;
+
+	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+	if (ret > 0) {
+		/* Not enough MSI-X vectors. */
+		int rxqsets = ret;
+
+		dev_err(&bnad->pcidev->dev,
+			"Tried to get %d MSI-X vectors, only got %d\n",
+			bnad->msix_num, ret);
+		BNA_TO_POWER_OF_2(rxqsets);
+		while (bnad->msix_num > ret && rxqsets) {
+			bnad_q_num_init(bnad, rxqsets);
+			rxqsets >>= 1;
+		}
+		if (bnad->msix_num <= ret) {
+			ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+					      bnad->msix_num);
+			if (ret) {
+				dev_err(&bnad->pcidev->dev,
+					"Enabling MSI-X failed: %d\n", ret);
+				goto intx_mode;
+			}
+		} else {
+			dev_err(&bnad->pcidev->dev,
+				"Enabling MSI-X failed: limited (%d) vectors\n",
+				ret);
+			goto intx_mode;
+		}
+	} else if (ret < 0) {
+		dev_err(&bnad->pcidev->dev, "Enabling MSI-X failed: %d\n", ret);
+		goto intx_mode;
+	}
+
+	dev_info(&bnad->pcidev->dev,
+		 "Enabling MSI-X succeeded with %d vectors, %s\n",
+		 bnad->msix_num,
+		 (bnad->cq_num > 1) ? "RSS is enabled" : "RSS is not enabled");
+	return;
+
+intx_mode:
+	dev_warn(&bnad->pcidev->dev, "Switching to INTx mode with no RSS\n");
+
+	kfree(bnad->msix_table);
+	bnad->msix_table = NULL;
+
+	bnad->config &= ~BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, 0);
+}
+
+static void
+bnad_disable_msix(struct bnad *bnad)
+{
+	if (bnad->config & BNAD_CF_MSIX) {
+		pci_disable_msix(bnad->pcidev);
+		kfree(bnad->msix_table);
+		bnad->msix_table = NULL;
+		bnad->config &= ~BNAD_CF_MSIX;
+	}
+}
+
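+/*
+ * Handle a hardware error reported via the mailbox/IOC: tear down the
+ * data path and move to the corresponding _DOWN state so that a later
+ * IOC reset (bnad_resume_after_reset) can bring the port back.
+ */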
+static void
+bnad_error(struct bnad *bnad)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	if (!test_and_clear_bit(BNAD_F_HWERROR, &bnad->flags)) {
+		spin_unlock_irq(&bnad->priv_lock);
+		return;
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	switch (bnad->state) {
+	case BNAD_S_INIT:
+		bnad->state = BNAD_S_INIT_DOWN;
+		break;
+	case BNAD_S_OPEN:
+		bnad->state = BNAD_S_OPEN_DOWN;
+		bnad_stop_data_path(bnad, 1);
+		bnad_cleanup(bnad);
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BNA_ASSERT(0);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
+static void
+bnad_resume_after_reset(struct bnad *bnad)
+{
+	int err;
+	struct net_device *netdev = bnad->netdev;
+	char message[BNA_MESSAGE_SIZE];
+
+	switch (bnad->state) {
+	case BNAD_S_INIT_DOWN:
+		bnad->state = BNAD_S_INIT;
+
+		bna_port_mac_get(bnad->priv, (struct mac *)bnad->perm_addr);
+		BNA_ASSERT(netdev->addr_len == sizeof(bnad->perm_addr));
+		memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+		if (is_zero_ether_addr(netdev->dev_addr))
+			memcpy(netdev->dev_addr, bnad->perm_addr,
+			       netdev->addr_len);
+		break;
+	case BNAD_S_OPEN_DOWN:
+		err = bnad_enable_locked(bnad);
+		if (err) {
+			sprintf(message,
+				"%s bnad_enable failed after reset: %d",
+				bnad->netdev->name, err);
+			DPRINTK(INFO, "%s", message);
+		} else {
+			bnad_port_admin_locked(bnad, BNA_ENABLE);
+		}
+		break;
+	case BNAD_S_START:
+	case BNAD_S_INIT_DISABLING:
+	case BNAD_S_OPENING:
+	case BNAD_S_OPEN:
+	case BNAD_S_OPEN_DISABLING:
+	case BNAD_S_CLOSING:
+		BNA_ASSERT(0);
+		/* fall through */
+	default:
+		break;
+	}
+}
+
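+/*
+ * Tasklet that reclaims completed Tx buffers outside the hot xmit
+ * path: free skbs up to the hardware consumer index and ack the Tx IB.
+ * BNAD_TXQ_FREE_SENT guards against a concurrent free from xmit.
+ */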
+static void
+bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+	struct bnad *bnad = (struct bnad *)bnad_ptr;
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	unsigned int acked;
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+
+	if ((u16) (*txqinfo->hw_consumer_index) != txq->q.consumer_index &&
+	    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+		acked = bnad_free_txbufs(txqinfo,
+					 (u16)(*txqinfo->hw_consumer_index));
+		bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+	}
+}
+
+static void
+bnad_cee_reconfig_prio(struct bnad *bnad, u8 cee_linkup, unsigned int prio)
+{
+	if (prio != bnad->curr_priority) {
+		bnad_sw_reset_locked_internal(bnad->netdev);
+	} else {
+		spin_lock_irq(&bnad->priv_lock);
+		if (!cee_linkup)
+			clear_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		else
+			set_bit(BNAD_F_CEE_RUNNING, &bnad->flags);
+		spin_unlock_irq(&bnad->priv_lock);
+	}
+}
+
+static void
+bnad_link_state_notify(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+	unsigned int prio = 0;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->state != BNAD_S_OPEN) {
+		sprintf(message, "%s link state notification in state %d",
+			netdev->name, bnad->state);
+		DPRINTK(INFO, "%s", message);
+		return;
+	}
+
+	spin_lock_irq(&bnad->priv_lock);
+	link_state = bnad->link_state;
+	cee_linkup = bnad->cee_linkup;
+	if (cee_linkup)
+		prio = bnad->priority;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (link_state == BNAD_LS_UP) {
+		bnad_cee_reconfig_prio(bnad, cee_linkup, prio);
+		if (!netif_carrier_ok(netdev)) {
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			netif_carrier_off(netdev);
+			bnad->stats.netif_queue_stop++;
+		}
+	}
+}
+
+static void
+bnad_work(struct work_struct *work)
+{
+	struct bnad *bnad = container_of(work, struct bnad, work);
+	unsigned long work_flags;
+
+	bnad_conf_lock();
+
+	spin_lock_irq(&bnad->priv_lock);
+	work_flags = bnad->work_flags;
+	bnad->work_flags = 0;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (work_flags & BNAD_WF_ERROR)
+		bnad_error(bnad);
+	if (work_flags & BNAD_WF_RESETDONE)
+		bnad_resume_after_reset(bnad);
+
+	if (work_flags & BNAD_WF_LS_NOTIFY)
+		bnad_link_state_notify(bnad);
+
+	bnad_conf_unlock();
+}
+
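+/*
+ * Periodic statistics timer: kick off a hardware stats fetch, re-tune
+ * the Rx coalescing timers from the observed packet rate when dynamic
+ * coalescing is on, and refill any RxQ that has dropped below the
+ * refill threshold.
+ */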
+static void
+bnad_stats_timeo(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	int i;
+	struct bnad_rxq_info *rxqinfo;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_stats_get(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->rx_dyn_coalesce_on) {
+		u8 cls_timer;
+		struct bnad_cq_info *cq;
+		for (i = 0; i < bnad->cq_num; i++) {
+			cq = &bnad->cq_table[i];
+
+			if ((cq->pkt_rate.small_pkt_cnt == 0) &&
+			    (cq->pkt_rate.large_pkt_cnt == 0))
+				continue;
+
+			cls_timer =
+				bna_calc_coalescing_timer(bnad->priv,
+							  &cq->pkt_rate);
+
+			/* For NAPI, the coalescing timer needs to be stored */
+			cq->rx_coalescing_timeo = cls_timer;
+
+			bna_ib_coalescing_timer_set(bnad->priv, &cq->ib,
+						    cls_timer);
+		}
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		if (!(BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+					rxqinfo->skb_unmap_q.q_depth) >>
+		      BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) {
+			if (test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags))
+				continue;
+			bnad_alloc_rxbufs(rxqinfo);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+		}
+	}
+}
+
+static void
+bnad_free_ioc_mem(struct bnad *bnad)
+{
+	enum bna_dma_mem_type i;
+
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		if (bnad->ioc_meminfo[i].kva && bnad->ioc_meminfo[i].dma)
+			pci_free_consistent(bnad->pcidev,
+					    bnad->ioc_meminfo[i].len,
+					    bnad->ioc_meminfo[i].kva,
+					    *(dma_addr_t *) &bnad->
+					    ioc_meminfo[i].dma);
+		else if (bnad->ioc_meminfo[i].kva)
+			vfree(bnad->ioc_meminfo[i].kva);
+		bnad->ioc_meminfo[i].kva = NULL;
+	}
+}
+
+/* The following IOC callback functions are called with priv_lock held. */
+
+void
+bna_iocll_enable_cbfn(void *arg, enum bfa_status error)
+{
+	struct bnad *bnad = arg;
+
+	if (!error) {
+		bnad->work_flags &= ~BNAD_WF_LS_NOTIFY;
+		bnad->work_flags |= BNAD_WF_RESETDONE;
+
+		if (bnad->state != BNAD_S_UNLOADING)
+			schedule_work(&bnad->work);
+	}
+
+	bnad->ioc_comp_status = error;
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_disable_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	complete(&bnad->ioc_comp);
+}
+
+void
+bna_iocll_hbfail_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	bnad_hw_error(bnad, BFA_STATUS_IOC_FAILURE);
+}
+
+void
+bna_iocll_reset_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+	u32 int_status, int_mask;
+	unsigned int irq;
+
+	/* Clear the status */
+	bna_intr_status_get(bnad->priv, &int_status);
+
+	if (bnad->config & BNAD_CF_MSIX) {
+		if (test_and_clear_bit(BNAD_F_MBOX_IRQ_DISABLED,
+		    &bnad->flags)) {
+			irq = bnad->msix_table[bnad->msix_num - 1].vector;
+			enable_irq(irq);
+		}
+	}
+
+	int_mask = ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS);
+	bna_intx_enable(bnad->priv, int_mask);
+}
+
+static void
+bnad_ioc_timeout(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_timer(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->state != BNAD_S_UNLOADING)
+		mod_timer(&bnad->ioc_timer,
+			  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+}
+
+s32
+bnad_cee_attach(struct bnad *bnad)
+{
+	u8 *dma_kva;
+	dma_addr_t dma_pa;
+	struct bfa_cee *cee = &bnad->cee;
+
+	memset(cee, 0, sizeof(struct bfa_cee));
+
+	/* Allocate memory for dma */
+	dma_kva =
+		pci_alloc_consistent(bnad->pcidev, bfa_cee_meminfo(), &dma_pa);
+	if (dma_kva == NULL)
+		return -ENOMEM;
+
+	/* Ugly... need to remove once CAL is fixed. */
+	((struct bna_dev *) bnad->priv)->cee = cee;
+
+	bnad->cee_cbfn.get_attr_cbfn = bnad_cee_get_attr_cb;
+	bnad->cee_cbfn.get_stats_cbfn = bnad_cee_get_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = NULL;
+
+	/* Invoke cee attach function */
+	bfa_cee_attach(cee, &bnad->priv->ioc, bnad, bnad->trcmod, bnad->logmod);
+	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
+	return 0;
+}
+
+static void
+bnad_cee_detach(struct bnad *bnad)
+{
+	struct bfa_cee *cee = &bnad->cee;
+
+	if (cee->attr_dma.kva) {
+		pci_free_consistent(bnad->pcidev, bfa_cee_meminfo(),
+				    cee->attr_dma.kva, cee->attr_dma.pa);
+	}
+	bfa_cee_detach(&bnad->cee);
+}
+
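+/*
+ * Per-port init: allocate trace/log modules, the BNA handle, hardware stats
+ * and IOC memory, set up interrupts and timers, then enable the IOC and wait
+ * for it to come up.
+ */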
+static int
+bnad_priv_init(struct bnad *bnad)
+{
+	dma_addr_t dma_addr;
+	struct bna_dma_addr bna_dma_addr;
+	int err = 0, i;
+	struct bfa_pcidev pcidev_info;
+	u32 intr_mask;
+
+	if (bnad_msix)
+		bnad->config |= BNAD_CF_MSIX;
+	bnad_q_num_init(bnad, bnad_rxqsets_used);
+
+	bnad->work_flags = 0;
+	INIT_WORK(&bnad->work, bnad_work);
+
+	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+		     (unsigned long)bnad);
+
+	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
+		    (unsigned long)bnad);
+
+	bnad->tx_coalescing_timeo = BNAD_TX_COALESCING_TIMEO;
+	bnad->tx_interpkt_count = BNAD_TX_INTERPKT_COUNT;
+
+	bnad->rx_coalescing_timeo = BNAD_RX_COALESCING_TIMEO;
+	bnad->rx_interpkt_count = BNAD_RX_INTERPKT_COUNT;
+	bnad->rx_interpkt_timeo = BNAD_RX_INTERPKT_TIMEO;
+
+	bnad->rx_dyn_coalesce_on = BNA_TRUE;
+
+	bnad->rx_csum = 1;
+	bnad->pause_config.tx_pause = 0;
+	bnad->pause_config.rx_pause = 0;
+
+	/* XXX could be vmalloc? */
+	bnad->trcmod = kzalloc(sizeof(struct bfa_trc_mod), GFP_KERNEL);
+	if (!bnad->trcmod) {
+		printk(KERN_ERR "port %u failed allocating trace buffer!\n",
+		       bnad->bna_id);
+		return -ENOMEM;
+	}
+
+	bfa_trc_init(bnad->trcmod);
+
+	bnad->logmod = NULL;
+
+
+	bnad->priv = kzalloc(bna_get_handle_size(), GFP_KERNEL);
+	if (!bnad->priv) {
+		printk(KERN_ERR "port %u failed allocating memory for bna\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_trcmod;
+	}
+	bnad->priv_stats =
+		pci_alloc_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+				     &dma_addr);
+	if (!bnad->priv_stats) {
+		printk(KERN_ERR
+		       "port %u failed allocating memory for bna stats\n",
+		       bnad->bna_id);
+		err = -ENOMEM;
+		goto free_priv_mem;
+	}
+	pci_unmap_addr_set(bnad, priv_stats_dma, dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &bna_dma_addr);
+	bna_init(bnad->priv, (void *)bnad->bar0, bnad->priv_stats, bna_dma_addr,
+		 bnad->trcmod, bnad->logmod);
+	bna_all_stats_get(bnad->priv, &bnad->hw_stats);
+
+	spin_lock_init(&bnad->priv_lock);
+	init_MUTEX(&bnad->conf_sem);
+	bnad->priv_cbfn.ucast_set_cb = bnad_ucast_set_cb;
+	bnad->priv_cbfn.txq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.rxq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.link_up_cb = bnad_link_up_cb;
+	bnad->priv_cbfn.link_down_cb = bnad_link_down_cb;
+	bnad->priv_cbfn.stats_get_cb = bnad_stats_get_cb;
+	bnad->priv_cbfn.hw_error_cb = bnad_hw_error_cb;
+	bnad->priv_cbfn.lldp_get_cfg_cb = bnad_lldp_get_cfg_cb;
+
+	bna_register_callback(bnad->priv, &bnad->priv_cbfn, bnad);
+
+	bna_iocll_meminfo(bnad->priv, bnad->ioc_meminfo);
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		switch (i) {
+		case BNA_KVA_MEM_T_FWTRC:
+			bnad->ioc_meminfo[i].kva =
+				vmalloc(bnad->ioc_meminfo[i].len);
+			break;
+		default:
+			bnad->ioc_meminfo[i].kva =
+				pci_alloc_consistent(bnad->pcidev,
+						     bnad->ioc_meminfo[i].len,
+						     (dma_addr_t *)
+						     &bnad->ioc_meminfo[i].dma);
+
+			break;
+		}
+		if (!bnad->ioc_meminfo[i].kva) {
+			printk(KERN_ERR
+			       "port %u failed allocating %u "
+			       "bytes memory for IOC\n",
+			       bnad->bna_id, bnad->ioc_meminfo[i].len);
+			err = -ENOMEM;
+			goto free_ioc_mem;
+		} else
+			memset(bnad->ioc_meminfo[i].kva, 0,
+			       bnad->ioc_meminfo[i].len);
+	}
+
+	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+	pcidev_info.device_id = bnad->pcidev->device;
+	pcidev_info.pci_bar_kva = bnad->bar0;
+	bna_iocll_attach(bnad->priv, bnad, bnad->ioc_meminfo, &pcidev_info,
+			 bnad->trcmod, NULL, bnad->logmod);
+
+	err = bnad_cee_attach(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u cee_attach failed: %d\n", bnad->bna_id,
+		       err);
+		goto iocll_detach;
+	}
+
+	if (bnad->config & BNAD_CF_MSIX)
+		bnad_enable_msix(bnad);
+	else
+		dev_info(&bnad->pcidev->dev, "Working in INTx mode, no RSS\n");
+	bna_intx_disable(bnad->priv, &intr_mask);
+	err = bnad_request_mbox_irq(bnad);
+	if (err)
+		goto disable_msix;
+
+	setup_timer(&bnad->ioc_timer, bnad_ioc_timeout,
+		    (unsigned long)bnad);
+	mod_timer(&bnad->ioc_timer, jiffies +
+		  msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+
+	bnad_conf_lock();
+	bnad->state = BNAD_S_START;
+
+	init_completion(&bnad->ioc_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_enable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	wait_for_completion(&bnad->ioc_comp);
+
+	if (!bnad->ioc_comp_status) {
+		bnad->state = BNAD_S_INIT;
+		bna_port_mac_get(bnad->priv, (struct mac *)bnad->perm_addr);
+	} else {
+		bnad->state = BNAD_S_INIT_DOWN;
+	}
+	bnad_conf_unlock();
+
+	return 0;
+
+disable_msix:
+	bnad_disable_msix(bnad);
+	bnad_cee_detach(bnad);
+iocll_detach:
+	bna_iocll_detach(bnad->priv);
+free_ioc_mem:
+	bnad_free_ioc_mem(bnad);
+	bna_uninit(bnad->priv);
+	pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE, bnad->priv_stats,
+			    pci_unmap_addr(bnad, priv_stats_dma));
+	bnad->priv_stats = NULL;
+free_priv_mem:
+	kfree(bnad->priv);
+	bnad->priv = NULL;
+free_trcmod:
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+
+	return err;
+}
+
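+/* Undo bnad_priv_init(): disable the IOC first, then free all resources. */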
+static void
+bnad_priv_uninit(struct bnad *bnad)
+{
+	int i;
+	enum bna_status_e err;
+	char message[BNA_MESSAGE_SIZE];
+
+	if (bnad->priv) {
+
+
+		init_completion(&bnad->ioc_comp);
+
+		for (i = 0; i < 10; i++) {
+			spin_lock_irq(&bnad->priv_lock);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err || err == BNA_BUSY);
+			if (!err)
+				break;
+			msleep(1000);
+		}
+		if (err) {
+			/* Probably firmware crashed. */
+			sprintf(message,
+				"bna_iocll_disable failed, "
+				"clean up and try again");
+			DPRINTK(INFO, "%s", message);
+			spin_lock_irq(&bnad->priv_lock);
+			bna_cleanup(bnad->priv);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err);
+		}
+		wait_for_completion(&bnad->ioc_comp);
+
+		sprintf(message, "port %u IOC is disabled", bnad->bna_id);
+		DPRINTK(INFO, "%s", message);
+
+		bnad->state = BNAD_S_UNLOADING;
+
+		/* Stop the timer after disabling IOC. */
+		del_timer_sync(&bnad->ioc_timer);
+		bnad_free_ioc_mem(bnad);
+		bna_iocll_detach(bnad->priv);
+
+		flush_scheduled_work();
+		bnad_free_mbox_irq(bnad);
+
+		bnad_disable_msix(bnad);
+
+		bnad_cee_detach(bnad);
+
+
+		bna_uninit(bnad->priv);
+		if (bnad->priv_stats) {
+			pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+					    bnad->priv_stats,
+					    pci_unmap_addr(bnad,
+							   priv_stats_dma));
+			bnad->priv_stats = NULL;
+		}
+		kfree(bnad->priv);
+		bnad->priv = NULL;
+	}
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+}
+
+static struct pci_device_id bnad_pci_id_table[] = {
+	{
+	 .vendor = PCI_VENDOR_ID_BROCADE,
+	 .device = PCI_DEVICE_ID_BROCADE_CATAPULT,
+	 .subvendor = PCI_ANY_ID,
+	 .subdevice = PCI_ANY_ID,
+	 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+	 .class_mask = 0xffff00},
+	{0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
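+/* PCI probe: map BAR0, allocate the net_device, init the port, register it. */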
+static int __devinit
+bnad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pcidev_id)
+{
+	int err, using_dac;
+	struct net_device *netdev;
+	struct bnad *bnad;
+	unsigned long mmio_start, mmio_len;
+	static u32 bna_id;
+
+	printk(KERN_INFO "bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+	if (!bfad_get_firmware_buf(pdev)) {
+		printk(KERN_WARNING "Failed to load Firmware Image!\n");
+		return -ENODEV;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, BNAD_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed: %d\n", err);
+		goto disable_device;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev,
+				DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev,
+					"set 32bit consistent DMA mask failed: "
+					"%d\n", err);
+				goto release_regions;
+			}
+		}
+		using_dac = 0;
+	}
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct bnad));
+	if (!netdev) {
+		dev_err(&pdev->dev, "alloc_etherdev failed\n");
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	SET_MODULE_OWNER(netdev);
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	pci_set_drvdata(pdev, netdev);
+
+	bnad = netdev_priv(netdev);
+
+	memset(bnad, 0, sizeof(struct bnad));
+
+	bnad->netdev = netdev;
+	bnad->pcidev = pdev;
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bnad->bar0 = ioremap_nocache(mmio_start, mmio_len);
+	if (!bnad->bar0) {
+		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+		err = -ENOMEM;
+		goto free_devices;
+	}
+	printk(KERN_INFO "bar0 mapped to %p, len %lu\n", bnad->bar0, mmio_len);
+
+	netdev->netdev_ops = &bnad_netdev_ops;
+
+	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_IPV6_CSUM;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+
+#ifdef BNAD_VLAN_FEATURES
+	netdev->vlan_features = netdev->features;
+#endif
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |=
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len - 1;
+
+	bnad_set_ethtool_ops(netdev);
+
+	bnad->bna_id = bna_id;
+	err = bnad_priv_init(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
+		goto unmap_bar0;
+	}
+
+	BNA_ASSERT(netdev->addr_len == ETH_ALEN);
+	memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+	memcpy(netdev->dev_addr, bnad->perm_addr, netdev->addr_len);
+
+	netif_carrier_off(netdev);
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR "port %u register_netdev failed: %d\n",
+		       bnad->bna_id, err);
+		goto bnad_device_uninit;
+	}
+	bna_id++;
+	return 0;
+
+bnad_device_uninit:
+	bnad_priv_uninit(bnad);
+unmap_bar0:
+	iounmap(bnad->bar0);
+free_devices:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+release_regions:
+	pci_release_regions(pdev);
+disable_device:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void __devexit
+bnad_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct bnad *bnad;
+
+	if (!netdev)
+		return;
+
+	printk(KERN_INFO "%s bnad_pci_remove\n", netdev->name);
+	bnad = netdev_priv(netdev);
+
+
+	unregister_netdev(netdev);
+
+	bnad_priv_uninit(bnad);
+	iounmap(bnad->bar0);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver bnad_pci_driver = {
+	.name = BNAD_NAME,
+	.id_table = bnad_pci_id_table,
+	.probe = bnad_pci_probe,
+	.remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init
+bnad_module_init(void)
+{
+
+	printk(KERN_INFO "Module bna is loaded at 0x%p\n",
+	       __this_module.module_core);
+
+	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+	return pci_register_driver(&bnad_pci_driver);
+}
+
+static void __exit
+bnad_module_exit(void)
+{
+	pci_unregister_driver(&bnad_pci_driver);
+
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.h net-next-2.6-mod/drivers/net/bna/bnad.h
--- net-next-2.6-orig/drivers/net/bna/bnad.h	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.h	2009-11-12 19:03:38.446395000 -0800
@@ -0,0 +1,350 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef _BNAD_H_
+#define _BNAD_H_
+
+#include <cee/bfa_cee.h>
+#include "bna.h"
+
+#include "bnad_compat.h"
+
+#define BNAD_MAX_Q_DEPTH	0x10000
+#define BNAD_MIN_Q_DEPTH	0x200
+
+#define BNAD_TXQ_NUM		1
+#define BNAD_TX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_TXQ	2048
+
+#define BNAD_MAX_RXQS		64
+#define BNAD_MAX_RXQSETS_USED	16
+#define BNAD_RX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_RXQ	2048
+
+#define BNAD_MAX_CQS		64
+#define BNAD_MAX_RXQS_PER_CQ	2
+
+#define BNAD_MSIX_ERR_MAILBOX_NUM	1
+
+#define BNAD_INTX_MAX_IB_NUM	16
+#define BNAD_INTX_IB_NUM	2	/* 1 for Tx, 1 for Rx */
+#define BNAD_INTX_TX_IB_ID	0
+#define BNAD_INTX_RX_IB_ID	1
+
+#define BNAD_QUEUE_NAME_SIZE	16
+
+#define BNAD_JUMBO_MTU		9000
+
+#define BNAD_COALESCING_TIMER_UNIT	5	/* 5us */
+#define BNAD_MAX_COALESCING_TIMEO	0xFF	/* in 5us units */
+#define BNAD_MAX_INTERPKT_COUNT		0xFF
+#define BNAD_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
+
+#define BNAD_TX_COALESCING_TIMEO	20	/* 20 * 5 = 100us */
+#define BNAD_TX_INTERPKT_COUNT		32
+
+#define BNAD_RX_COALESCING_TIMEO	12	/* 12 * 5 = 60us */
+#define BNAD_RX_INTERPKT_COUNT		6
+#define BNAD_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
+
+#define BNAD_SMALL_RXBUF_SIZE	128
+
+#define BNAD_RIT_OFFSET		0
+#define BNAD_MULTICAST_RXQ_ID	0
+
+#define BNAD_NETIF_WAKE_THRESHOLD	8
+
+#define BNAD_TX_MAX_VECTORS		255
+#define BNAD_TX_MAX_VECTORS_PER_WI	4
+#define BNAD_TX_MAX_DATA_PER_WI		0xFFFFFF	/* 24 bits */
+#define BNAD_TX_MAX_DATA_PER_VECTOR	0x3FFF	/* 14 bits */
+#define BNAD_TX_MAX_WRR_QUOTA		0xFFF	/* 12 bits */
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
+
+#define BNAD_NOT_READY(_bnad)	test_bit(BNAD_F_HWERROR, &(_bnad)->flags)
+#define BNAD_ADMIN_DOWN(_bnad)	(!netif_running((_bnad)->netdev) ||	\
+	test_bit(BNAD_F_BCU_DISABLED, &(_bnad)->flags))
+
+#define BNAD_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)	\
+	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define bnad_conf_lock()	down(&bnad->conf_sem)
+#define bnad_conf_unlock()	up(&bnad->conf_sem)
+
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_ct;
+extern u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+
+struct bnad_skb_unmap {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+	u32 producer_index;
+	u32 consumer_index;
+	struct bnad_skb_unmap *unmap_array;
+	u32 q_depth;
+};
+
+struct bnad_ib_entry {
+	struct bna_ib *ib;
+	void *ib_seg_addr;
+	struct bna_ib_config ib_config;
+};
+
+struct bnad_txq_info {
+	unsigned long flags;
+#define BNAD_TXQ_FREE_SENT	0
+	struct bna_txq txq;
+	struct bna_ib ib;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct bnad *bnad;
+	volatile u32 *hw_consumer_index;
+	struct bna_txq_config txq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_rxq_info {
+	unsigned long flags;
+#define BNAD_RXQ_REFILL		0
+	struct bna_rxq rxq;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_packets_with_error;
+	u64 rxbuf_alloc_failed;
+	struct bnad *bnad;
+	u32 rxq_id;
+	struct bna_rxq_config rxq_config;
+} ____cacheline_aligned;
+
+struct bnad_cq_info {
+	struct bna_cq cq;
+	struct bna_ib ib;
+	struct bnad *bnad;
+	struct bna_pkt_rate pkt_rate;
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	volatile u32 *hw_producer_index;
+	struct napi_struct napi;
+	u32 cq_id;
+	struct bna_cq_config cq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+} ____cacheline_aligned;
+
+struct bnad_txf_info {
+	u32 txf_id;
+	struct bna_txf_config txf_config;
+};
+
+struct bnad_rxf_info {
+	u32 rxf_id;
+	struct bna_rxf_config rxf_config;
+};
+
+enum bnad_ucast_cmd {
+	BNAD_UCAST_MAC_SET,
+	BNAD_UCAST_MAC_ADD,
+	BNAD_UCAST_MAC_DEL
+};
+
+
+enum bnad_state {
+	BNAD_S_START = 0,
+	BNAD_S_INIT = 1,
+	BNAD_S_INIT_DOWN = 2,
+	BNAD_S_INIT_DISABLING = 3,
+	BNAD_S_INIT_DISABLED = 4,
+	BNAD_S_OPENING = 5,
+	BNAD_S_OPEN = 6,
+	BNAD_S_OPEN_DOWN = 7,
+	BNAD_S_OPEN_DISABLING = 8,
+	BNAD_S_OPEN_DISABLED = 9,
+	BNAD_S_CLOSING = 10,
+	BNAD_S_UNLOADING = 11
+};
+
+enum bnad_link_state {
+	BNAD_LS_DOWN = 0,
+	BNAD_LS_UP = 1
+};
+struct bnad {
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	struct bna_dev *priv;
+
+	enum bnad_state state;
+	unsigned long flags;
+#define BNAD_F_BCU_DISABLED		0
+#define BNAD_F_HWERROR			1
+#define BNAD_F_MBOX_IRQ_DISABLED	2
+#define BNAD_F_CEE_RUNNING		3
+
+	unsigned int config;
+#define BNAD_CF_MSIX		0x01
+#define BNAD_CF_PROMISC		0x02
+#define BNAD_CF_ALLMULTI		0x04
+#define BNAD_CF_TXQ_DEPTH	0x10
+#define BNAD_CF_RXQ_DEPTH	0x20
+
+	unsigned int priority;
+	unsigned int curr_priority;	/* currently applied priority */
+
+	enum bnad_link_state link_state;
+	u8 cee_linkup;
+
+	uint txq_num;
+	uint txq_depth;
+	struct bnad_txq_info *txq_table;
+
+	struct tasklet_struct tx_free_tasklet;	/* For Tx cleanup */
+
+	uint rxq_num;
+	uint rxq_depth;
+	struct bnad_rxq_info *rxq_table;
+	uint cq_num;
+	struct bnad_cq_info *cq_table;
+
+	struct vlan_group *vlangrp;
+
+	u32 rx_csum;
+
+	uint msix_num;
+	struct msix_entry *msix_table;
+
+	uint ib_num;
+	struct bnad_ib_entry *ib_table;
+
+	struct bna_rit_entry *rit;	/* RxQ Indirection Table */
+
+	spinlock_t priv_lock ____cacheline_aligned;
+
+	uint txf_num;
+	struct bnad_txf_info *txf_table;
+	uint rxf_num;
+	struct bnad_rxf_info *rxf_table;
+
+	struct timer_list stats_timer;
+	struct net_device_stats net_stats;
+
+	u8 tx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 tx_interpkt_count;
+
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 rx_interpkt_count;
+	u8 rx_interpkt_timeo;	/* 4 bits, unit is 0.5usec. */
+
+	u8 rx_dyn_coalesce_on;	/* Rx Dynamic Intr Moderation Flag */
+
+	u8 ref_count;
+
+	u8 lldp_comp_status;
+	u8 cee_stats_comp_status;
+	u8 cee_reset_stats_status;
+	u8 ucast_comp_status;
+	u8 qstop_comp_status;
+
+	int ioc_comp_status;
+
+	struct bna_pause_config pause_config;
+
+	struct bna_stats *hw_stats;
+	struct bnad_drv_stats stats;
+
+	struct work_struct work;
+	unsigned int work_flags;
+#define BNAD_WF_ERROR		0x1
+#define BNAD_WF_RESETDONE	0x2
+#define BNAD_WF_CEE_PRIO	0x4
+#define BNAD_WF_LS_NOTIFY	0x8
+
+	struct completion lldp_comp;
+	struct completion cee_stats_comp;
+	struct completion cee_reset_stats_comp;
+	struct completion ucast_comp;
+	struct completion qstop_comp;
+	struct completion ioc_comp;
+
+	u32 bna_id;
+	u8 __iomem *bar0;	/* registers */
+	unsigned char perm_addr[ETH_ALEN];
+
+	void *priv_stats;
+	DECLARE_PCI_UNMAP_ADDR(priv_stats_dma)
+
+	struct bfa_trc_mod *trcmod;
+	struct bfa_log_mod *logmod;
+	struct bna_meminfo ioc_meminfo[BNA_MEM_T_MAX];
+	struct timer_list ioc_timer;
+	struct semaphore conf_sem;
+
+	struct bna_mbox_cbfn priv_cbfn;
+
+	char adapter_name[64];
+	char port_name[64];
+
+	/* CEE Stuff */
+	struct bfa_cee_cbfn cee_cbfn;
+	struct bfa_cee cee;
+
+	struct list_head list_entry;
+};
+
+extern uint bnad_rxqs_per_cq;
+
+extern struct semaphore bnad_list_sem;
+extern struct list_head bnad_list;
+
+int bnad_open(struct net_device *netdev);
+int bnad_stop(struct net_device *netdev);
+int bnad_stop_locked(struct net_device *netdev);
+int bnad_open_locked(struct net_device *netdev);
+int bnad_sw_reset_locked(struct net_device *netdev);
+int bnad_ioc_disabling_locked(struct bnad *bnad);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_ioctl_init(void);
+void bnad_ioctl_exit(void);
+struct net_device_stats *bnad_get_stats(struct net_device *netdev);
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id, u8 *mac_ptr,
+		   unsigned int cmd);
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss);
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id);
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask);
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_cq_init(struct bnad *bnad, uint cq_id);
+void bnad_setup_cq(struct bnad *bnad, uint cq_id);
+void bnad_setup_ib(struct bnad *bnad, uint ib_id);
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id);
+void bnad_free_ib(struct bnad *bnad, uint ib_id);
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id);
+u32 bnad_get_msglevel(struct net_device *netdev);
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel);
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth);
+void bnad_free_cq(struct bnad *bnad, uint cq_id);
+void bnad_add_to_list(struct bnad *bnad);
+void bnad_remove_from_list(struct bnad *bnad);
+struct bnad *get_bnadev(int bna_id);
+int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _BNAD_H_ */

^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-03 18:24   ` Rasesh Mody
@ 2009-11-04  0:31     ` Joe Perches
  0 siblings, 0 replies; 30+ messages in thread
From: Joe Perches @ 2009-11-04  0:31 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, Adapter Linux Open SRC Team, Greg Kroah-Hartman

On Tue, 2009-11-03 at 10:24 -0800, Rasesh Mody wrote:
> Joe,
> Thanks for your input. We are in the process of addressing the comments that we are getting.

Hi Rasesh.

Thanks for bringing this out to netdev.

I think that with a few hours of cleanup, the code
would conform much better to Linux style.  But right now,
it looks a bit odd with too many indirections.

> Can you please give examples or elaborate on your comment? It would be really helpful.

OS dependent includes?  Most of them are senseless.

All of bfa_os_inc.h should go elsewhere or be dropped.
All of bna_os should go elsewhere or be dropped.

drop all bna_os_ prefixes

drop all bfa_os_ntohs, etc:

sed -r -i -e 's/\bbfa_os_([nh])to([nh])([sl])\b/\1to\2\3/g' *

Redefine true/false? why?

sed -r -i -e 's/\bbfa_boolean_t\b/bool/g' *
sed -r -i -e 's/\bBFA_TRUE\b/true/g' *
sed -r -i -e 's/\bBFA_FALSE\b/false/g' *

Don't suffix struct names with _s

sed -r -i -e 's/\bstruct\b\s+(\w+)\s+(\w+)_s/struct \1 \2/g' *

bfa_panic -> bfa_os_panic -> nothing

a laundry list like that...

I think it should go into staging for a few weeks, and then
it would be ready to be integrated into a mainline release.

cheers,  Joe 


^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-01  5:23 ` Joe Perches
  2009-11-01 19:25   ` Stephen Hemminger
@ 2009-11-03 18:24   ` Rasesh Mody
  2009-11-04  0:31     ` Joe Perches
  1 sibling, 1 reply; 30+ messages in thread
From: Rasesh Mody @ 2009-11-03 18:24 UTC (permalink / raw)
  To: Joe Perches; +Cc: netdev, Adapter Linux Open SRC Team, Greg Kroah-Hartman

Joe,
Thanks for your input. We are in the process of addressing the comments that we are getting.

Can you please give examples or elaborate on your comment? It would be really helpful.
Thanks,
-- Rasesh

-----Original Message-----
From: Joe Perches [mailto:joe@perches.com] 
Sent: Saturday, October 31, 2009 10:24 PM
To: Rasesh Mody
Cc: netdev@vger.kernel.org; Adapter Linux Open SRC Team; Greg Kroah-Hartman
Subject: Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver

There are an awful lot of non linux standard
uses in this code set.

Perhaps staging would be a good place to start?


^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-01  8:02 ` Eric Dumazet
@ 2009-11-03  7:54   ` Debashis Dutt
  0 siblings, 0 replies; 30+ messages in thread
From: Debashis Dutt @ 2009-11-03  7:54 UTC (permalink / raw)
  To: Eric Dumazet, Rasesh Mody; +Cc: netdev, Adapter Linux Open SRC Team

Hi Eric, 

Thanks for your feedback. We are working on addressing these issues.

--Debashis

-----Original Message-----
From: Eric Dumazet [mailto:eric.dumazet@gmail.com] 
Sent: Sunday, November 01, 2009 1:02 AM
To: Rasesh Mody
Cc: netdev@vger.kernel.org; Adapter Linux Open SRC Team
Subject: Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver

Rasesh Mody wrote:
> From: Rasesh Mody <rmody@brocade.com>
> 
> This is patch 1/6 which contains linux driver source for
> Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
> Re-based source against net-next-2.6 and re-submitting the
> patch with a few fixes.
> 
> We wish this patch to be considered for inclusion in net-next-2.6

bnad->netdev->last_rx = jiffies

and

netdev->trans_start = jiffies;

are not necessary; the core network code handles last_rx/trans_start itself
and more efficiently nowadays.

void bnad_reset_stats(struct net_device *netdev) seems defined but not used.



^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-03  3:14     ` Debashis Dutt
@ 2009-11-03  3:34       ` Greg KH
  0 siblings, 0 replies; 30+ messages in thread
From: Greg KH @ 2009-11-03  3:34 UTC (permalink / raw)
  To: Debashis Dutt
  Cc: Stephen Hemminger, Joe Perches, Rasesh Mody, netdev,
	Adapter Linux Open SRC Team

On Mon, Nov 02, 2009 at 07:14:20PM -0800, Debashis Dutt wrote:
> Hi Stephen, 
> 
> This driver is written explicitly for Linux and we are working on addressing your
> comments.
> 
> Please let us know if we need to look at some other areas.

Do you want me to put it in the drivers/staging/ tree now so you will
have a place to work on it in the kernel tree and others can use it as
well?

thanks,

greg k-h

^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-01 19:25   ` Stephen Hemminger
@ 2009-11-03  3:14     ` Debashis Dutt
  2009-11-03  3:34       ` Greg KH
  0 siblings, 1 reply; 30+ messages in thread
From: Debashis Dutt @ 2009-11-03  3:14 UTC (permalink / raw)
  To: Stephen Hemminger, Joe Perches
  Cc: Rasesh Mody, netdev, Adapter Linux Open SRC Team, Greg Kroah-Hartman

Hi Stephen, 

This driver is written explicitly for Linux and we are working on addressing your
comments.

Please let us know if we need to look at some other areas.

Thanks
--Debashis

-----Original Message-----
From: Stephen Hemminger [mailto:shemminger@vyatta.com] 
Sent: Sunday, November 01, 2009 11:26 AM
To: Joe Perches
Cc: Rasesh Mody; netdev@vger.kernel.org; Adapter Linux Open SRC Team; Greg Kroah-Hartman
Subject: Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver

On Sat, 31 Oct 2009 22:23:59 -0700
Joe Perches <joe@perches.com> wrote:

> There are an awful lot of non linux standard
> uses in this code set.
> 
> Perhaps staging would be a good place to start?
> 

Yup, it looks like a Windows driver port

^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-01 19:19 ` Stephen Hemminger
@ 2009-11-03  3:05   ` Debashis Dutt
  0 siblings, 0 replies; 30+ messages in thread
From: Debashis Dutt @ 2009-11-03  3:05 UTC (permalink / raw)
  To: Stephen Hemminger, Rasesh Mody; +Cc: netdev, Adapter Linux Open SRC Team

Hi Stephen, 

Thanks for your feedback. Please 
see the responses inline.

Thanks

-----Original Message-----
From: Stephen Hemminger [mailto:shemminger@vyatta.com] 
Sent: Sunday, November 01, 2009 11:20 AM
To: Rasesh Mody
Cc: netdev@vger.kernel.org; Adapter Linux Open SRC Team
Subject: Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver

Too many configuration options?
[Debashis] 
Yes, we are working on incorporating your feedback.

On Sat, 31 Oct 2009 22:03:14 -0700
Rasesh Mody <rmody@brocade.com> wrote:

> +
> +#ifdef BNAD_NO_IP_ALIGN
> +#define BNAD_NET_IP_ALIGN 0
> +#else
> +#define BNAD_NET_IP_ALIGN NET_IP_ALIGN
> +#endif
>

Why is this device special?
[Debashis] 
Will remove



> +
> +
> +#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
> +

Module parameters mean the hardware or the developer could not
decide how to do it right.  Please reduce or eliminate most of these.


> +static uint bnad_msix = 1;
> +module_param(bnad_msix, uint, 0444);
> +MODULE_PARM_DESC(bnad_msix, "Enable MSI-X");

If msi-X is available use it, if not then don't. User can handle
this globally with kernel command line option.

> +uint bnad_small_large_rxbufs = 1;
> +module_param(bnad_small_large_rxbufs, uint, 0444);
> +MODULE_PARM_DESC(bnad_small_large_rxbufs, "Enable small/large buffer receive");

Do or do not, please no config option.  The ideal case is:
  normal MTU == skb
  jumbo MTU = skb with fragments
[Debashis] 
Will address this.

> +static uint bnad_rxqsets_used;
> +module_param(bnad_rxqsets_used, uint, 0444);
> +MODULE_PARM_DESC(bnad_rxqsets_used, "Number of RxQ sets to be used");
> +
> +static uint bnad_ipid_mode;
> +module_param(bnad_ipid_mode, uint, 0444);
> +MODULE_PARM_DESC(bnad_ipid_mode, "0 - Use IP ID 0x0000 - 0x7FFF for LSO; "
> +    "1 - Use full range of IP ID for LSO");

Gack!
[Debashis] 
Will remove this


> +uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
> +module_param(bnad_txq_depth, uint, 0444);
> +MODULE_PARM_DESC(bnad_txq_depth, "Maximum number of entries per TxQ");

Should be ethtool configuration not module parameters
[Debashis] 
Yes, sure.

> +uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
> +module_param(bnad_rxq_depth, uint, 0444);
> +MODULE_PARM_DESC(bnad_rxq_depth, "Maximum number of entries per RxQ");
> +
> +static uint bnad_vlan_strip = 1;
> +module_param(bnad_vlan_strip, uint, 0444);
> +MODULE_PARM_DESC(bnad_vlan_strip, "Let the hardware strip off VLAN header");

Just do VLAN acceleration.
[Debashis] 
Yes, sure

> +static uint bnad_log_level = LOG_WARN_LEVEL;
> +module_param(bnad_log_level, uint, 0644);
> +MODULE_PARM_DESC(bnad_log_level, "Log level");

Use ethtool msg_level for this
[Debashis] 
Will do.

> +static uint bnad_ioc_auto_recover = 1;
> +module_param(bnad_ioc_auto_recover, uint, 0644);
> +MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");

Why is this configurable?
[Debashis] 
Auto-recovery is used to recover automatically from a f/w failure.
Disabling it helps preserve the state of the system/driver in case of a f/w failure.

-- 

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-01  5:23 ` Joe Perches
@ 2009-11-01 19:25   ` Stephen Hemminger
  2009-11-03  3:14     ` Debashis Dutt
  2009-11-03 18:24   ` Rasesh Mody
  1 sibling, 1 reply; 30+ messages in thread
From: Stephen Hemminger @ 2009-11-01 19:25 UTC (permalink / raw)
  To: Joe Perches
  Cc: Rasesh Mody, netdev, adapter_linux_open_src_team, Greg Kroah-Hartman

On Sat, 31 Oct 2009 22:23:59 -0700
Joe Perches <joe@perches.com> wrote:

> There are an awful lot of non linux standard
> uses in this code set.
> 
> Perhaps staging would be a good place to start?
> 

Yup, it looks like a Windows driver port

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-01  5:03 Rasesh Mody
  2009-11-01  5:23 ` Joe Perches
  2009-11-01  8:02 ` Eric Dumazet
@ 2009-11-01 19:19 ` Stephen Hemminger
  2009-11-03  3:05   ` Debashis Dutt
  2 siblings, 1 reply; 30+ messages in thread
From: Stephen Hemminger @ 2009-11-01 19:19 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, adapter_linux_open_src_team

Too many configuration options?

On Sat, 31 Oct 2009 22:03:14 -0700
Rasesh Mody <rmody@brocade.com> wrote:

> +
> +#ifdef BNAD_NO_IP_ALIGN
> +#define BNAD_NET_IP_ALIGN 0
> +#else
> +#define BNAD_NET_IP_ALIGN NET_IP_ALIGN
> +#endif
>

Why is this device special?



> +
> +
> +#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
> +

Module parameters mean the hardware or the developer could not
decide how to do it right.  Please reduce or eliminate most of these.


> +static uint bnad_msix = 1;
> +module_param(bnad_msix, uint, 0444);
> +MODULE_PARM_DESC(bnad_msix, "Enable MSI-X");

If msi-X is available use it, if not then don't. User can handle
this globally with kernel command line option.
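
Something along these lines would do (a rough sketch only; the helper
name is made up and msix_table is assumed to be allocated already):

	static void bnad_setup_intr(struct bnad *bnad)
	{
		int i;

		for (i = 0; i < bnad->msix_num; i++)
			bnad->msix_table[i].entry = i;

		/* Try MSI-X, silently fall back to INTx if unavailable. */
		if (!pci_enable_msix(bnad->pcidev, bnad->msix_table,
				     bnad->msix_num))
			bnad->flags |= BNAD_F_MSIX;
		else
			bnad->flags &= ~BNAD_F_MSIX;
	}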

> +uint bnad_small_large_rxbufs = 1;
> +module_param(bnad_small_large_rxbufs, uint, 0444);
> +MODULE_PARM_DESC(bnad_small_large_rxbufs, "Enable small/large buffer receive");

Do or do not, please no config option.  The ideal case is:
  normal MTU == skb
  jumbo MTU = skb with fragments
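
Something like a small header skb plus page fragments for the payload
(sketch only; the helper name is made up and DMA mapping is not shown):

	static void bnad_add_rx_frag(struct sk_buff *skb, struct page *page,
				     unsigned int frag_len)
	{
		int i = skb_shinfo(skb)->nr_frags;

		/* Attach one payload page as fragment i of the header skb. */
		skb_fill_page_desc(skb, i, page, 0, frag_len);
		skb->len += frag_len;
		skb->data_len += frag_len;
		skb->truesize += frag_len;
	}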

> +static uint bnad_rxqsets_used;
> +module_param(bnad_rxqsets_used, uint, 0444);
> +MODULE_PARM_DESC(bnad_rxqsets_used, "Number of RxQ sets to be used");
> +
> +static uint bnad_ipid_mode;
> +module_param(bnad_ipid_mode, uint, 0444);
> +MODULE_PARM_DESC(bnad_ipid_mode, "0 - Use IP ID 0x0000 - 0x7FFF for LSO; "
> +    "1 - Use full range of IP ID for LSO");

Gack!


> +uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
> +module_param(bnad_txq_depth, uint, 0444);
> +MODULE_PARM_DESC(bnad_txq_depth, "Maximum number of entries per TxQ");

Should be ethtool configuration not module parameters
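
i.e. expose the ring sizes through ethtool_ops (rough sketch, assuming
per-device txq_depth/rxq_depth fields instead of the globals):

	static void bnad_get_ringparam(struct net_device *netdev,
				       struct ethtool_ringparam *ring)
	{
		struct bnad *bnad = netdev_priv(netdev);

		ring->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
		ring->tx_max_pending = BNAD_MAX_Q_DEPTH;
		ring->rx_pending = bnad->rxq_depth;
		ring->tx_pending = bnad->txq_depth;
	}

with a matching .set_ringparam that validates the values and rebuilds
the queues.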

> +uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
> +module_param(bnad_rxq_depth, uint, 0444);
> +MODULE_PARM_DESC(bnad_rxq_depth, "Maximum number of entries per RxQ");
> +
> +static uint bnad_vlan_strip = 1;
> +module_param(bnad_vlan_strip, uint, 0444);
> +MODULE_PARM_DESC(bnad_vlan_strip, "Let the hardware strip off VLAN header");

Just do VLAN acceleration.

> +static uint bnad_log_level = LOG_WARN_LEVEL;
> +module_param(bnad_log_level, uint, 0644);
> +MODULE_PARM_DESC(bnad_log_level, "Log level");

Use ethtool msg_level for this
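
i.e. have bnad_get_msglevel()/bnad_set_msglevel() operate on a per-device
msg_enable field (assumed new) instead of the module-wide log level:

	static u32 bnad_get_msglevel(struct net_device *netdev)
	{
		struct bnad *bnad = netdev_priv(netdev);

		return bnad->msg_enable;
	}

	static void bnad_set_msglevel(struct net_device *netdev, u32 level)
	{
		struct bnad *bnad = netdev_priv(netdev);

		bnad->msg_enable = level;
	}

with netif_msg_*() checks replacing the private DPRINTK level.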

> +static uint bnad_ioc_auto_recover = 1;
> +module_param(bnad_ioc_auto_recover, uint, 0644);
> +MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");

Why is this configurable?


-- 

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-01  5:03 Rasesh Mody
  2009-11-01  5:23 ` Joe Perches
@ 2009-11-01  8:02 ` Eric Dumazet
  2009-11-03  7:54   ` Debashis Dutt
  2009-11-01 19:19 ` Stephen Hemminger
  2 siblings, 1 reply; 30+ messages in thread
From: Eric Dumazet @ 2009-11-01  8:02 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, adapter_linux_open_src_team

Rasesh Mody wrote:
> From: Rasesh Mody <rmody@brocade.com>
> 
> This is patch 1/6 which contains linux driver source for
> Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
> Re-based source against net-next-2.6 and re-submitting the
> patch with a few fixes.
> 
> We wish this patch to be considered for inclusion in net-next-2.6

bnad->netdev->last_rx = jiffies

and

netdev->trans_start = jiffies;

are not necessary; the core network code handles last_rx/trans_start itself
and more efficiently nowadays.

void bnad_reset_stats(struct net_device *netdev) seems defined but not used.



^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-11-01  5:03 Rasesh Mody
@ 2009-11-01  5:23 ` Joe Perches
  2009-11-01 19:25   ` Stephen Hemminger
  2009-11-03 18:24   ` Rasesh Mody
  2009-11-01  8:02 ` Eric Dumazet
  2009-11-01 19:19 ` Stephen Hemminger
  2 siblings, 2 replies; 30+ messages in thread
From: Joe Perches @ 2009-11-01  5:23 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, adapter_linux_open_src_team, Greg Kroah-Hartman

There are an awful lot of non linux standard
uses in this code set.

Perhaps staging would be a good place to start?


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
@ 2009-11-01  5:03 Rasesh Mody
  2009-11-01  5:23 ` Joe Perches
                   ` (2 more replies)
  0 siblings, 3 replies; 30+ messages in thread
From: Rasesh Mody @ 2009-11-01  5:03 UTC (permalink / raw)
  To: netdev; +Cc: adapter_linux_open_src_team

From: Rasesh Mody <rmody@brocade.com>

This is patch 1/6 which contains linux driver source for
Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
Re-based source against net-next-2.6 and re-submitting the
patch with a few fixes.

We wish this patch to be considered for inclusion in net-next-2.6

Signed-off-by: Rasesh Mody <rmody@brocade.com>
---
 bnad.c | 3515 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 bnad.h |  370 ++++++
 2 files changed, 3885 insertions(+)

diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.c net-next-2.6-mod/drivers/net/bna/bnad.c
--- net-next-2.6-orig/drivers/net/bna/bnad.c	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.c	2009-10-31 21:34:47.559538000 -0700
@@ -0,0 +1,3515 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+/**
+ *  bnad.c  Brocade 10G PCIe Ethernet driver.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/workqueue.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pm.h>
+#include <linux/random.h>
+
+#include <net/checksum.h>
+
+#include "bnad.h"
+#include "bna_os.h"
+#include "bna_iocll.h"
+#include "bna_intr.h"
+#include "bnad_defs.h"
+
+#ifdef BNAD_NO_IP_ALIGN
+#define BNAD_NET_IP_ALIGN 0
+#else
+#define BNAD_NET_IP_ALIGN NET_IP_ALIGN
+#endif
+
+
+
+#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
+
+
+static uint bnad_msix = 1;
+module_param(bnad_msix, uint, 0444);
+MODULE_PARM_DESC(bnad_msix, "Enable MSI-X");
+
+uint bnad_small_large_rxbufs = 1;
+module_param(bnad_small_large_rxbufs, uint, 0444);
+MODULE_PARM_DESC(bnad_small_large_rxbufs, "Enable small/large buffer receive");
+
+static uint bnad_rxqsets_used;
+module_param(bnad_rxqsets_used, uint, 0444);
+MODULE_PARM_DESC(bnad_rxqsets_used, "Number of RxQ sets to be used");
+
+static uint bnad_ipid_mode;
+module_param(bnad_ipid_mode, uint, 0444);
+MODULE_PARM_DESC(bnad_ipid_mode, "0 - Use IP ID 0x0000 - 0x7FFF for LSO; "
+    "1 - Use full range of IP ID for LSO");
+
+uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
+module_param(bnad_txq_depth, uint, 0444);
+MODULE_PARM_DESC(bnad_txq_depth, "Maximum number of entries per TxQ");
+
+uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
+module_param(bnad_rxq_depth, uint, 0444);
+MODULE_PARM_DESC(bnad_rxq_depth, "Maximum number of entries per RxQ");
+
+static uint bnad_vlan_strip = 1;
+module_param(bnad_vlan_strip, uint, 0444);
+MODULE_PARM_DESC(bnad_vlan_strip, "Let the hardware strip off VLAN header");
+
+static uint bnad_log_level = LOG_WARN_LEVEL;
+module_param(bnad_log_level, uint, 0644);
+MODULE_PARM_DESC(bnad_log_level, "Log level");
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0644);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");
+
+uint bnad_rxqs_per_cq;
+
+static void bnad_disable_msix(struct bnad *bnad);
+static void bnad_free_ibs(struct bnad *bnad);
+static void bnad_set_rx_mode(struct net_device *netdev);
+static void bnad_set_rx_mode_locked(struct net_device *netdev);
+static void bnad_reconfig_vlans(struct bnad *bnad);
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets);
+static int bnad_set_mac_address(struct net_device *netdev, void *addr);
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr);
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu);
+static int bnad_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid);
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid);
+static void bnad_netpoll(struct net_device *netdev);
+
+static const struct net_device_ops bnad_netdev_ops = {
+	.ndo_open			= bnad_open,
+	.ndo_stop			= bnad_stop,
+	.ndo_start_xmit			= bnad_start_xmit,
+	.ndo_get_stats			= bnad_get_stats,
+	.ndo_set_rx_mode		= &bnad_set_rx_mode,
+	.ndo_set_multicast_list		= bnad_set_rx_mode,
+	.ndo_set_mac_address		= bnad_set_mac_address,
+	.ndo_change_mtu			= bnad_change_mtu,
+	.ndo_do_ioctl			= bnad_ioctl,
+
+	.ndo_vlan_rx_register		= bnad_vlan_rx_register,
+	.ndo_vlan_rx_add_vid		= bnad_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid		= bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller		= bnad_netpoll,
+#endif
+};
+static int bnad_check_module_params(void)
+{
+	/* bnad_msix */
+	if (bnad_msix && bnad_msix != 1)
+		printk(KERN_WARNING "bna: bnad_msix should be 0 or 1, "
+		    "%u is invalid, set bnad_msix to 1\n", bnad_msix);
+
+	/* bnad_small_large_rxbufs */
+	if (bnad_small_large_rxbufs && bnad_small_large_rxbufs != 1)
+		printk(KERN_WARNING "bna: bnad_small_large_rxbufs should be "
+		    "0 or 1, %u is invalid, set bnad_small_large_rxbufs to 1\n",
+		    bnad_small_large_rxbufs);
+	if (bnad_small_large_rxbufs)
+		bnad_rxqs_per_cq = 2;
+	else
+		bnad_rxqs_per_cq = 1;
+
+	/* bnad_rxqsets_used */
+	if (bnad_rxqsets_used > BNAD_MAX_RXQS / bnad_rxqs_per_cq) {
+		printk(KERN_ERR "bna: the maximum value for bnad_rxqsets_used "
+		    "is %u, %u is invalid\n",
+		    BNAD_MAX_RXQS / bnad_rxqs_per_cq, bnad_rxqsets_used);
+		return -EINVAL;
+	}
+	if (!BNA_POWER_OF_2(bnad_rxqsets_used)) {
+		printk(KERN_ERR "bna: bnad_rxqsets_used should be power of 2, "
+		    "%u is invalid\n", bnad_rxqsets_used);
+		return -EINVAL;
+	}
+	if (bnad_rxqsets_used > (uint)num_online_cpus())
+		printk(KERN_WARNING "bna: set bnad_rxqsets_used (%u) "
+		    "larger than number of CPUs (%d) may not be helpful\n",
+		    bnad_rxqsets_used, num_online_cpus());
+
+	/* bnad_ipid_mode */
+	if (bnad_ipid_mode && bnad_ipid_mode != 1) {
+		printk(KERN_ERR "bna: bnad_ipid_mode should be 0 or 1, "
+		    "%u is invalid\n", bnad_ipid_mode);
+		return -EINVAL;
+	}
+
+	/* bnad_txq_depth */
+	if (bnad_txq_depth > BNAD_MAX_Q_DEPTH) {
+		printk(KERN_ERR "bna: bnad_txq_depth should be <= %u, "
+		    "%u is invalid\n", BNAD_MAX_Q_DEPTH, bnad_txq_depth);
+		return -EINVAL;
+	}
+	if (!BNA_POWER_OF_2(bnad_txq_depth)) {
+		printk(KERN_ERR "bna: bnad_txq_depth should be power of 2, "
+		    "%u is invalid\n", bnad_txq_depth);
+		return -EINVAL;
+	}
+	if (bnad_txq_depth < BNAD_MIN_Q_DEPTH) {
+		printk(KERN_ERR "bna: bnad_txq_depth should be >= %u, "
+		    "%u is invalid\n", BNAD_MIN_Q_DEPTH, bnad_txq_depth);
+		return -EINVAL;
+	}
+
+	/* bnad_rxq_depth */
+	if (bnad_rxq_depth > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq) {
+		printk(KERN_ERR "bna: bnad_rxq_depth should be <= %u, "
+		    "%u is invalid\n", BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq,
+		    bnad_rxq_depth);
+		return -EINVAL;
+	}
+	if (!BNA_POWER_OF_2(bnad_rxq_depth)) {
+		printk(KERN_ERR "bna: bnad_rxq_depth should be power of 2, "
+		    "%u is invalid\n", bnad_rxq_depth);
+		return -EINVAL;
+	}
+	if (bnad_rxq_depth < BNAD_MIN_Q_DEPTH) {
+		printk(KERN_ERR "bna: bnad_rxq_depth should be >= %u, "
+		    "%u is invalid\n", BNAD_MIN_Q_DEPTH, bnad_rxq_depth);
+		return -EINVAL;
+	}
+
+	/* bnad_vlan_strip */
+	if (bnad_vlan_strip && bnad_vlan_strip != 1)
+		printk(KERN_WARNING "bna: bnad_vlan_strip should be 0 or 1, "
+		    "%u is invalid, set bnad_vlan_strip to 1\n",
+		    bnad_vlan_strip);
+
+	/* bnad_ioc_auto_recover */
+	if (bnad_ioc_auto_recover && bnad_ioc_auto_recover != 1)
+		printk(KERN_WARNING
+			"bna: bnad_ioc_auto_recover should be 0 or 1, "
+		    "%u is invalid, set bnad_ioc_auto_recover to 1\n",
+		    bnad_ioc_auto_recover);
+
+
+	return 0;
+}
+
+u32 bnad_get_msglevel(struct net_device *netdev)
+{
+	return bnad_log_level;
+}
+
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	bnad_log_level = msglevel;
+}
+
+static unsigned int bnad_free_txbufs(struct bnad_txq_info *txqinfo,
+    u16 updated_txq_cons)
+{
+	struct bnad *bnad = txqinfo->bnad;
+	unsigned int sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	wis = BNAD_Q_INDEX_CHANGE(txqinfo->txq.q.consumer_index,
+	    updated_txq_cons, txqinfo->txq.q.q_depth);
+	BNA_ASSERT(wis <=
+	    BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth));
+	unmap_array = txqinfo->skb_unmap_q.unmap_array;
+	unmap_cons = txqinfo->skb_unmap_q.consumer_index;
+	prefetch(&unmap_array[unmap_cons + 1]);
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+		BNA_ASSERT(skb);
+		unmap_array[unmap_cons].skb = NULL;
+		BNA_ASSERT(wis >=
+		    BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags));
+		BNA_ASSERT(((txqinfo->skb_unmap_q.producer_index -
+		    unmap_cons) & (txqinfo->skb_unmap_q.q_depth - 1)) >=
+		    1 + skb_shinfo(skb)->nr_frags);
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+		    pci_unmap_addr(&unmap_array[unmap_cons], dma_addr),
+		    skb_headlen(skb), PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, txqinfo->skb_unmap_q.q_depth);
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+			    pci_unmap_addr(&unmap_array[unmap_cons], dma_addr),
+			    skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+			    0);
+			BNA_QE_INDX_ADD(unmap_cons, 1,
+			    txqinfo->skb_unmap_q.q_depth);
+			prefetch(&unmap_array[unmap_cons + 1]);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	txqinfo->txq.q.consumer_index = updated_txq_cons;
+	txqinfo->skb_unmap_q.consumer_index = unmap_cons;
+	txqinfo->tx_packets += sent_packets;
+	txqinfo->tx_bytes += sent_bytes;
+	return sent_packets;
+}
+
+static int bnad_lro_get_skb_header(struct sk_buff *skb, void **iphdr,
+    void **tcphdr, u64 *hdr_flags, void *priv)
+{
+	struct bna_cq_entry *cmpl = priv;
+	u32 flags = ntohl(cmpl->flags);
+
+	if ((flags & BNA_CQ_EF_IPV4) && (flags & BNA_CQ_EF_TCP)) {
+		skb_reset_network_header(skb);
+		skb_set_transport_header(skb, ip_hdrlen(skb));
+		*iphdr = ip_hdr(skb);
+		*tcphdr = tcp_hdr(skb);
+		*hdr_flags = LRO_IPV4 | LRO_TCP;
+		return 0;
+	} else {
+		return -1;
+	}
+}
+
+static inline void bnad_disable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv,
+		    &bnad->txq_table[i].ib, 0);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv,
+		    &bnad->cq_table[i].ib, 0);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+}
+
+static inline void bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv,
+		    &bnad->txq_table[i].ib, bnad->tx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv,
+		    &bnad->cq_table[i].ib,
+			bnad->cq_table[i].rx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static inline void
+bnad_disable_rx_irq(struct bnad *bnad, struct bnad_cq_info *cqinfo)
+{
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib, 0);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+}
+static inline void
+bnad_enable_rx_irq(struct bnad *bnad, struct bnad_cq_info *cqinfo)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib,
+	    cqinfo->rx_coalescing_timeo);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static unsigned int bnad_tx(struct bnad *bnad, struct bnad_txq_info *txqinfo)
+{
+	struct net_device *netdev = bnad->netdev;
+	unsigned int sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags))
+		return 0;
+
+	DPRINTK(DEBUG, "%s ", netdev->name);
+	DPRINTK(DEBUG, "TxQ hw consumer index %u\n",
+		*txqinfo->hw_consumer_index);
+	sent = bnad_free_txbufs(txqinfo,
+	    (u16)(*txqinfo->hw_consumer_index));
+	if (sent) {
+		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
+		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
+				BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);
+		DPRINTK(DEBUG, "%s ack TxQ IB %u packets\n",
+			netdev->name, sent);
+	} else {
+		bna_ib_ack(bnad->priv, &txqinfo->ib, 0);
+	}
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+
+	return sent;
+}
+
+static irqreturn_t bnad_msix_tx(int irq, void *data)
+{
+	struct bnad_txq_info *txqinfo = (struct bnad_txq_info *)data;
+	struct bnad *bnad = txqinfo->bnad;
+
+
+	bnad_tx(bnad, txqinfo);
+
+	return IRQ_HANDLED;
+}
+
+static void bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc = BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+	    rxqinfo->skb_unmap_q.q_depth);
+
+	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
+	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= rxqinfo->rxq.q.q_depth);
+
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q,
+			    rxent, wi_range);
+			BNA_ASSERT(wi_range &&
+			    wi_range <= rxqinfo->rxq.q.q_depth);
+		}
+		skb = alloc_skb(rxqinfo->rxq_config.buffer_size +
+				BNAD_NET_IP_ALIGN, GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			rxqinfo->rxbuf_alloc_failed++;
+			goto finishing;
+		}
+		skb->dev = rxqinfo->bnad->netdev;
+		skb_reserve(skb, BNAD_NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr = pci_map_single(rxqinfo->bnad->pcidev, skb->data,
+		    rxqinfo->rxq_config.buffer_size, PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod],
+			dma_addr, dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, rxqinfo->skb_unmap_q.q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		rxqinfo->skb_unmap_q.producer_index = unmap_prod;
+		rxqinfo->rxq.q.producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(&rxqinfo->rxq);
+	}
+}
+
+static inline void bnad_refill_rxq(struct bnad_rxq_info *rxqinfo)
+{
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags)) {
+		if (BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+		    rxqinfo->skb_unmap_q.q_depth) >>
+		    BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_rxbufs(rxqinfo);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+	}
+}
+
+static unsigned int
+bnad_poll_cq(struct bnad *bnad, struct bnad_cq_info *cqinfo, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_rxq_info *rxqinfo = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	struct bna_pkt_rate *pkt_rt = &cqinfo->pkt_rate;
+
+	prefetch(bnad);
+	prefetch(bnad->netdev);
+	cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+	BNA_ASSERT(wi_range && wi_range <= cqinfo->cq.q.q_depth);
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+		rxqinfo = &bnad->rxq_table[cmpl->rxq_id];
+		unmap_q = &rxqinfo->skb_unmap_q;
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		prefetch(skb->data - BNAD_NET_IP_ALIGN);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+		    pci_unmap_addr(
+		    &unmap_q->unmap_array[unmap_q->consumer_index],
+		    dma_addr),
+		    rxqinfo->rxq_config.buffer_size, PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		/* XXX May be bad for performance. */
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+		wis++;
+		if (likely(--wi_range)) {
+			next_cmpl = cmpl + 1;
+		} else {
+			BNA_Q_PI_ADD(&cqinfo->cq, wis);
+			wis = 0;
+			next_cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+			BNA_ASSERT(wi_range &&
+			    wi_range <= cqinfo->cq.q.q_depth);
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+			BNA_CQ_EF_FCS_ERROR | BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb_any(skb);
+			rxqinfo->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely(bnad->rx_csum &&
+		    (((flags & BNA_CQ_EF_IPV4) &&
+		    (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+		    (flags & BNA_CQ_EF_IPV6)) &&
+		    (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+		    (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		rxqinfo->rx_packets++;
+		rxqinfo->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) &&
+		    bnad_vlan_strip) {
+			BNA_ASSERT(cmpl->vlan_tag);
+			if (skb->ip_summed == CHECKSUM_UNNECESSARY
+			    && (bnad->netdev->features & NETIF_F_LRO)) {
+				lro_vlan_hwaccel_receive_skb(&cqinfo->lro, skb,
+				    bnad->vlangrp, ntohs(cmpl->vlan_tag), cmpl);
+			} else {
+				vlan_hwaccel_receive_skb(skb, bnad->vlangrp,
+				    ntohs(cmpl->vlan_tag));
+			}
+
+		} else {
+
+			if (skb->ip_summed == CHECKSUM_UNNECESSARY
+			    && (bnad->netdev->features & NETIF_F_LRO))
+				lro_receive_skb(&cqinfo->lro, skb, cmpl);
+			else
+				netif_receive_skb(skb);
+		}
+		bnad->netdev->last_rx = jiffies;
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	lro_flush_all(&cqinfo->lro);
+
+	BNA_Q_PI_ADD(&cqinfo->cq, wis);
+
+	if (likely(rxqinfo)) {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, packets);
+		/* Check the current queue first. */
+		bnad_refill_rxq(rxqinfo);
+
+		/* XXX counters per queue for refill? */
+		if (likely(bnad_small_large_rxbufs)) {
+			/* There are 2 RxQs - small and large buffer queues */
+			unsigned int rxq_id = (rxqinfo->rxq_id ^ 1);
+			bnad_refill_rxq(&bnad->rxq_table[rxq_id]);
+		}
+	} else {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	}
+
+	return packets;
+}
+
+static irqreturn_t bnad_msix_rx(int irq, void *data)
+{
+	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
+	struct bnad *bnad = cqinfo->bnad;
+
+	if (likely(napi_schedule_prep(&cqinfo->napi))) {
+		bnad_disable_rx_irq(bnad, cqinfo);
+		__napi_schedule(&cqinfo->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_msix_err_mbox(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		DPRINTK(DEBUG, "port %d msix err/mbox irq status 0x%x\n",
+			bnad->bna_id, intr_status);
+		bna_mbox_err_handler(bnad->priv, intr_status);
+	} else {
+		DPRINTK(WARNING, "port %d msix err/mbox irq status 0x%x\n",
+			 bnad->bna_id, intr_status);
+	}
+	spin_unlock(&bnad->priv_lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_isr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+	spin_unlock(&bnad->priv_lock);
+
+	if (!intr_status)
+		return IRQ_NONE;
+
+	DPRINTK(DEBUG, "port %u bnad_isr: 0x%x\n", bnad->bna_id, intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		spin_lock(&bnad->priv_lock);
+		bna_mbox_err_handler(bnad->priv, intr_status);
+		spin_unlock(&bnad->priv_lock);
+		if (BNA_IS_ERR_INTR(intr_status) ||
+		    !BNA_IS_INTX_DATA_INTR(intr_status))
+			goto exit_isr;
+	}
+
+	if (likely(napi_schedule_prep(&bnad->cq_table[0].napi))) {
+		bnad_disable_txrx_irqs(bnad);
+		__napi_schedule(&bnad->cq_table[0].napi);
+	}
+
+exit_isr:
+	return IRQ_HANDLED;
+}
+
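+/*
+ * Hook up the mailbox/error interrupt: the last MSI-X vector in MSI-X
+ * mode, or the shared PCI INTx line otherwise.
+ */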
+static int bnad_request_mbox_irq(struct bnad *bnad)
+{
+	int err;
+
+	if (bnad->flags & BNAD_F_MSIX) {
+		DPRINTK(DEBUG,
+			"port %u requests IRQ %u for mailbox in MSI-X mode\n",
+			bnad->bna_id,
+			bnad->msix_table[bnad->msix_num - 1].vector);
+		err = request_irq(bnad->msix_table[bnad->msix_num - 1].vector,
+		    &bnad_msix_err_mbox, 0, bnad->netdev->name,
+		    bnad->netdev);
+	} else {
+		DPRINTK(DEBUG, "port %u requests IRQ %u in INTx mode\n",
+			bnad->bna_id, bnad->pcidev->irq);
+		err = request_irq(bnad->pcidev->irq, &bnad_isr,
+		    IRQF_SHARED, bnad->netdev->name, bnad->netdev);
+	}
+
+	if (err) {
+		dev_err(&bnad->pcidev->dev,
+		    "Request irq for mailbox failed: %d\n", err);
+		return err;
+	}
+
+	if (bnad->flags & BNAD_F_MSIX)
+		bna_mbox_msix_idx_set(bnad->priv, bnad->msix_num - 1);
+
+	bna_mbox_intr_enable(bnad->priv);
+	return 0;
+}
+
+
+static void bnad_sync_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->flags & BNAD_F_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+	synchronize_irq(irq);
+}
+
+static void bnad_free_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->flags & BNAD_F_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_mbox_intr_disable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+	free_irq(irq, bnad->netdev);
+}
+
+static int bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
+{
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	if (!(bnad->flags & BNAD_F_MSIX))
+		return 0;
+	DPRINTK(DEBUG, "port %u requests irq %u for TxQ %u in MSIX mode\n",
+		bnad->bna_id, bnad->msix_table[txq_id].vector, txq_id);
+	return request_irq(bnad->msix_table[txq_id].vector,
+	    &bnad_msix_tx, 0, bnad->txq_table[txq_id].name,
+	    &bnad->txq_table[txq_id]);
+}
+
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
+{
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	if (!(bnad->flags & BNAD_F_MSIX))
+		return 0;
+	DPRINTK(DEBUG, "port %u requests irq %u for CQ %u in MSIX mode\n",
+		bnad->bna_id,
+		bnad->msix_table[bnad->txq_num + cq_id].vector, cq_id);
+	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
+	    &bnad_msix_rx, 0, bnad->cq_table[cq_id].name,
+	    &bnad->cq_table[cq_id]);
+}
+
+static void bnad_intx_enable_txrx(struct bnad *bnad)
+{
+	u32 mask;
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_intx_disable(bnad->priv, &mask);
+	mask &= ~0xffff;
+	bna_intx_enable(bnad->priv, mask);
+	for (i = 0; i < bnad->ib_num; i++)
+		bna_ib_ack(bnad->priv, bnad->ib_table[i].ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
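+/*
+ * Request one IRQ per TxQ and per CQ in MSI-X mode; in INTx mode just
+ * unmask the data interrupts and ack all interrupt blocks.
+ */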
+static int bnad_request_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	int i;
+	int err;
+
+	if (!(bnad->flags & BNAD_F_MSIX)) {
+		bnad_intx_enable_txrx(bnad);
+		return 0;
+	}
+
+	entries = bnad->msix_table;
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_request_txq_irq(bnad, i);
+		if (err) {
+			printk(KERN_ERR "%s request irq for TxQ %d failed %d\n",
+			    bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[i].vector,
+				    &bnad->txq_table[i]);
+			}
+			return err;
+		}
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_request_cq_irq(bnad, i);
+		if (err) {
+			printk(KERN_ERR "%s request irq for CQ %u failed %d\n",
+			    bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[bnad->txq_num + i].vector,
+					 &bnad->cq_table[i]);
+			}
+			goto free_txq_irqs;
+		}
+	}
+
+	return 0;
+
+free_txq_irqs:
+	for (i = 0; i < bnad->txq_num; i++)
+		free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+	bnad_disable_msix(bnad);
+
+	return err;
+}
+
+static void bnad_free_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	uint i;
+
+	if (bnad->flags & BNAD_F_MSIX) {
+		entries = bnad->msix_table;
+		for (i = 0; i < bnad->txq_num; i++)
+			free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+		for (i = 0; i < bnad->cq_num; i++)
+			free_irq(entries[bnad->txq_num + i].vector,
+			    &bnad->cq_table[i]);
+	} else {
+		synchronize_irq(bnad->pcidev->irq);
+	}
+}
+
+void bnad_setup_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+
+	BNA_ASSERT(ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_ib_config_set(bnad->priv, ib_entry->ib, ib_id,
+	    &ib_entry->ib_config);
+	/* Start the IB */
+	bna_ib_ack(bnad->priv, ib_entry->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_ibs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_ib(bnad, bnad->txq_table[i].txq_config.ib_id);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_ib(bnad, bnad->cq_table[i].cq_config.ib_id);
+}
+
+/* These functions are called back with priv_lock held. */
+
+static void bnad_lldp_get_cfg_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_attr_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_stats_comp_status = status;
+	complete(&bnad->cee_stats_comp);
+}
+
+static void bnad_cee_reset_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_reset_stats_status = status;
+	complete(&bnad->cee_reset_stats_comp);
+}
+
+static void bnad_ucast_set_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->ucast_comp_status = status;
+	complete(&bnad->ucast_comp);
+}
+
+static void bnad_q_stop_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+
+	bnad->qstop_comp_status = status;
+	complete(&bnad->qstop_comp);
+}
+
+static void bnad_link_up_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct net_device *netdev = bnad->netdev;
+
+	DPRINTK(INFO, "%s bnad_link_up_cb\n", netdev->name);
+	if (netif_running(netdev)) {
+		if (!netif_carrier_ok(netdev) &&
+		    !test_bit(BNAD_DISABLED, &bnad->state)) {
+			printk(KERN_INFO "%s link up\n", netdev->name);
+			netif_carrier_on(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+	}
+}
+
+static void bnad_link_down_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct net_device *netdev = bnad->netdev;
+
+	DPRINTK(INFO, "%s bnad_link_down_cb\n", netdev->name);
+	if (netif_running(netdev)) {
+		if (netif_carrier_ok(netdev)) {
+			printk(KERN_INFO "%s link down\n", netdev->name);
+			netif_carrier_off(netdev);
+			bnad->stats.netif_queue_stop++;
+		}
+	}
+}
+
+static void bnad_stats_get_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->stats.hw_stats_updates++;
+	if (!test_bit(BNAD_DISABLED, &bnad->state))
+		mod_timer(&bnad->stats_timer, jiffies + HZ);
+}
+
+/* Called with bnad priv_lock held. */
+static void bnad_hw_error(struct bnad *bnad, u8 status)
+{
+	unsigned int irq;
+
+	bna_mbox_intr_disable(bnad->priv);
+	if (bnad->flags & BNAD_F_MSIX) {
+		if (!test_and_set_bit(BNAD_MBOX_IRQ_DISABLED, &bnad->state)) {
+			irq = bnad->msix_table[bnad->txq_num +
+			    bnad->cq_num].vector;
+			DPRINTK(WARNING, "Disabling Mbox IRQ %d for port %d\n",
+				irq, bnad->bna_id);
+			disable_irq_nosync(irq);
+		}
+	}
+
+	bna_cleanup(bnad->priv);
+	bnad->work_flags = BNAD_WF_ERROR;
+	if (!test_bit(BNAD_REMOVED, &bnad->state))
+		schedule_work(&bnad->work);
+}
+
+static void bnad_hw_error_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	DPRINTK(WARNING, "port %d HW error callback %u\n",
+		bnad->bna_id, status);
+
+	bnad_hw_error(bnad, status);
+}
+
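+/*
+ * Allocate the shadow array used to track skbs and their DMA mappings
+ * for a Tx or Rx queue.
+ */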
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth)
+{
+	/* q_depth must be a power of 2 for the index macros to work. */
+	BNA_ASSERT(BNA_POWER_OF_2(q_depth));
+	unmap_q->q_depth = q_depth;
+	unmap_q->unmap_array = vmalloc(q_depth *
+	    sizeof(struct bnad_skb_unmap));
+	if (!unmap_q->unmap_array)
+		return -ENOMEM;
+	memset(unmap_q->unmap_array, 0,
+	    q_depth * sizeof(struct bnad_skb_unmap));
+	return 0;
+}
+
+static int bnad_alloc_unmap_queues(struct bnad *bnad)
+{
+	int i, err = 0;
+	struct bnad_txq_info *txqinfo;
+	struct bnad_rxq_info *rxqinfo;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		err = bnad_alloc_unmap_q(&txqinfo->skb_unmap_q,
+		    txqinfo->txq.q.q_depth * 4);
+		DPRINTK(DEBUG, "%s allocating Tx unmap Q %d depth %u\n",
+			bnad->netdev->name, i, txqinfo->txq.q.q_depth * 4);
+		if (err) {
+			DPRINTK(ERR, "%s allocating Tx unmap Q %d failed: %d\n",
+				bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		err = bnad_alloc_unmap_q(&rxqinfo->skb_unmap_q,
+		    rxqinfo->rxq.q.q_depth);
+		DPRINTK(INFO, "%s allocating Rx unmap Q %d depth %u\n",
+			bnad->netdev->name, i, rxqinfo->rxq.q.q_depth);
+		if (err) {
+			DPRINTK(ERR, "%s allocating Rx unmap Q %d failed: %d\n",
+				bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	return 0;
+}
+
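+/* Verify that a queue has been drained and reset its indices to zero. */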
+static void
+bnad_reset_q(struct bnad *bnad, struct bna_q *q, struct bnad_unmap_q *unmap_q)
+{
+	u32 _ui;
+	if (q->producer_index != q->consumer_index) {
+		DPRINTK(ERR, "Q producer index %u != ",	q->producer_index);
+		DPRINTK(ERR, "consumer index %u\n", q->consumer_index);
+	}
+	BNA_ASSERT(q->producer_index == q->consumer_index);
+	if (unmap_q->producer_index != unmap_q->consumer_index) {
+		DPRINTK(ERR, "UnmapQ producer index %u != ",
+			unmap_q->producer_index);
+		DPRINTK(ERR, "consumer index %u\n",
+			unmap_q->consumer_index);
+	}
+	BNA_ASSERT(unmap_q->producer_index == unmap_q->consumer_index);
+
+	q->producer_index = 0;
+	q->consumer_index = 0;
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	for (_ui = 0; _ui < unmap_q->q_depth; _ui++)
+		BNA_ASSERT(!unmap_q->unmap_array[_ui].skb);
+}
+
+/* Called with priv_lock. */
+static void bnad_flush_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	struct bnad *bnad = rxqinfo->bnad;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 cq_id;
+
+	unmap_q = &rxqinfo->skb_unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+		    pci_unmap_addr(
+		    &unmap_q->unmap_array[unmap_q->consumer_index], dma_addr),
+		    rxqinfo->rxq_config.buffer_size + BNAD_NET_IP_ALIGN,
+		    PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+	}
+
+	bnad_reset_q(bnad, &rxqinfo->rxq.q, &rxqinfo->skb_unmap_q);
+	cq_id = rxqinfo->rxq_id / bnad_rxqs_per_cq;
+	*bnad->cq_table[cq_id].hw_producer_index = 0;
+}
+
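+/* Ask the firmware to stop a TxQ and wait for the mailbox completion. */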
+static int bnad_disable_txq(struct bnad *bnad, u32 txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+	txqinfo = &bnad->txq_table[txq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_txq_stop(bnad->priv, txq_id);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err)
+		goto txq_stop_exit;
+
+	DPRINTK(INFO, "Waiting for %s TxQ %d stop reply\n",
+		bnad->netdev->name, txq_id);
+	wait_for_completion(&bnad->qstop_comp);
+
+	err = bnad->qstop_comp_status;
+txq_stop_exit:
+	if (err)
+		DPRINTK(ERR, "%s bna_txq_stop %d failed %d\n",
+			bnad->netdev->name, txq_id, err);
+	return err;
+}
+
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask)
+{
+	int err;
+
+	struct timeval  tv;
+
+	BNA_ASSERT(!in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+
+	spin_lock_irq(&bnad->priv_lock);
+	do_gettimeofday(&tv);
+	DPRINTK(DEBUG, "Calling bna_multi_rxq_stop at %ld:%ld\n",
+		tv.tv_sec, tv.tv_usec);
+	err = bna_multi_rxq_stop(bnad->priv, rxq_id_mask);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err)
+		goto rxq_stop_exit;
+
+	DPRINTK(INFO, "Waiting for %s RxQs(0x%llx) stop reply\n",
+		bnad->netdev->name, rxq_id_mask);
+	wait_for_completion(&bnad->qstop_comp);
+
+	do_gettimeofday(&tv);
+	DPRINTK(DEBUG, "bna_multi_rxq_stop returned at %ld:%ld\n",
+		tv.tv_sec, tv.tv_usec);
+	err = bnad->qstop_comp_status;
+rxq_stop_exit:
+	if (err)
+		DPRINTK(ERR, "%s bna_multi_rxq_stop(0x%llx) failed %d\n",
+			bnad->netdev->name, rxq_id_mask, err);
+	return err;
+
+}
+
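+/* NAPI poll handler used in MSI-X mode: Rx completions only. */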
+static int bnad_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+	    container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.netif_rx_complete++;
+	bnad_enable_rx_irq(bnad, cqinfo);
+	return rcvd;
+}
+
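+/* NAPI poll handler used in INTx mode: reap Tx completions, then poll Rx. */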
+static int bnad_poll_txrx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+	    container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	bnad_tx(bnad, &bnad->txq_table[0]);
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	napi_complete(napi);
+	bnad->stats.netif_rx_complete++;
+	bnad_enable_txrx_irqs(bnad);
+	return rcvd;
+}
+
+static void bnad_napi_init(struct bnad *bnad)
+{
+	int (*napi_poll)(struct napi_struct *, int);
+	int i;
+
+	if (bnad->flags & BNAD_F_MSIX)
+		napi_poll = bnad_poll_rx;
+	else
+		napi_poll = bnad_poll_txrx;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_add(bnad->netdev, &bnad->cq_table[i].napi,
+		    napi_poll, 64);
+}
+
+static void bnad_napi_enable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_enable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_disable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_disable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_del(&bnad->cq_table[i].napi);
+}
+
+
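+/*
+ * Quiesce the data path: disable the Tx/Rx functions and interrupt
+ * blocks, free per-queue IRQs, and stop NAPI and the stats timer.
+ */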
+static void bnad_detach(struct bnad *bnad)
+{
+	int i;
+
+	ASSERT_RTNL();
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!test_bit(BNAD_RESETTING, &bnad->state)) {
+		/* Graceful detach */
+
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	} else {
+		/* Error */
+		/* XXX Should not write to registers if RESETTING. */
+
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_rxf_disable_old(bnad->priv, BNAD_RX_FUNC_ID);
+
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Wait to make sure Tx and Rx are stopped. */
+	msleep(1000);
+	bnad_free_txrx_irqs(bnad);
+	bnad_sync_mbox_irq(bnad);
+
+	bnad_napi_disable(bnad);
+	bnad_napi_uninit(bnad);
+
+	/* Delete the stats timer after synchronize with mbox irq. */
+	del_timer_sync(&bnad->stats_timer);
+	netif_tx_disable(bnad->netdev);
+	netif_carrier_off(bnad->netdev);
+}
+
+static int bnad_disable(struct bnad *bnad)
+{
+	int err, i;
+	u64 rxq_id_mask = 0;
+
+	ASSERT_RTNL();
+	DPRINTK(INFO, "bring %s link down\n", bnad->netdev->name);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_port_admin(bnad->priv, BNA_DISABLE);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_detach(bnad);
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			return err;
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int bnad_sw_reset(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (!netif_running(bnad->netdev))
+		return 0;
+
+	err = bnad_stop_locked(netdev);
+	if (err) {
+		DPRINTK(WARNING, "%s sw reset: disable failed %d\n",
+			bnad->netdev->name, err);
+		/* Recoverable */
+		return 0;
+	}
+
+	err = bnad_open_locked(netdev);
+	if (err) {
+		DPRINTK(WARNING, "%s sw reset: enable failed %d\n",
+			bnad->netdev->name, err);
+		return err;
+	}
+
+	return 0;
+}
+
+int bnad_resetting(struct bnad *bnad)
+{
+	rtnl_lock();
+	if (netif_running(bnad->netdev))
+		bnad_stop_locked(bnad->netdev);
+	set_bit(BNAD_RESETTING, &bnad->state);
+	rtnl_unlock();
+	return 0;
+}
+
+int bnad_alloc_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	ib_entry->ib_seg_addr = pci_alloc_consistent(bnad->pcidev,
+	    L1_CACHE_BYTES, &dma_addr);
+	if (!ib_entry->ib_seg_addr)
+		return -ENOMEM;
+	DPRINTK(DEBUG, "%s IB %d dma addr 0x%llx\n",
+		bnad->netdev->name, ib_id, dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &ib_entry->ib_config.ib_seg_addr);
+	return 0;
+}
+
+static int bnad_alloc_ibs(struct bnad *bnad)
+{
+	uint i;
+	int err;
+
+	bnad->ib_num = bnad->txq_num + bnad->cq_num;
+	bnad->ib_table = kzalloc(bnad->ib_num *
+	    sizeof(struct bnad_ib_entry), GFP_KERNEL);
+	if (!bnad->ib_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->ib_num; i++) {
+		err = bnad_alloc_ib(bnad, i);
+		if (err)
+			goto free_ibs;
+	}
+	return 0;
+
+free_ibs:
+	bnad_free_ibs(bnad);
+	return err;
+}
+
+void bnad_free_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	if (ib_entry->ib_seg_addr) {
+		BNA_GET_DMA_ADDR(&ib_entry->ib_config.ib_seg_addr, dma_addr);
+		pci_free_consistent(bnad->pcidev, L1_CACHE_BYTES,
+		    ib_entry->ib_seg_addr, dma_addr);
+		ib_entry->ib_seg_addr = NULL;
+	}
+}
+
+static void bnad_free_ibs(struct bnad *bnad)
+{
+	uint i;
+
+	if (!bnad->ib_table)
+		return;
+
+	for (i = 0; i < bnad->ib_num; i++)
+		bnad_free_ib(bnad, i);
+	kfree(bnad->ib_table);
+	bnad->ib_table = NULL;
+}
+
+/*
+ * Allocate the queue page table (QPT) and the queue pages themselves.
+ * On error the caller is responsible for freeing any partial allocation
+ * via bnad_free_q().
+ */
+static int bnad_alloc_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q, size_t qsize)
+{
+	size_t i;
+	dma_addr_t dma_addr;
+
+	qsize = ALIGN(qsize, PAGE_SIZE);
+	qpt->page_count = qsize >> PAGE_SHIFT;
+	qpt->page_size = PAGE_SIZE;
+
+	DPRINTK(DEBUG, "qpt page count 0x%x, ", qpt->page_count);
+	DPRINTK(DEBUG, "page size 0x%x\n", qpt->page_size);
+
+	qpt->kv_qpt_ptr = pci_alloc_consistent(bnad->pcidev,
+	    qpt->page_count * sizeof(struct bna_dma_addr), &dma_addr);
+	if (!qpt->kv_qpt_ptr)
+		return -ENOMEM;
+	BNA_SET_DMA_ADDR(dma_addr, &qpt->hw_qpt_ptr);
+	DPRINTK(DEBUG, "qpt host addr %p, ", qpt->kv_qpt_ptr);
+	DPRINTK(DEBUG, "dma addr 0x%llx\n", dma_addr);
+
+	q->qpt_ptr = kzalloc(qpt->page_count * sizeof(void *), GFP_KERNEL);
+	if (!q->qpt_ptr)
+		return -ENOMEM;
+	qpt->qpt_ptr = q->qpt_ptr;
+	for (i = 0; i < qpt->page_count; i++) {
+		q->qpt_ptr[i] = pci_alloc_consistent(bnad->pcidev, PAGE_SIZE,
+		    &dma_addr);
+		if (!q->qpt_ptr[i])
+			return -ENOMEM;
+		BNA_SET_DMA_ADDR(dma_addr,
+		    &((struct bna_dma_addr *)qpt->kv_qpt_ptr)[i]);
+
+		DPRINTK(DEBUG, "page %d ", (int)i);
+		DPRINTK(DEBUG, "host addr %p, ", q->qpt_ptr[i]);
+		DPRINTK(DEBUG, "dma addr 0x%llx\n", dma_addr);
+	}
+
+	return 0;
+}
+
+static void
+bnad_free_q(struct bnad *bnad, struct bna_qpt *qpt, struct bna_q *q)
+{
+	int i;
+	dma_addr_t dma_addr;
+
+	if (qpt->kv_qpt_ptr && q->qpt_ptr) {
+		for (i = 0; i < qpt->page_count; i++) {
+			if (q->qpt_ptr[i]) {
+				BNA_GET_DMA_ADDR(
+				    &((struct bna_dma_addr *)
+					qpt->kv_qpt_ptr)[i], dma_addr);
+				pci_free_consistent(bnad->pcidev, PAGE_SIZE,
+				    q->qpt_ptr[i], dma_addr);
+			}
+		}
+	}
+
+	kfree(q->qpt_ptr);
+	qpt->qpt_ptr = q->qpt_ptr = NULL;
+
+	if (qpt->kv_qpt_ptr) {
+		BNA_GET_DMA_ADDR(&qpt->hw_qpt_ptr, dma_addr);
+		pci_free_consistent(bnad->pcidev,
+		    qpt->page_count * sizeof(struct bna_dma_addr),
+		    qpt->kv_qpt_ptr, dma_addr);
+		qpt->kv_qpt_ptr = NULL;
+	}
+}
+
+static void bnad_free_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+	if (txqinfo->skb_unmap_q.unmap_array) {
+		bnad_free_txbufs(txqinfo, txqinfo->txq.q.producer_index);
+		vfree(txqinfo->skb_unmap_q.unmap_array);
+		txqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+	if (rxqinfo->skb_unmap_q.unmap_array) {
+		bnad_flush_rxbufs(rxqinfo);
+		vfree(rxqinfo->skb_unmap_q.unmap_array);
+		rxqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void bnad_free_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo = &bnad->cq_table[cq_id];
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+	vfree(cqinfo->lro.lro_arr);
+	cqinfo->lro.lro_arr = NULL;
+}
+
+static void bnad_free_queues(struct bnad *bnad)
+{
+	uint i;
+
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++)
+			bnad_free_txq(bnad, i);
+		kfree(bnad->txq_table);
+		bnad->txq_table = NULL;
+	}
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++)
+			bnad_free_rxq(bnad, i);
+		kfree(bnad->rxq_table);
+		bnad->rxq_table = NULL;
+	}
+
+	if (bnad->cq_table) {
+		for (i = 0; i < bnad->cq_num; i++)
+			bnad_free_cq(bnad, i);
+		kfree(bnad->cq_table);
+		bnad->cq_table = NULL;
+	}
+}
+
+static int bnad_txq_init(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	DPRINTK(DEBUG, "%s allocating TxQ %d\n", bnad->netdev->name, txq_id);
+	err = bnad_alloc_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q,
+	    bnad->txq_depth * sizeof(struct bna_txq_entry));
+	if (err) {
+		bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+		return err;
+	}
+	txqinfo->txq.q.q_depth = bnad->txq_depth;
+	txqinfo->bnad = bnad;
+	txqinfo->txq_config.txf_id = BNAD_TX_FUNC_ID;
+	snprintf(txqinfo->name, sizeof(txqinfo->name), "%s TxQ %d",
+	    bnad->netdev->name, txq_id);
+	return 0;
+}
+
+static int bnad_txqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->txq_table = kzalloc(bnad->txq_num *
+	    sizeof(struct bnad_txq_info), GFP_KERNEL);
+	if (!bnad->txq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_txq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	DPRINTK(DEBUG, "%s allocating RxQ %d\n", bnad->netdev->name, rxq_id);
+	err = bnad_alloc_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q,
+	    bnad->rxq_depth * sizeof(struct bna_rxq_entry));
+	if (err) {
+		bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+		return err;
+	}
+	rxqinfo->rxq.q.q_depth = bnad->rxq_depth;
+	rxqinfo->bnad = bnad;
+	rxqinfo->rxq_id = rxq_id;
+	rxqinfo->rxq_config.cq_id = rxq_id / bnad_rxqs_per_cq;
+
+	return 0;
+}
+
+static int bnad_rxqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->rxq_table = kzalloc(bnad->rxq_num *
+	    sizeof(struct bnad_rxq_info), GFP_KERNEL);
+	if (!bnad->rxq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		err = bnad_rxq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int bnad_cq_init(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	DPRINTK(DEBUG, "%s allocating CQ %d\n", bnad->netdev->name, cq_id);
+	err = bnad_alloc_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q,
+	    bnad->rxq_depth * bnad_rxqs_per_cq * sizeof(struct bna_cq_entry));
+	if (err) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return err;
+	}
+
+	cqinfo->cq.q.q_depth = bnad->rxq_depth * bnad_rxqs_per_cq;
+	cqinfo->bnad = bnad;
+
+	cqinfo->lro.dev = bnad->netdev;
+	cqinfo->lro.features |= LRO_F_NAPI;
+	if (bnad_vlan_strip)
+		cqinfo->lro.features |= LRO_F_EXTRACT_VLAN_ID;
+	cqinfo->lro.ip_summed = CHECKSUM_UNNECESSARY;
+	cqinfo->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+	cqinfo->lro.max_desc = BNAD_LRO_MAX_DESC;
+	cqinfo->lro.max_aggr = BNAD_LRO_MAX_AGGR;
+	/* XXX */
+	cqinfo->lro.frag_align_pad = 0;
+	cqinfo->lro.lro_arr = vmalloc(BNAD_LRO_MAX_DESC *
+	    sizeof(struct net_lro_desc));
+	if (!cqinfo->lro.lro_arr) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return -ENOMEM;
+	}
+	memset(cqinfo->lro.lro_arr, 0, BNAD_LRO_MAX_DESC *
+	    sizeof(struct net_lro_desc));
+	cqinfo->lro.get_skb_header = bnad_lro_get_skb_header;
+
+	cqinfo->rx_coalescing_timeo = bnad->rx_coalescing_timeo;
+
+	cqinfo->cq_id = cq_id;
+	snprintf(cqinfo->name, sizeof(cqinfo->name), "%s CQ %d",
+	    bnad->netdev->name, cq_id);
+
+	return 0;
+}
+
+static int bnad_cqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->cq_table = kzalloc(bnad->cq_num * sizeof(struct bnad_cq_info),
+	    GFP_KERNEL);
+	if (!bnad->cq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_cq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
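+/*
+ * Scale the configured queue depth down for jumbo MTUs, keeping it a
+ * power of 2 and at least BNAD_MIN_Q_DEPTH.
+ */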
+static uint bnad_get_qsize(uint qsize_conf, uint mtu)
+{
+	uint qsize;
+
+	if (mtu > ETH_DATA_LEN) {
+		qsize = qsize_conf / (mtu / ETH_DATA_LEN);
+		if (!BNA_POWER_OF_2(qsize))
+			BNA_TO_POWER_OF_2_HIGH(qsize);
+		if (qsize < BNAD_MIN_Q_DEPTH)
+			qsize = BNAD_MIN_Q_DEPTH;
+	} else
+		qsize = qsize_conf;
+
+	return qsize;
+}
+
+static int bnad_init_queues(struct bnad *bnad)
+{
+	int err;
+
+	if (!(bnad->flags & BNAD_F_TXQ_DEPTH))
+		bnad->txq_depth = bnad_get_qsize(bnad_txq_depth,
+		    bnad->netdev->mtu);
+	if (!(bnad->flags & BNAD_F_RXQ_DEPTH))
+		bnad->rxq_depth = bnad_get_qsize(bnad_rxq_depth,
+		    bnad->netdev->mtu);
+
+	err = bnad_txqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_rxqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_cqs_init(bnad);
+
+	return err;
+}
+
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
+{
+	struct bnad_cq_info *cqinfo;
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+
+	BNA_ASSERT(cq_id < bnad->cq_num && ib_id < bnad->ib_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	ib_entry = &bnad->ib_table[ib_id];
+
+	cqinfo->hw_producer_index = (u32 *)(ib_entry->ib_seg_addr);
+	cqinfo->cq_config.ib_id = ib_id;
+	cqinfo->cq_config.ib_seg_index = 0;
+
+	ib_entry->ib = &cqinfo->ib;
+	ib_config = &ib_entry->ib_config;
+	ib_config->coalescing_timer = bnad->rx_coalescing_timeo;
+	ib_config->control_flags = BNA_IB_CF_INT_ENABLE |
+	    BNA_IB_CF_MASTER_ENABLE;
+
+	if (bnad->flags & BNAD_F_MSIX) {
+		ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+		ib_config->msix_vector = ib_id;
+	} else
+		ib_config->msix_vector = 1 << ib_id;
+
+	/* Every CQ has its own IB. */
+	ib_config->seg_size = 1;
+	ib_config->index_table_offset = ib_id;
+}
+
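+/*
+ * Bind an interrupt block (IB) to every TxQ and CQ and set up its
+ * coalescing and MSI-X/INTx parameters.
+ */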
+static void bnad_ibs_init(struct bnad *bnad)
+{
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+	struct bnad_txq_info *txqinfo;
+
+	int ib_id, i;
+
+	ib_id = 0;
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		ib_entry = &bnad->ib_table[ib_id];
+
+		txqinfo->hw_consumer_index = ib_entry->ib_seg_addr;
+		txqinfo->txq_config.ib_id = ib_id;
+		txqinfo->txq_config.ib_seg_index = 0;
+
+		ib_entry->ib = &txqinfo->ib;
+		ib_config = &ib_entry->ib_config;
+		ib_config->coalescing_timer = bnad->tx_coalescing_timeo;
+		ib_config->control_flags = BNA_IB_CF_INTER_PKT_DMA |
+		    BNA_IB_CF_INT_ENABLE | BNA_IB_CF_COALESCING_MODE |
+		    BNA_IB_CF_MASTER_ENABLE;
+		if (bnad->flags & BNAD_F_MSIX) {
+			ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+			ib_config->msix_vector = ib_id;
+		} else
+			ib_config->msix_vector = 1 << ib_id;
+		ib_config->interpkt_count = bnad->tx_interpkt_count;
+
+		/* Every TxQ has its own IB. */
+		ib_config->seg_size = 1;
+		ib_config->index_table_offset = ib_id;
+		ib_id++;
+	}
+
+	for (i = 0; i < bnad->cq_num; i++, ib_id++)
+		bnad_rxib_init(bnad, i, ib_id);
+}
+
+static void bnad_txf_init(struct bnad *bnad, uint txf_id)
+{
+	struct bnad_txf_info *txf_info;
+
+	BNA_ASSERT(bnad->txf_table && txf_id < bnad->txf_num);
+	txf_info = &bnad->txf_table[txf_id];
+	txf_info->txf_id = txf_id;
+	txf_info->txf_config.flags = BNA_TXF_CF_VLAN_WI_BASED |
+	    BNA_TXF_CF_ENABLE;
+}
+
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss)
+{
+	struct bnad_rxf_info *rxf_info;
+
+	BNA_ASSERT(bnad->rxf_table && rxf_id < bnad->rxf_num);
+	rxf_info = &bnad->rxf_table[rxf_id];
+	rxf_info->rxf_id = rxf_id;
+	rxf_info->rxf_config.rit_offset = rit_offset;
+	rxf_info->rxf_config.mcast_rxq_id = BNAD_MULTICAST_RXQ_ID;
+	if (bnad_small_large_rxbufs)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_SM_LG_RXQ;
+	if (bnad_vlan_strip)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_VLAN_STRIP;
+	if (rss) {
+		struct bna_rxf_rss *rxf_rss;
+
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_RSS_ENABLE;
+		rxf_rss = &rxf_info->rxf_config.rss;
+		rxf_rss->type = BNA_RSS_V4_TCP | BNA_RSS_V4_IP |
+		    BNA_RSS_V6_TCP | BNA_RSS_V6_IP;
+		rxf_rss->hash_mask = bnad->cq_num - 1;
+		get_random_bytes(rxf_rss->toeplitz_hash_key,
+		    sizeof(rxf_rss->toeplitz_hash_key));
+	}
+	DPRINTK(DEBUG, "%s RxF %u config flags 0x%x\n",
+		bnad->netdev->name, rxf_id, rxf_info->rxf_config.flags);
+}
+
+static int bnad_init_funcs(struct bnad *bnad)
+{
+	bnad->txf_table = kzalloc(sizeof(struct bnad_txf_info) * bnad->txf_num,
+				  GFP_KERNEL);
+	if (!bnad->txf_table)
+		return -ENOMEM;
+	bnad_txf_init(bnad, BNAD_TX_FUNC_ID);
+
+	bnad->rxf_table = kzalloc(sizeof(struct bnad_rxf_info) * bnad->rxf_num,
+				  GFP_KERNEL);
+	if (!bnad->rxf_table)
+		return -ENOMEM;
+	bnad_rxf_init(bnad, BNAD_RX_FUNC_ID, BNAD_RIT_OFFSET,
+	    (bnad->cq_num > 1) ? 1 : 0);
+	return 0;
+}
+
+static void bnad_setup_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	txqinfo->txq_config.priority = txq_id;
+	/* Set wrr_quota properly if multiple priorities/TxQs are enabled. */
+	txqinfo->txq_config.wrr_quota = BNAD_TX_MAX_WRR_QUOTA;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txq_config(bnad->priv, &txqinfo->txq, txq_id,
+	    &txqinfo->txq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	/*
+	 * Every RxQ set has 2 RxQs: the first is the large-buffer RxQ,
+	 * the second is the small-buffer RxQ.
+	 */
+	if ((rxq_id % bnad_rxqs_per_cq) == 0)
+		rxqinfo->rxq_config.buffer_size =
+		    (bnad_vlan_strip ? VLAN_ETH_HLEN : ETH_HLEN) +
+		    bnad->netdev->mtu + ETH_FCS_LEN;
+	else
+		rxqinfo->rxq_config.buffer_size = BNAD_SMALL_RXBUF_SIZE;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxq_config(bnad->priv, &rxqinfo->rxq, rxq_id,
+	    &rxqinfo->rxq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_setup_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_cq_config(bnad->priv, &cqinfo->cq, cq_id,
+	    &cqinfo->cq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_queues(struct bnad *bnad)
+{
+	uint i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_txq(bnad, i);
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_setup_rxq(bnad, i);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_cq(bnad, i);
+}
+
+
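+/*
+ * Program the RxQ indirection table: one entry per CQ, with separate
+ * large/small RxQ ids when split buffering is enabled.
+ */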
+static void bnad_setup_rit(struct bnad *bnad)
+{
+	int i, size;
+
+	size = bnad->cq_num;
+
+	for (i = 0; i < size; i++) {
+		if (bnad_small_large_rxbufs) {
+			bnad->rit[i].large_rxq_id = (i << 1);
+			bnad->rit[i].small_rxq_id = (i << 1) + 1;
+		} else
+			bnad->rit[i].large_rxq_id = i;
+	}
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rit_config_set(bnad->priv, BNAD_RIT_OFFSET,
+	    bnad->rit, size);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo = &bnad->rxq_table[rxq_id];
+	u16 rxbufs;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	bnad_alloc_rxbufs(rxqinfo);
+	rxbufs = BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+	    rxqinfo->skb_unmap_q.q_depth);
+	DPRINTK(INFO, "%s allocated %u rx buffers for RxQ %u\n",
+		bnad->netdev->name, rxbufs, rxq_id);
+}
+
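+/*
+ * Push the full configuration to the hardware: quiesce the queues, then
+ * program the queues, RIT, Tx/Rx functions, MAC, MTU, pause and VLANs.
+ */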
+static int bnad_config_hw(struct bnad *bnad)
+{
+	int i, err;
+	u64 rxq_id_mask = 0;
+	struct sockaddr sa;
+	struct net_device *netdev = bnad->netdev;
+
+	spin_lock_irq(&bnad->priv_lock);
+	/* Disable the RxF until the port is brought up later. */
+	bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+	spin_unlock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			return err;
+	}
+
+	bnad_setup_queues(bnad);
+
+	bnad_setup_rit(bnad);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txf_config_set(bnad->priv, BNAD_TX_FUNC_ID,
+	    &bnad->txf_table->txf_config);
+	for (i = 0; i < bnad->rxf_num; i++) {
+		bna_rxf_config_set(bnad->priv, i,
+		    &bnad->rxf_table[i].rxf_config);
+		bna_rxf_vlan_filter(bnad->priv, i, BNA_ENABLE);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Mailbox should be enabled before this! */
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	bnad_set_mac_address_locked(netdev, &sa);
+
+	spin_lock_irq(&bnad->priv_lock);
+	/* Receive broadcasts */
+	bna_rxf_broadcast(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+
+	bna_mtu_info(bnad->priv, netdev->mtu, bnad);
+	bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+
+	bna_rxf_mcast_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	bna_mcast_mac_reset_list(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_set_rx_mode_locked(bnad->netdev);
+
+	bnad_reconfig_vlans(bnad);
+
+	bnad_setup_ibs(bnad);
+
+	return 0;
+}
+
+/* Note: bnad_cleanup doesn't free irqs */
+static void bnad_cleanup(struct bnad *bnad)
+{
+	kfree(bnad->rit);
+	bnad->rit = NULL;
+	kfree(bnad->txf_table);
+	bnad->txf_table = NULL;
+	kfree(bnad->rxf_table);
+	bnad->rxf_table = NULL;
+
+	bnad_free_ibs(bnad);
+	bnad_free_queues(bnad);
+}
+
+/* Should be called with rtnl_lock held. */
+static int bnad_start(struct bnad *bnad)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	err = bnad_alloc_ibs(bnad);
+	if (err)
+		return err;
+
+	err = bnad_init_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad_ibs_init(bnad);
+
+	err = bnad_init_funcs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_alloc_unmap_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad->rit = kzalloc(bnad->cq_num * sizeof(struct bna_rit_entry),
+	    GFP_KERNEL);
+	if (!bnad->rit) {
+		err = -ENOMEM;
+		goto finished;
+	}
+
+	err = bnad_config_hw(bnad);
+	if (err)
+		goto finished;
+
+	bnad_napi_init(bnad);
+	bnad_napi_enable(bnad);
+
+	err = bnad_request_txrx_irqs(bnad);
+	if (err) {
+		DPRINTK(ERR, "%s request for Tx/Rx irqs failed: %d\n",
+			bnad->netdev->name, err);
+		goto finished;
+	}
+	return 0;
+
+finished:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+int bnad_open_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	uint i;
+	int err;
+
+	ASSERT_RTNL();
+	DPRINTK(WARNING, "%s open\n", netdev->name);
+
+	if (BNAD_NOT_READY(bnad)) {
+		DPRINTK(WARNING, "%s is not ready yet (0x%lx)\n",
+			netdev->name, bnad->state);
+		return -EBUSY;
+	}
+
+	if (!test_bit(BNAD_DISABLED, &bnad->state)) {
+		DPRINTK(WARNING, "%s is already opened (0x%lx)\n",
+			netdev->name, bnad->state);
+		return -EPERM;
+	}
+
+	err = bnad_start(bnad);
+	if (err) {
+		DPRINTK(ERR, "%s failed to start %d\n", netdev->name, err);
+		return err;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_alloc_for_rxq(bnad, i);
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_DISABLED, &bnad->state);
+	DPRINTK(INFO, "%s is opened\n", bnad->netdev->name);
+
+	/* XXX Packets may arrive before we bring the port up. */
+	spin_lock_irq(&bnad->priv_lock);
+
+	/* RxF was disabled earlier. */
+	bna_rxf_enable(bnad->priv, BNAD_RX_FUNC_ID);
+	spin_unlock_irq(&bnad->priv_lock);
+
+
+	DPRINTK(INFO, "Bring %s link up\n", netdev->name);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_port_admin(bnad->priv, BNA_ENABLE);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	mod_timer(&bnad->stats_timer, jiffies + HZ);
+
+	return 0;
+}
+
+int bnad_stop_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	ASSERT_RTNL();
+	DPRINTK(WARNING, "%s stop\n", netdev->name);
+
+	if (test_and_set_bit(BNAD_DISABLED, &bnad->state)) {
+		if (BNAD_NOT_READY(bnad)) {
+			DPRINTK(WARNING, "%s is not ready (0x%lx)\n",
+				netdev->name, bnad->state);
+			return -EBUSY;
+		} else {
+			DPRINTK(WARNING, "%s is already stopped (0x%lx)\n",
+				netdev->name, bnad->state);
+			return -EPERM;
+		}
+	}
+
+	bnad_disable(bnad);
+	bnad_cleanup(bnad);
+	DPRINTK(INFO, "%s is stopped\n", bnad->netdev->name);
+	return 0;
+}
+
+int bnad_open(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int error = 0;
+
+	bnad_lock();
+	if (!test_bit(BNAD_PORT_DISABLED, &bnad->state))
+		error = bnad_open_locked(netdev);
+	bnad_unlock();
+	return error;
+}
+
+int bnad_stop(struct net_device *netdev)
+{
+	int error = 0;
+
+	bnad_lock();
+	error = bnad_stop_locked(netdev);
+	bnad_unlock();
+	return error;
+}
+
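+/*
+ * Prepare an skb for TSO: make the headers writable and seed the TCP
+ * checksum with the pseudo-header sum (excluding the length field).
+ */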
+static int bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+	int err;
+
+	BNA_ASSERT(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+	    skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6);
+
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err) {
+			bnad->stats.tso_err++;
+			return err;
+		}
+	}
+
+	/*
+	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
+	 * excluding the length field.
+	 */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		/* Do we really need these? */
+		iph->tot_len = 0;
+		iph->check = 0;
+
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(
+		    iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0);
+		bnad->stats.tso4++;
+	} else {
+		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+		BNA_ASSERT(skb->protocol == htons(ETH_P_IPV6));
+		ipv6h->payload_len = 0;
+		tcp_hdr(skb)->check = ~csum_ipv6_magic(
+		    &ipv6h->saddr, &ipv6h->daddr, 0, IPPROTO_TCP, 0);
+		bnad->stats.tso6++;
+	}
+
+	return 0;
+}
+
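+/*
+ * Main transmit routine: build work items of up to 4 vectors each
+ * (e.g. an skb with 3 frags needs 4 vectors, i.e. one work item), map
+ * the linear data and fragments for DMA, then ring the TxQ doorbell.
+ */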
+netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	struct bnad_unmap_q *unmap_q;
+	u16 txq_prod;
+	unsigned int unmap_prod, wis, wis_used, wi_range;
+	unsigned int vectors, vect_id, i, acked;
+	int err;
+	dma_addr_t dma_addr;
+	struct bna_txq_entry *txqent;
+	bna_txq_wi_ctrl_flag_t flags;
+
+	if (unlikely(skb->len <= ETH_HLEN ||
+	    skb->len > BNAD_TX_MAX_DATA_PER_WI)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+	unmap_q = &txqinfo->skb_unmap_q;
+
+	vectors = 1 + skb_shinfo(skb)->nr_frags;
+	if (vectors > BNAD_TX_MAX_VECTORS) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	wis = BNAD_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
+	acked = 0;
+	if (unlikely(wis > BNA_Q_FREE_COUNT(txq) ||
+	    vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+		if ((u16)(*txqinfo->hw_consumer_index) !=
+		    txq->q.consumer_index &&
+		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+			acked = bnad_free_txbufs(txqinfo,
+			    (u16)(*txqinfo->hw_consumer_index));
+			bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+			DPRINTK(DEBUG, "%s ack TxQ IB %u packets\n",
+				netdev->name, acked);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+		} else
+			netif_stop_queue(netdev);
+
+		smp_mb();
+		/*
+		 * Check again to deal with race condition between
+		 * netif_stop_queue here, and netif_wake_queue in
+		 * interrupt handler which is not inside netif tx lock.
+		 */
+		if (likely(wis > BNA_Q_FREE_COUNT(txq) ||
+		    vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+			bnad->stats.netif_queue_stop++;
+			return NETDEV_TX_BUSY;
+		} else {
+			netif_wake_queue(netdev);
+		}
+	}
+
+	unmap_prod = unmap_q->producer_index;
+	wis_used = 1;
+	vect_id = 0;
+	flags = 0;
+
+	txq_prod = txq->q.producer_index;
+	BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= txq->q.q_depth);
+	txqent->hdr.wi.reserved = 0;
+	txqent->hdr.wi.num_vectors = vectors;
+	txqent->hdr.wi.opcode = htons((skb_is_gso(skb) ?
+	    BNA_TXQ_WI_SEND_LSO : BNA_TXQ_WI_SEND));
+
+	if (bnad_ipid_mode)
+		flags |= BNA_TXQ_WI_CF_IPID_MODE;
+
+	if (bnad->vlangrp && vlan_tx_tag_present(skb)) {
+		u16 vlan_tag = (u16)vlan_tx_tag_get(skb);
+		if ((vlan_tag >> 13) & 0x7)
+			flags |= BNA_TXQ_WI_CF_INS_PRIO;
+		if (vlan_tag & VLAN_VID_MASK)
+			flags |= BNA_TXQ_WI_CF_INS_VLAN;
+		txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+	} else
+		txqent->hdr.wi.vlan_tag = 0;
+
+	if (skb_is_gso(skb)) {
+		err = bnad_tso_prepare(bnad, skb);
+		if (err) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
+		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+		txqent->hdr.wi.l4_hdr_size_n_offset = htons(
+		    BNA_TXQ_WI_L4_HDR_N_OFFSET(tcp_hdrlen(skb) >> 2,
+		    skb_transport_offset(skb)));
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = 0;
+
+		txqent->hdr.wi.lso_mss = 0;
+
+		if (skb->protocol == htons(ETH_P_IP))
+			proto = ip_hdr(skb)->protocol;
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* XXX the nexthdr may not be TCP immediately. */
+			proto = ipv6_hdr(skb)->nexthdr;
+		}
+		if (proto == IPPROTO_TCP) {
+			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset = htons(
+			    BNA_TXQ_WI_L4_HDR_N_OFFSET(0,
+			    skb_transport_offset(skb)));
+			bnad->stats.tcpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+			    skb_transport_offset(skb) + tcp_hdrlen(skb));
+		} else if (proto == IPPROTO_UDP) {
+			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset = htons(
+			    BNA_TXQ_WI_L4_HDR_N_OFFSET(0,
+			    skb_transport_offset(skb)));
+			bnad->stats.udpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+			    skb_transport_offset(skb) + sizeof(struct udphdr));
+		} else {
+			err = skb_checksum_help(skb);
+			bnad->stats.csum_help++;
+			if (err) {
+				dev_kfree_skb(skb);
+				bnad->stats.csum_help_err++;
+				return NETDEV_TX_OK;
+			}
+		}
+	} else {
+		txqent->hdr.wi.lso_mss = 0;
+		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+	}
+
+	txqent->hdr.wi.flags = htons(flags);
+
+	txqent->hdr.wi.frame_length = htonl(skb->len);
+
+	unmap_q->unmap_array[unmap_prod].skb = skb;
+	BNA_ASSERT(skb_headlen(skb) <= BNAD_TX_MAX_DATA_PER_VECTOR);
+	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+	    PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+	    dma_addr);
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		if (++vect_id == BNAD_TX_MAX_VECTORS_PER_WI) {
+			vect_id = 0;
+			if (--wi_range)
+				txqent++;
+			else {
+				BNA_QE_INDX_ADD(txq_prod, wis_used,
+				    txq->q.q_depth);
+				wis_used = 0;
+				BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent,
+				    wi_range);
+				BNA_ASSERT(wi_range &&
+				    wi_range <= txq->q.q_depth);
+			}
+			wis_used++;
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+		}
+
+		BNA_ASSERT(frag->size <= BNAD_TX_MAX_DATA_PER_VECTOR);
+		txqent->vector[vect_id].length = htons(frag->size);
+		BNA_ASSERT(unmap_q->unmap_array[unmap_prod].skb == NULL);
+		dma_addr = pci_map_page(bnad->pcidev, frag->page,
+		    frag->page_offset, frag->size, PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+		    dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+	}
+
+	unmap_q->producer_index = unmap_prod;
+	BNA_QE_INDX_ADD(txq_prod, wis_used, txq->q.q_depth);
+	txq->q.producer_index = txq_prod;
+
+	smp_mb();
+	bna_txq_prod_indx_doorbell(txq);
+	netdev->trans_start = jiffies;
+
+	if ((u16)(*txqinfo->hw_consumer_index) !=
+	    txq->q.consumer_index &&
+	    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+		acked = bnad_free_txbufs(txqinfo,
+		    (u16)(*txqinfo->hw_consumer_index));
+		bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+	}
+
+	return NETDEV_TX_OK;
+}
+
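+/* Aggregate per-queue software counters and MAC hardware statistics. */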
+struct net_device_stats *bnad_get_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &bnad->net_stats;
+	struct cna_stats_mac_rx *rxstats = &bnad->hw_stats->mac_rx_stats;
+	struct cna_stats_mac_tx *txstats = &bnad->hw_stats->mac_tx_stats;
+	int i;
+
+	memset(net_stats, 0, sizeof(*net_stats));
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			net_stats->rx_packets += bnad->rxq_table[i].rx_packets;
+			net_stats->rx_bytes += bnad->rxq_table[i].rx_bytes;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			net_stats->tx_packets += bnad->txq_table[i].tx_packets;
+			net_stats->tx_bytes += bnad->txq_table[i].tx_bytes;
+		}
+	}
+	net_stats->rx_errors = rxstats->rx_fcs_error +
+	    rxstats->rx_alignment_error + rxstats->rx_frame_length_error +
+	    rxstats->rx_code_error + rxstats->rx_undersize;
+	net_stats->tx_errors = txstats->tx_fcs_error + txstats->tx_undersize;
+	net_stats->rx_dropped = rxstats->rx_drop;
+	net_stats->tx_dropped = txstats->tx_drop;
+	net_stats->multicast = rxstats->rx_multicast;
+	net_stats->collisions = txstats->tx_total_collision;
+
+	net_stats->rx_length_errors = rxstats->rx_frame_length_error;
+	net_stats->rx_crc_errors = rxstats->rx_fcs_error;
+	net_stats->rx_frame_errors = rxstats->rx_alignment_error;
+	/* receiver FIFO overrun */
+	net_stats->rx_fifo_errors =
+	    bnad->hw_stats->rxf_stats[0].frame_drops;
+
+	return net_stats;
+}
+
+void bnad_reset_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_rxq_info *rxqinfo;
+	struct bnad_txq_info *txqinfo;
+	int i;
+	memset(&bnad->stats, 0, sizeof(bnad->stats));
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			rxqinfo = &bnad->rxq_table[i];
+			rxqinfo->rx_packets = 0;
+			rxqinfo->rx_bytes = 0;
+			rxqinfo->rx_packets_with_error = 0;
+			rxqinfo->rxbuf_alloc_failed = 0;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			txqinfo = &bnad->txq_table[i];
+			txqinfo->tx_packets = 0;
+			txqinfo->tx_bytes = 0;
+		}
+	}
+}
+
+static void bnad_set_rx_mode_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	unsigned long irq_flags;
+
+	if (BNAD_NOT_READY(bnad))
+		return;
+
+	spin_lock_irqsave(&bnad->priv_lock, irq_flags);
+	if (netdev->flags & IFF_PROMISC) {
+		if (!(bnad->flags & BNAD_F_PROMISC)) {
+			bna_rxf_promiscuous(bnad->priv,
+			    BNAD_RX_FUNC_ID, BNA_ENABLE);
+			bnad->flags |= BNAD_F_PROMISC;
+		}
+	} else {
+		if (bnad->flags & BNAD_F_PROMISC) {
+			bna_rxf_promiscuous(bnad->priv,
+			    BNAD_RX_FUNC_ID, BNA_DISABLE);
+			bnad->flags &= ~BNAD_F_PROMISC;
+		}
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		if (!(bnad->flags & BNAD_F_ALLMULTI)) {
+			bna_rxf_mcast_filter(bnad->priv,
+			    BNAD_RX_FUNC_ID, BNA_DISABLE);
+			bnad->flags |= BNAD_F_ALLMULTI;
+		}
+	} else {
+		if (bnad->flags & BNAD_F_ALLMULTI) {
+			bna_rxf_mcast_filter(bnad->priv,
+			    BNAD_RX_FUNC_ID, BNA_ENABLE);
+			bnad->flags &= ~BNAD_F_ALLMULTI;
+		}
+	}
+	spin_unlock_irqrestore(&bnad->priv_lock, irq_flags);
+
+	if (netdev->mc_count) {
+		u8 *mcaddr_list;
+		u8 bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+		struct dev_mc_list *mc;
+		int i;
+
+		mcaddr_list = kzalloc((netdev->mc_count + 1) *
+		    (ETH_ALEN * sizeof(u8)), GFP_ATOMIC);
+		if (!mcaddr_list)
+			return;
+		memcpy(&mcaddr_list[0], bcast_addr, ETH_ALEN * sizeof(u8));
+
+		mc = netdev->mc_list;
+		for (i = 1; mc && i < netdev->mc_count + 1; i++, mc = mc->next)
+			memcpy(&mcaddr_list[i * ETH_ALEN], mc->dmi_addr,
+			    ETH_ALEN * sizeof(u8));
+
+		spin_lock_irqsave(&bnad->priv_lock, irq_flags);
+		err = bna_rxf_mcast_mac_set_list(bnad->priv, BNAD_RX_FUNC_ID,
+		    (const u8 *)mcaddr_list, netdev->mc_count + 1);
+		spin_unlock_irqrestore(&bnad->priv_lock, irq_flags);
+
+		kfree(mcaddr_list);
+	}
+}
+
+static void bnad_set_rx_mode(struct net_device *netdev)
+{
+	bnad_lock();
+	bnad_set_rx_mode_locked(netdev);
+	bnad_unlock();
+}
+
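+/*
+ * Issue a unicast MAC set/add/delete command to the firmware and wait
+ * for the mailbox completion.
+ */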
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id,
+    u8 *mac_ptr, unsigned int cmd)
+{
+	int err = 0;
+	enum bna_status_e (*ucast_mac_func)(struct bna_dev_s *bna_dev,
+		unsigned int rxf_id, const u8 *mac_addr_ptr) = NULL;
+
+	WARN_ON(in_interrupt());
+	if (!is_valid_ether_addr(mac_ptr))
+		return -EINVAL;
+
+	switch (cmd) {
+	case BNAD_UCAST_MAC_SET:
+		ucast_mac_func = bna_rxf_ucast_mac_set;
+		break;
+	case BNAD_UCAST_MAC_ADD:
+		ucast_mac_func = bna_rxf_ucast_mac_add;
+		break;
+	case BNAD_UCAST_MAC_DEL:
+		ucast_mac_func = bna_rxf_ucast_mac_del;
+		break;
+	}
+
+	while (test_and_set_bit(BNAD_SET_UCAST, &bnad->state))
+		msleep(1);
+	init_completion(&bnad->ucast_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = ucast_mac_func(bnad->priv, rxf_id, (const u8 *)mac_ptr);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err)
+		goto ucast_mac_exit;
+
+	DPRINTK(INFO, "Waiting for %s MAC operation %d reply\n",
+		bnad->netdev->name, cmd);
+	wait_for_completion(&bnad->ucast_comp);
+	err = bnad->ucast_comp_status;
+ucast_mac_exit:
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_SET_UCAST, &bnad->state);
+	if (err) {
+		printk(KERN_INFO
+		    "%s unicast MAC address command %d failed: %d\n",
+		    bnad->netdev->name, cmd, err);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int err;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (!BNAD_NOT_READY(bnad)) {
+		err = bnad_ucast_mac(bnad, BNAD_RX_FUNC_ID, (u8 *)sa->sa_data,
+		    BNAD_UCAST_MAC_SET);
+		if (err)
+			return err;
+	}
+
+	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+	return 0;
+}
+
+static int bnad_set_mac_address(struct net_device *netdev, void *addr)
+{
+	int err = 0;
+
+	bnad_lock();
+	err = bnad_set_mac_address_locked(netdev, addr);
+	bnad_unlock();
+	return err;
+
+}
+
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int err = 0;
+
+	WARN_ON(in_interrupt());
+
+	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+		return -EINVAL;
+
+	bnad_lock();
+
+	netdev->mtu = new_mtu;
+
+	err = bnad_sw_reset(netdev);
+
+	bnad_unlock();
+
+	return err;
+}
+
+static int bnad_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	return -EOPNOTSUPP;
+}
+
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_lock();
+	bnad->vlangrp = grp;
+	bnad_unlock();
+}
+
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	unsigned long irq_flags;
+
+	DPRINTK(INFO, "%s add vlan %u\n", netdev->name, vid);
+	bnad_lock();
+	if (BNAD_NOT_READY(bnad)) {
+		bnad_unlock();
+		return;
+	}
+	spin_lock_irqsave(&bnad->priv_lock, irq_flags);
+	bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID, (unsigned int)vid);
+	spin_unlock_irqrestore(&bnad->priv_lock, irq_flags);
+	bnad_unlock();
+}
+
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	unsigned long irq_flags;
+
+	DPRINTK(INFO, "%s remove vlan %u\n", netdev->name, vid);
+	bnad_lock();
+	if (BNAD_NOT_READY(bnad)) {
+		bnad_unlock();
+		return;
+	}
+	spin_lock_irqsave(&bnad->priv_lock, irq_flags);
+	bna_rxf_vlan_del(bnad->priv, BNAD_RX_FUNC_ID, (unsigned int)vid);
+	spin_unlock_irqrestore(&bnad->priv_lock, irq_flags);
+	bnad_unlock();
+}
+
+static void bnad_reconfig_vlans(struct bnad *bnad)
+{
+	u16 vlan_id;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxf_vlan_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	if (bnad->vlangrp) {
+		for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
+			if (vlan_group_get_device(bnad->vlangrp, vlan_id))
+				bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+				    (unsigned int)vlan_id);
+		}
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnad_netpoll(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	DPRINTK(INFO, "%s bnad_netpoll\n", netdev->name);
+	/* TODO: does not handle MSI-X yet */
+	if (!(bnad->flags & BNAD_F_MSIX)) {
+		disable_irq(bnad->pcidev->irq);
+		bnad_isr(bnad->pcidev->irq, netdev);
+		enable_irq(bnad->pcidev->irq);
+	}
+}
+#endif
+
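+/*
+ * Work out TxQ/RxQ/CQ and MSI-X vector counts: in MSI-X mode one CQ per
+ * online CPU (capped and rounded down to a power of 2), one CQ in INTx.
+ */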
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets)
+{
+	bnad->txq_num = BNAD_TXQ_NUM;
+	bnad->txf_num = 1;
+
+	if (bnad->flags & BNAD_F_MSIX) {
+		if (rxqsets) {
+			bnad->cq_num = rxqsets;
+			if (bnad->cq_num > BNAD_MAX_CQS)
+				bnad->cq_num = BNAD_MAX_CQS;
+		} else
+			bnad->cq_num = min((uint)num_online_cpus(),
+			    (uint)BNAD_MAX_RXQSETS_USED);
+		if (!BNA_POWER_OF_2(bnad->cq_num))
+			BNA_TO_POWER_OF_2(bnad->cq_num);
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+
+		bnad->rxf_num = 1;
+		bnad->msix_num = bnad->txq_num + bnad->cq_num +
+		    BNAD_MSIX_ERR_MAILBOX_NUM;
+	} else {
+		bnad->cq_num = 1;
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+		bnad->rxf_num = 1;
+		bnad->msix_num = 0;
+	}
+}
+
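+/*
+ * Try to enable MSI-X; if fewer vectors are available, shrink the RxQ
+ * set count and retry, falling back to INTx mode (no RSS) on failure.
+ */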
+static void bnad_enable_msix(struct bnad *bnad)
+{
+	int i, ret;
+
+	if (!(bnad->flags & BNAD_F_MSIX) || bnad->msix_table)
+		return;
+
+	bnad->msix_table = kzalloc(
+	    bnad->msix_num * sizeof(struct msix_entry), GFP_KERNEL);
+	if (!bnad->msix_table)
+		goto intx_mode;
+
+	for (i = 0; i < bnad->msix_num; i++)
+		bnad->msix_table[i].entry = i;
+
+	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+	    bnad->msix_num);
+	if (ret > 0) {
+		/* Not enough MSI-X vectors. */
+		int rxqsets = ret;
+
+		dev_err(&bnad->pcidev->dev,
+		    "Tried to get %d MSI-X vectors, only got %d\n",
+		    bnad->msix_num, ret);
+		BNA_TO_POWER_OF_2(rxqsets);
+		while (bnad->msix_num > ret && rxqsets) {
+			bnad_q_num_init(bnad, rxqsets);
+			rxqsets >>= 1;
+		}
+		if (bnad->msix_num <= ret) {
+			ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+			    bnad->msix_num);
+			if (ret) {
+				dev_err(&bnad->pcidev->dev,
+				    "Enabling MSI-X failed: %d\n", ret);
+				goto intx_mode;
+			}
+		} else {
+			dev_err(&bnad->pcidev->dev,
+			    "Enabling MSI-X failed: limited (%d) vectors\n",
+			    ret);
+			goto intx_mode;
+		}
+	} else if (ret < 0) {
+		dev_err(&bnad->pcidev->dev, "Enabling MSI-X failed: %d\n", ret);
+		goto intx_mode;
+	}
+
+	dev_info(&bnad->pcidev->dev,
+	    "Enabling MSI-X succeeded with %d vectors, %s\n", bnad->msix_num,
+	    (bnad->cq_num > 1) ? "RSS is enabled" : "RSS is not enabled");
+	return;
+
+intx_mode:
+	dev_warn(&bnad->pcidev->dev, "Switching to INTx mode with no RSS\n");
+	kfree(bnad->msix_table);
+	bnad->msix_table = NULL;
+	bnad->flags &= ~BNAD_F_MSIX;
+	bnad_q_num_init(bnad, 0);
+}
+
+static void bnad_disable_msix(struct bnad *bnad)
+{
+	if ((bnad->flags & BNAD_F_MSIX) && bnad->msix_table) {
+		pci_disable_msix(bnad->pcidev);
+		kfree(bnad->msix_table);
+		bnad->msix_table = NULL;
+		bnad->flags &= ~BNAD_F_MSIX;
+	}
+}
+
+static void bnad_error(struct bnad *bnad)
+{
+	DPRINTK(INFO, "%s bnad_error\n", bnad->netdev->name);
+
+	rtnl_lock();
+	set_bit(BNAD_RESETTING, &bnad->state);
+	if (!test_and_set_bit(BNAD_DISABLED, &bnad->state)) {
+		bnad_detach(bnad);
+		bnad_cleanup(bnad);
+		DPRINTK(WARNING, "%s is disabled upon error\n",
+			bnad->netdev->name);
+	}
+	rtnl_unlock();
+}
+
+static void bnad_resume_after_reset(struct bnad *bnad)
+{
+	int err;
+	struct net_device *netdev = bnad->netdev;
+
+	DPRINTK(WARNING, "port %d resumes after reset\n", bnad->bna_id);
+
+	rtnl_lock();
+	clear_bit(BNAD_RESETTING, &bnad->state);
+
+	bna_port_mac_get(bnad->priv, (u8 *)bnad->perm_addr);
+	BNA_ASSERT(netdev->addr_len == sizeof(bnad->perm_addr));
+	memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+	if (is_zero_ether_addr(netdev->dev_addr))
+		memcpy(netdev->dev_addr, bnad->perm_addr, netdev->addr_len);
+
+	if (netif_running(bnad->netdev)) {
+		err = bnad_open_locked(bnad->netdev);
+		if (err)
+			DPRINTK(ERR, "%s bnad_open failed after reset: %d\n",
+				bnad->netdev->name, err);
+	}
+	rtnl_unlock();
+}
+
+static void bnad_work(struct work_struct *work)
+{
+	struct bnad *bnad = container_of(work, struct bnad, work);
+	unsigned long work_flags;
+
+	DPRINTK(INFO, "port %u bnad_work flags 0x%x\n",
+		bnad->bna_id, bnad->work_flags);
+
+	spin_lock_irq(&bnad->priv_lock);
+	work_flags = bnad->work_flags;
+	bnad->work_flags = 0;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (work_flags & BNAD_WF_ERROR) {
+		DPRINTK(INFO, "port %u bnad_work: BNAD_WF_ERROR\n",
+			bnad->bna_id);
+		bnad_error(bnad);
+	}
+
+	if (work_flags & BNAD_WF_RESETDONE) {
+		DPRINTK(INFO, "port %u bnad_work: BNAD_WF_RESETDONE\n",
+			bnad->bna_id);
+		bnad_resume_after_reset(bnad);
+	}
+}
+
+static void bnad_stats_timeo(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	int i;
+	struct bnad_rxq_info *rxqinfo;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_stats_get(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->rx_dyn_coalesce_on) {
+		u8 cls_timer;
+		struct bnad_cq_info *cq;
+		for (i = 0; i < bnad->cq_num; i++) {
+			cq = &bnad->cq_table[i];
+
+			if ((cq->pkt_rate.small_pkt_cnt == 0) &&
+			    (cq->pkt_rate.large_pkt_cnt == 0))
+				continue;
+
+			cls_timer = bna_calc_coalescing_timer(bnad->priv,
+			    &cq->pkt_rate);
+
+			/* For the NAPI version, the coalescing timer
+			 * needs to be stored per CQ. */
+			cq->rx_coalescing_timeo = cls_timer;
+
+			bna_ib_coalescing_timer_set(bnad->priv, &cq->ib,
+			    cls_timer);
+		}
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		if (!(BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+		    rxqinfo->skb_unmap_q.q_depth) >>
+		     BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) {
+			DPRINTK(INFO, "%s: RxQ %d more buffers to allocate\n",
+				bnad->netdev->name, i);
+			if (test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags))
+				continue;
+			bnad_alloc_rxbufs(rxqinfo);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+		}
+	}
+}
+
+static void bnad_free_ioc_mem(struct bnad *bnad)
+{
+	enum bna_dma_mem_type i;
+
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		if (bnad->ioc_meminfo[i].kva && bnad->ioc_meminfo[i].dma)
+			pci_free_consistent(bnad->pcidev,
+			    bnad->ioc_meminfo[i].len, bnad->ioc_meminfo[i].kva,
+			    *(dma_addr_t *)&bnad->ioc_meminfo[i].dma);
+		else if (bnad->ioc_meminfo[i].kva)
+			vfree(bnad->ioc_meminfo[i].kva);
+		bnad->ioc_meminfo[i].kva = NULL;
+	}
+}
+
+/* The following IOC callback functions are called with priv_lock held. */
+
+void bna_iocll_enable_cbfn(void *arg, enum bfa_status status)
+{
+	struct bnad *bnad = arg;
+
+	DPRINTK(WARNING, "port %u IOC enable callback, status %d\n",
+		bnad->bna_id, status);
+
+	bnad->ioc_comp_status = status;
+	complete(&bnad->ioc_comp);
+
+	if (!status) {
+		bnad->work_flags |= BNAD_WF_RESETDONE;
+		if (!test_bit(BNAD_REMOVED, &bnad->state))
+			schedule_work(&bnad->work);
+	}
+}
+
+void bna_iocll_disable_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	DPRINTK(WARNING, "port %u IOC disable callback\n",
+		bnad->bna_id);
+	complete(&bnad->ioc_comp);
+}
+
+void bna_iocll_hbfail_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	DPRINTK(ERR, "port %u IOC HBFail callback\n", bnad->bna_id);
+	bnad_hw_error(bnad, BFA_STATUS_IOC_FAILURE);
+}
+
+void bna_iocll_reset_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+	u32 int_status, int_mask;
+	unsigned int irq;
+
+	DPRINTK(WARNING, "port %u IOC reset callback\n", bnad->bna_id);
+
+	/* Clear the status */
+	bna_intr_status_get(bnad->priv, &int_status);
+
+	if (bnad->flags & BNAD_F_MSIX) {
+		if (test_and_clear_bit(BNAD_MBOX_IRQ_DISABLED, &bnad->state)) {
+			irq = bnad->msix_table[bnad->txq_num +
+				bnad->cq_num].vector;
+			DPRINTK(WARNING, "Enabling Mbox IRQ %d for port %d\n",
+				irq, bnad->bna_id);
+			enable_irq(irq);
+		}
+	}
+
+	int_mask = ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS);
+	bna_intx_enable(bnad->priv, int_mask);
+}
+
+static void bnad_ioc_timeout(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_timer(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (!test_bit(BNAD_REMOVED, &bnad->state))
+		mod_timer(&bnad->ioc_timer, jiffies +
+			  msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+}
+
+s32
+bnad_cee_attach(struct bnad *bnad)
+{
+	u8 *dma_kva;
+	dma_addr_t dma_pa;
+	struct bfa_cee_s *cee = &bnad->cee;
+
+	memset(cee, 0, sizeof(struct bfa_cee_s));
+
+	/* Allocate memory for DMA. */
+	dma_kva = pci_alloc_consistent(bnad->pcidev, bfa_cee_meminfo(),
+			    &dma_pa);
+	if (dma_kva == NULL)
+		return -ENOMEM;
+
+	/* Ugly... need to remove once CAL is fixed. */
+	((struct bna_dev_s *)bnad->priv)->cee = cee;
+
+	bnad->cee_cbfn.get_attr_cbfn = bnad_cee_get_attr_cb;
+	bnad->cee_cbfn.get_stats_cbfn = bnad_cee_get_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = NULL;
+
+	/* Invoke the CEE attach function. */
+	bfa_cee_attach(cee, &bnad->priv->ioc, bnad,
+	    bnad->trcmod, bnad->logmod);
+	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
+	return 0;
+}
+
+static void
+bnad_cee_detach(struct bnad *bnad)
+{
+	struct bfa_cee_s *cee = &bnad->cee;
+	if (cee->attr_dma.kva) {
+		pci_free_consistent(bnad->pcidev, bfa_cee_meminfo(),
+		    cee->attr_dma.kva, cee->attr_dma.pa);
+	}
+	bfa_cee_detach(&bnad->cee);
+}
+
+
+static int bnad_priv_init(struct bnad *bnad)
+{
+	dma_addr_t dma_addr;
+	struct bna_dma_addr bna_dma_addr;
+	char inst_name[16];
+	int err, i;
+	struct bfa_pcidev_s pcidev_info;
+	u32 intr_mask;
+
+	DPRINTK(DEBUG, "port %u bnad_priv_init\n", bnad->bna_id);
+
+	if (bnad_msix)
+		bnad->flags |= BNAD_F_MSIX;
+	bnad_q_num_init(bnad, bnad_rxqsets_used);
+
+	bnad->work_flags = 0;
+	INIT_WORK(&bnad->work, bnad_work);
+
+	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
+		    (unsigned long)bnad);
+
+	bnad->tx_coalescing_timeo = BNAD_TX_COALESCING_TIMEO;
+	bnad->tx_interpkt_count = BNAD_TX_INTERPKT_COUNT;
+
+	bnad->rx_coalescing_timeo = BNAD_RX_COALESCING_TIMEO;
+	bnad->rx_interpkt_count = BNAD_RX_INTERPKT_COUNT;
+	bnad->rx_interpkt_timeo = BNAD_RX_INTERPKT_TIMEO;
+	bnad->rx_dyn_coalesce_on = BNA_TRUE;
+
+	bnad->rx_csum = 1;
+	bnad->pause_config.tx_pause = 0;
+	bnad->pause_config.rx_pause = 0;
+
+	/* XXX could be vmalloc? */
+	bnad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
+	if (!bnad->trcmod) {
+		DPRINTK(ERR, "port %u failed allocating trace buffer!\n",
+			bnad->bna_id);
+		return -ENOMEM;
+	}
+	bfa_trc_init(bnad->trcmod);
+
+	bnad->logmod = NULL;
+	sprintf(inst_name, "%u", bnad->bna_id);
+
+	bnad->aen = NULL;
+	INIT_LIST_HEAD(&bnad->file_q);
+	INIT_LIST_HEAD(&bnad->file_free_q);
+	for (i = 0; i < BNAD_AEN_MAX_APPS; i++) {
+		bfa_q_qe_init(&bnad->file_buf[i].qe);
+		list_add_tail(&bnad->file_buf[i].qe, &bnad->file_free_q);
+	}
+
+	bnad->priv = kzalloc(bna_get_handle_size(), GFP_KERNEL);
+	if (!bnad->priv) {
+		DPRINTK(ERR, "port %u failed allocating memory for bna\n",
+			bnad->bna_id);
+		err = -ENOMEM;
+		goto free_trcmod;
+	}
+	bnad->priv_stats = pci_alloc_consistent(bnad->pcidev,
+	    BNA_HW_STATS_SIZE, &dma_addr);
+	if (!bnad->priv_stats) {
+		DPRINTK(ERR, "port %u failed allocating memory for bna stats\n",
+			bnad->bna_id);
+		err = -ENOMEM;
+		goto free_priv_mem;
+	}
+	pci_unmap_addr_set(bnad, priv_stats_dma, dma_addr);
+	DPRINTK(DEBUG, "port %u priv_stats dma addr 0x%llx\n",
+		bnad->bna_id, (unsigned long long)dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &bna_dma_addr);
+	bna_init(bnad->priv, (void *)bnad->bar0, bnad->priv_stats,
+	    bna_dma_addr, bnad->trcmod);
+	bna_all_stats_get(bnad->priv, &bnad->hw_stats);
+	spin_lock_init(&bnad->priv_lock);
+	bnad->priv_cbfn.ucast_set_cb = bnad_ucast_set_cb;
+	bnad->priv_cbfn.txq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.rxq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.link_up_cb = bnad_link_up_cb;
+	bnad->priv_cbfn.link_down_cb = bnad_link_down_cb;
+	bnad->priv_cbfn.stats_get_cb = bnad_stats_get_cb;
+	bnad->priv_cbfn.hw_error_cb = bnad_hw_error_cb;
+	bnad->priv_cbfn.lldp_get_cfg_cb = bnad_lldp_get_cfg_cb;
+
+	bna_register_callback(bnad->priv, &bnad->priv_cbfn, bnad);
+
+	bna_iocll_meminfo(bnad->priv, bnad->ioc_meminfo);
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		switch (i) {
+		case BNA_KVA_MEM_T_FWTRC:
+			bnad->ioc_meminfo[i].kva = vmalloc(
+			    bnad->ioc_meminfo[i].len);
+			break;
+		default:
+			bnad->ioc_meminfo[i].kva = pci_alloc_consistent(
+			    bnad->pcidev, bnad->ioc_meminfo[i].len,
+			    (dma_addr_t *)&bnad->ioc_meminfo[i].dma);
+
+			break;
+		}
+		if (!bnad->ioc_meminfo[i].kva) {
+			DPRINTK(ERR,
+				"port %u failed allocating %u bytes "
+				"of memory for IOC\n",
+				bnad->bna_id, bnad->ioc_meminfo[i].len);
+			err = -ENOMEM;
+			goto free_ioc_mem;
+		}
+	}
+
+	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+	pcidev_info.device_id = bnad->pcidev->device;
+	pcidev_info.pci_bar_kva = bnad->bar0;
+	bna_iocll_attach(bnad->priv, bnad, bnad->ioc_meminfo,
+	    &pcidev_info, bnad->trcmod, bnad->aen, bnad->logmod);
+
+	err = bnad_cee_attach(bnad);
+	if (err) {
+		DPRINTK(ERR, "port %u cee_attach failed: %d\n",
+			bnad->bna_id, err);
+		goto iocll_detach;
+	}
+
+	if (bnad->flags & BNAD_F_MSIX)
+		bnad_enable_msix(bnad);
+	else
+		dev_info(&bnad->pcidev->dev, "Working in INTx mode, no RSS\n");
+	bna_intx_disable(bnad->priv, &intr_mask);
+	err = bnad_request_mbox_irq(bnad);
+	if (err)
+		goto disable_msix;
+
+	init_completion(&bnad->ioc_comp);
+	DPRINTK(DEBUG, "port %u enabling IOC ...\n", bnad->bna_id);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_enable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	setup_timer(&bnad->ioc_timer, bnad_ioc_timeout,
+		    (unsigned long)bnad);
+	mod_timer(&bnad->ioc_timer, jiffies +
+		  msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));
+
+	DPRINTK(DEBUG, "port %u waiting for IOC ready.\n", bnad->bna_id);
+	wait_for_completion(&bnad->ioc_comp);
+	if (!bnad->ioc_comp_status) {
+		DPRINTK(INFO, "port %u IOC is enabled.\n", bnad->bna_id);
+		bna_port_mac_get(bnad->priv,
+		    (u8 *)bnad->perm_addr);
+	} else {
+		DPRINTK(ERR, "port %u enabling IOC failed: %d\n",
+			bnad->bna_id, bnad->ioc_comp_status);
+		set_bit(BNAD_RESETTING, &bnad->state);
+	}
+
+	return 0;
+
+disable_msix:
+	bnad_disable_msix(bnad);
+	bnad_cee_detach(bnad);
+iocll_detach:
+	bna_iocll_detach(bnad->priv);
+free_ioc_mem:
+	bnad_free_ioc_mem(bnad);
+	bna_uninit(bnad->priv);
+	pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE, bnad->priv_stats,
+	    pci_unmap_addr(bnad, priv_stats_dma));
+	bnad->priv_stats = NULL;
+free_priv_mem:
+	kfree(bnad->priv);
+	bnad->priv = NULL;
+free_trcmod:
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+
+	return err;
+}
+
+static void bnad_priv_uninit(struct bnad *bnad)
+{
+	int i;
+	enum bna_status_e err;
+
+	if (bnad->priv) {
+		DPRINTK(INFO, "port %u disabling IOC ...\n", bnad->bna_id);
+		init_completion(&bnad->ioc_comp);
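+		/* bna_iocll_disable() may return BNA_BUSY; retry for up
+		 * to ~10 seconds before forcing a cleanup. */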
+		for (i = 0; i < 10; i++) {
+			spin_lock_irq(&bnad->priv_lock);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err || err == BNA_BUSY);
+			if (!err)
+				break;
+			msleep(1000);
+		}
+		if (err) {
+			/* Probably firmware crashed. */
+			DPRINTK(INFO,
+				"bna_iocll_disable failed, "
+				"cleaning up and trying again\n");
+			spin_lock_irq(&bnad->priv_lock);
+			bna_cleanup(bnad->priv);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err);
+		}
+		wait_for_completion(&bnad->ioc_comp);
+		set_bit(BNAD_IOC_DISABLED, &bnad->state);
+		DPRINTK(INFO, "port %u IOC is disabled\n", bnad->bna_id);
+
+		set_bit(BNAD_REMOVED, &bnad->state);
+		/* Stop the timer after disabling IOC. */
+		del_timer_sync(&bnad->ioc_timer);
+		bnad_free_ioc_mem(bnad);
+		bna_iocll_detach(bnad->priv);
+
+		flush_scheduled_work();
+		bnad_free_mbox_irq(bnad);
+		bnad_disable_msix(bnad);
+
+		bnad_cee_detach(bnad);
+
+		bna_uninit(bnad->priv);
+		if (bnad->priv_stats) {
+			pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+			    bnad->priv_stats,
+			    pci_unmap_addr(bnad, priv_stats_dma));
+			bnad->priv_stats = NULL;
+		}
+		kfree(bnad->priv);
+		bnad->priv = NULL;
+	}
+	BNA_ASSERT(list_empty(&bnad->file_q));
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+}
+
+static struct pci_device_id bnad_pci_id_table[] = {
+	{
+	 .vendor = PCI_VENDOR_ID_BROCADE,
+	 .device = PCI_DEVICE_ID_BROCADE_CATAPULT,
+	 .subvendor = PCI_ANY_ID,
+	 .subdevice = PCI_ANY_ID,
+	 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+	 .class_mask = 0xffff00
+	},
+	{0, 0}
+};
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static int __devinit
+bnad_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidev_id)
+{
+	int err, using_dac;
+	struct net_device *netdev;
+	struct bnad *bnad;
+	unsigned long mmio_start, mmio_len;
+	static u32 bna_id;
+
+	DPRINTK(INFO, "bnad_pci_probe(0x%p, 0x%p)\n", pcidev, pcidev_id);
+
+	DPRINTK(DEBUG, "PCI func %d\n", PCI_FUNC(pcidev->devfn));
+	if (!bfad_get_firmware_buf(pcidev)) {
+		printk(KERN_WARNING "Failed to load Firmware Image!\n");
+		return -ENODEV;
+	}
+
+	err = pci_enable_device(pcidev);
+	if (err) {
+		dev_err(&pcidev->dev, "pci_enable_device failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pcidev, BNAD_NAME);
+	if (err) {
+		dev_err(&pcidev->dev, "pci_request_regions failed: %d\n", err);
+		goto disable_device;
+	}
+
+	if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+		DPRINTK(INFO, "64bit DMA mask\n");
+	} else {
+		err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+		if (!err)
+			err = pci_set_consistent_dma_mask(pcidev,
+							  DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pcidev->dev,
+			    "set 32bit DMA mask failed: %d\n", err);
+			goto release_regions;
+		}
+		using_dac = 0;
+		DPRINTK(INFO, "32bit DMA mask\n");
+	}
+
+	pci_set_master(pcidev);
+
+	netdev = alloc_etherdev(sizeof(struct bnad));
+	if (!netdev) {
+		dev_err(&pcidev->dev, "alloc_etherdev failed\n");
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	SET_NETDEV_DEV(netdev, &pcidev->dev);
+	pci_set_drvdata(pcidev, netdev);
+
+	bnad = netdev_priv(netdev);
+	set_bit(BNAD_DISABLED, &bnad->state);
+	bnad->netdev = netdev;
+	bnad->pcidev = pcidev;
+	mmio_start = pci_resource_start(pcidev, 0);
+	mmio_len = pci_resource_len(pcidev, 0);
+	bnad->bar0 = ioremap_nocache(mmio_start, mmio_len);
+	if (!bnad->bar0) {
+		dev_err(&pcidev->dev, "ioremap for bar0 failed\n");
+		err = -ENOMEM;
+		goto free_devices;
+	}
+	DPRINTK(INFO, "bar0 mapped to %p, len %lu\n", bnad->bar0, mmio_len);
+
+	netdev->netdev_ops = &bnad_netdev_ops;
+	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_IPV6_CSUM;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+	netdev->features |= NETIF_F_LRO;
+	netdev->vlan_features = netdev->features;
+
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+	    NETIF_F_HW_VLAN_FILTER;
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len - 1;
+
+	bnad_set_ethtool_ops(netdev);
+
+	bnad->bna_id = bna_id;
+	err = bnad_priv_init(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
+		goto unmap_bar0;
+	}
+
+	BNA_ASSERT(netdev->addr_len == ETH_ALEN);
+	memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+	memcpy(netdev->dev_addr, bnad->perm_addr, netdev->addr_len);
+
+	netif_carrier_off(netdev);
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR "port %u register_netdev failed: %d\n",
+		    bnad->bna_id, err);
+		goto bnad_device_uninit;
+	}
+
+
+	bna_id++;
+	return 0;
+
+bnad_device_uninit:
+	bnad_priv_uninit(bnad);
+unmap_bar0:
+	iounmap(bnad->bar0);
+free_devices:
+	pci_set_drvdata(pcidev, NULL);
+	free_netdev(netdev);
+release_regions:
+	pci_release_regions(pcidev);
+disable_device:
+	pci_disable_device(pcidev);
+
+	return err;
+}
+
+static void __devexit bnad_pci_remove(struct pci_dev *pcidev)
+{
+	struct net_device *netdev = pci_get_drvdata(pcidev);
+	struct bnad *bnad;
+
+	DPRINTK(INFO, "%s bnad_pci_remove\n", netdev->name);
+	if (!netdev)
+		return;
+	bnad = netdev_priv(netdev);
+
+
+	unregister_netdev(netdev);
+
+	bnad_priv_uninit(bnad);
+	iounmap(bnad->bar0);
+	pci_set_drvdata(pcidev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pcidev);
+	pci_disable_device(pcidev);
+}
+
+static struct pci_driver bnad_pci_driver = {
+	.name = BNAD_NAME,
+	.id_table = bnad_pci_id_table,
+	.probe    = bnad_pci_probe,
+	.remove   = __devexit_p(bnad_pci_remove),
+};
+
+static int __init bnad_module_init(void)
+{
+	int err;
+
+	printk(KERN_INFO "Brocade 10G Ethernet driver %s\n", bfa_version);
+	DPRINTK(INFO, "Module bna is loaded at 0x%p\n",
+		__this_module.module_core);
+	err = bnad_check_module_params();
+	if (err)
+		return err;
+
+	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+	return pci_register_driver(&bnad_pci_driver);
+}
+
+static void __exit bnad_module_exit(void)
+{
+	pci_unregister_driver(&bnad_pci_driver);
+
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
+
diff -ruP net-next-2.6-orig/drivers/net/bna/bnad.h net-next-2.6-mod/drivers/net/bna/bnad.h
--- net-next-2.6-orig/drivers/net/bna/bnad.h	1969-12-31 16:00:00.000000000 -0800
+++ net-next-2.6-mod/drivers/net/bna/bnad.h	2009-10-31 21:34:47.566535000 -0700
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _BNAD_H_
+#define _BNAD_H_
+
+#include <cee/bfa_cee.h>
+#include "bna.h"
+
+#if !defined(CONFIG_INET_LRO) && !defined(CONFIG_INET_LRO_MODULE)
+#include <net/ip.h>
+#include <net/tcp.h>
+#else
+#include <linux/inet_lro.h>
+#endif
+
+#include "bnad_compat.h"
+
+#define BNAD_LRO_MAX_DESC	8
+#define BNAD_LRO_MAX_AGGR	64
+
+
+#define BNAD_MAX_Q_DEPTH	0x10000
+#define BNAD_MIN_Q_DEPTH	0x200
+
+#define BNAD_TXQ_NUM		1
+#define BNAD_TX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_TXQ	2048
+
+#define BNAD_MAX_RXQS		64
+#define BNAD_MAX_RXQSETS_USED	16
+#define BNAD_RX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_RXQ	2048
+
+#define BNAD_MAX_CQS		64
+#define BNAD_MAX_RXQS_PER_CQ	2
+
+#define BNAD_MSIX_ERR_MAILBOX_NUM	1
+
+#define BNAD_INTX_MAX_IB_NUM	16
+#define BNAD_INTX_IB_NUM	2	/* 1 for Tx, 1 for Rx */
+#define BNAD_INTX_TX_IB_ID	0
+#define BNAD_INTX_RX_IB_ID	1
+
+#define BNAD_QUEUE_NAME_SIZE	16
+
+#define BNAD_JUMBO_MTU		9000
+
+#define BNAD_COALESCING_TIMER_UNIT	5	/* 5us */
+#define BNAD_MAX_COALESCING_TIMEO	0xFF 	/* in 5us units */
+#define BNAD_MAX_INTERPKT_COUNT		0xFF
+#define BNAD_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
+
+#define BNAD_TX_COALESCING_TIMEO	20	/* 20 * 5 = 100us */
+#define BNAD_TX_INTERPKT_COUNT		32
+
+#define BNAD_RX_COALESCING_TIMEO	12	/* 12 * 5 = 60us */
+#define BNAD_RX_INTERPKT_COUNT		6
+#define BNAD_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
+
+#define BNAD_SMALL_RXBUF_SIZE	128
+
+#define BNAD_RIT_OFFSET		0
+#define BNAD_MULTICAST_RXQ_ID	0
+
+#define BNAD_NETIF_WAKE_THRESHOLD	8
+
+#define BNAD_TX_MAX_VECTORS		255
+#define BNAD_TX_MAX_VECTORS_PER_WI	4
+#define BNAD_TX_MAX_DATA_PER_WI		0xFFFFFF	/* 24 bits */
+#define BNAD_TX_MAX_DATA_PER_VECTOR	0x3FFF		/* 14 bits */
+#define BNAD_TX_MAX_WRR_QUOTA		0xFFF		/* 12 bits */
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
+
+#define BNAD_CQ_PROCESS_LIMIT		512
+
+#define BNAD_NOT_READY(_bnad)	test_bit(BNAD_RESETTING, &(_bnad)->state)
+
+#define BNAD_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)	\
+    (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define bnad_lock()
+#define bnad_unlock()
+
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_ct;
+extern u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+
+struct bnad_skb_unmap {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+	u32 producer_index;
+	u32 consumer_index;
+	struct bnad_skb_unmap *unmap_array;
+	u32 q_depth;
+};
+
+struct bnad_ib_entry {
+	struct bna_ib *ib;
+	void *ib_seg_addr;
+	struct bna_ib_config ib_config;
+};
+
+struct bnad_txq_info {
+	unsigned long flags;
+#define BNAD_TXQ_FREE_SENT	0
+	struct bna_txq txq;
+	struct bna_ib ib;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct bnad *bnad;
+	volatile u32 *hw_consumer_index;
+	struct bna_txq_config txq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+#ifdef DEBUG_TX
+	u32 max_tso;
+	u32 tx_vectors[32];
+#endif
+} ____cacheline_aligned;
+
+struct bnad_rxq_info {
+	unsigned long flags;
+#define BNAD_RXQ_REFILL		0
+	struct bna_rxq rxq;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_packets_with_error;
+	u64 rxbuf_alloc_failed;
+	struct bnad *bnad;
+	u32 rxq_id;
+	struct bna_rxq_config rxq_config;
+}  ____cacheline_aligned;
+
+struct bnad_cq_info {
+	struct bna_cq cq;
+	struct bna_ib ib;
+	struct bnad *bnad;
+	struct bna_pkt_rate pkt_rate;
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	volatile u32 *hw_producer_index;
+	struct net_lro_mgr  lro;
+	struct napi_struct napi;
+	u32 cq_id;
+	struct bna_cq_config cq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+}  ____cacheline_aligned;
+
+struct bnad_txf_info {
+	u32 txf_id;
+	struct bna_txf_config txf_config;
+};
+
+struct bnad_rxf_info {
+	u32 rxf_id;
+	struct bna_rxf_config rxf_config;
+};
+
+enum bnad_ucast_cmd {
+	BNAD_UCAST_MAC_SET,
+	BNAD_UCAST_MAC_ADD,
+	BNAD_UCAST_MAC_DEL
+};
+
+struct bnad_diag_lb_params {
+	struct bnad *bnad;
+	struct completion diag_lb_comp;
+	int diag_lb_comp_status;
+	int diag_lb_link_state;
+#define BNAD_DIAG_LB_LS_UNKNOWN	-1
+#define BNAD_DIAG_LB_LS_UP	 0
+#define BNAD_DIAG_LB_LS_DOWN	 1
+};
+
+#define BNAD_AEN_MAX_APPS 8
+struct bnad_aen_file_s {
+	struct list_head  qe;
+	struct bnad *bnad;
+	s32 ri;
+	s32 app_id;
+};
+
+struct bnad {
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	struct bna_dev_s *priv;
+
+	unsigned long state;
+#define BNAD_DISABLED		0
+#define BNAD_RESETTING		1
+#define BNAD_REMOVED		2
+#define BNAD_SET_UCAST		4
+#define BNAD_IOC_DISABLED	5
+#define BNAD_PORT_DISABLED	6
+#define BNAD_MBOX_IRQ_DISABLED	7
+
+	unsigned int flags;
+#define BNAD_F_MSIX		0x01
+#define BNAD_F_PROMISC		0x02
+#define BNAD_F_ALLMULTI		0x04
+#define BNAD_F_WOL		0x08
+#define BNAD_F_TXQ_DEPTH	0x10
+#define BNAD_F_RXQ_DEPTH	0x20
+
+
+	uint txq_num;
+	uint txq_depth;
+	struct bnad_txq_info *txq_table;
+	uint rxq_num;
+	uint rxq_depth;
+	struct bnad_rxq_info *rxq_table;
+	uint cq_num;
+	struct bnad_cq_info *cq_table;
+
+	struct vlan_group *vlangrp;
+
+	u32 rx_csum;
+
+	uint msix_num;
+	struct msix_entry *msix_table;
+
+	uint ib_num;
+	struct bnad_ib_entry *ib_table;
+
+	struct bna_rit_entry *rit;		/* RxQ Indirection Table */
+
+	spinlock_t priv_lock ____cacheline_aligned;
+
+	uint txf_num;
+	struct bnad_txf_info *txf_table;
+	uint rxf_num;
+	struct bnad_rxf_info *rxf_table;
+
+	struct timer_list stats_timer;
+	struct net_device_stats net_stats;
+
+	u8 tx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 tx_interpkt_count;
+
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 rx_interpkt_count;
+	u8	rx_interpkt_timeo;	/* 4 bits, unit is 0.5usec. */
+	u8 rx_dyn_coalesce_on;	/* Rx Dynamic Intr Moderation Flag */
+	u8 ref_count;
+	u8 lldp_comp_status;
+	u8 cee_stats_comp_status;
+	u8 cee_reset_stats_status;
+	u8 ucast_comp_status;
+	u8 qstop_comp_status;
+	u16 rsvd_2;
+	int ioc_comp_status;
+
+	struct bna_pause_config pause_config;
+
+	struct bna_stats *hw_stats;
+	struct bnad_drv_stats stats;
+
+	struct work_struct work;
+	unsigned int work_flags;
+#define BNAD_WF_ERROR		0x1
+#define BNAD_WF_RESETDONE	0x2
+
+	struct completion lldp_comp;
+	struct completion cee_stats_comp;
+	struct completion cee_reset_stats_comp;
+	struct completion ucast_comp;
+	struct completion qstop_comp;
+	struct completion ioc_comp;
+
+	u32 bna_id;
+	u8 __iomem *bar0;			/* registers */
+	unsigned char perm_addr[ETH_ALEN];
+	u32 pci_saved_config[16];
+
+	void *priv_stats;
+	DECLARE_PCI_UNMAP_ADDR(priv_stats_dma)
+
+	struct bfa_trc_mod_s *trcmod;
+	struct bfa_log_mod_s *logmod;
+	struct bfa_aen_s *aen;
+	struct bnad_aen_file_s file_buf[BNAD_AEN_MAX_APPS];
+	struct list_head         file_q;
+	struct list_head         file_free_q;
+	struct bna_meminfo ioc_meminfo[BNA_MEM_T_MAX];
+	struct timer_list ioc_timer;
+
+	struct bna_mbox_cbfn priv_cbfn;
+
+	char adapter_name[64];
+	char port_name[64];
+
+	/* Diagnostics */
+	struct bna_diag_lb_pkt_stats *lb_stats;
+	struct bnad_diag_lb_params *dlbp;
+
+	/* CEE Stuff */
+	struct bfa_cee_cbfn_s cee_cbfn;
+	struct bfa_cee_s cee;
+
+	struct list_head list_entry;
+};
+
+extern uint bnad_rxqs_per_cq;
+extern uint bnad_rxq_depth;
+extern uint bnad_txq_depth;
+extern uint bnad_small_large_rxbufs;
+
+extern struct list_head bnad_list;
+
+int bnad_open(struct net_device *netdev);
+int bnad_stop(struct net_device *netdev);
+int bnad_stop_locked(struct net_device *netdev);
+int bnad_open_locked(struct net_device *netdev);
+int bnad_sw_reset(struct net_device *netdev);
+int bnad_resetting(struct bnad *bnad);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_ioctl_init(void);
+void bnad_ioctl_exit(void);
+struct net_device_stats *bnad_get_stats(struct net_device *netdev);
+void bnad_reset_stats(struct net_device *netdev);
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id,
+		   u8 *mac_ptr, unsigned int cmd);
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id);
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_cq_init(struct bnad *bnad, uint cq_id);
+void bnad_setup_cq(struct bnad *bnad, uint cq_id);
+int bnad_alloc_ib(struct bnad *bnad, uint ib_id);
+void bnad_setup_ib(struct bnad *bnad, uint ib_id);
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id);
+void bnad_free_ib(struct bnad *bnad, uint ib_id);
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id);
+u32 bnad_get_msglevel(struct net_device *netdev);
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel);
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth);
+int bnad_disable_rxq(struct bnad *bnad, u32 rxq_id);
+void bnad_free_cq(struct bnad *bnad, uint cq_id);
+void bnad_add_to_list(struct bnad *bnad);
+void bnad_remove_from_list(struct bnad *bnad);
+struct bnad *get_bnadev(int bna_id);
+/* For diagnostics */
+int bnad_diag_lb_rx(struct bnad *bnad, struct sk_buff *skb);
+int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _BNAD_H_ */

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-10-16 18:24 Rasesh Mody
  2009-10-16 20:20 ` Ben Hutchings
@ 2009-10-20  0:54 ` Herbert Xu
  1 sibling, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2009-10-20  0:54 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, amathur

Rasesh Mody <rmody@brocade.com> wrote:
.
> +static int bnad_lro_get_skb_header(struct sk_buff *skb, void **iphdr,
> +    void **tcphdr, u64 *hdr_flags, void *priv)

Please stop using LRO in new code.  The GRO replacement should
be used instead.
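
For reference, a minimal sketch of the GRO receive path, reusing the
napi_struct the driver already embeds in each CQ (illustrative only;
"length" stands for the completed frame length):

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, bnad->netdev);
	napi_gro_receive(&cqinfo->napi, skb);

The per-CQ net_lro_mgr setup and the header-parsing callback can then
be dropped entirely.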

Thanks,
-- 
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-10-16 20:20 ` Ben Hutchings
@ 2009-10-16 23:19   ` Rasesh Mody
  0 siblings, 0 replies; 30+ messages in thread
From: Rasesh Mody @ 2009-10-16 23:19 UTC (permalink / raw)
  To: Ben Hutchings; +Cc: netdev, Akshay Mathur

Hello Ben,

Thanks a lot for your comments. We will try to address the issues in the following submissions.

--Rasesh Mody
(Brocade Linux Driver Team)

-----Original Message-----
From: Ben Hutchings [mailto:bhutchings@solarflare.com]
Sent: Friday, October 16, 2009 1:20 PM
To: Rasesh Mody
Cc: netdev@vger.kernel.org; Akshay Mathur
Subject: Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver

On Fri, 2009-10-16 at 11:24 -0700, Rasesh Mody wrote:
> From: Rasesh Mody <rmody@brocade.com>
>
> This is patch 1/6 which contains linux driver source for
> Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
>
> We wish this patch to be considered for inclusion in 2.6.32

I think it's a bit late for that.

[...]
> +#ifdef NETIF_F_TSO
> +#include <net/checksum.h>
> +#endif

NETIF_F_TSO is always defined; remove the check.

[...]
> +#ifdef BNAD_NO_IP_ALIGN
> +#undef NET_IP_ALIGN
> +#define NET_IP_ALIGN 0
> +#endif

Don't redefine standard macros.  Define your own which is set to either
NET_IP_ALIGN or 0 as appropriate.

> +#define BNAD_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
> +
> +#define BNAD_RESET_Q(_bnad, _q, _unmap_q)                            \
> +do {                                                                 \
> +     if ((_q)->producer_index != (_q)->consumer_index) {      \
> +             DPRINTK(ERR, "Q producer index %u != ", (_q)->producer_index);      \
> +             DPRINTK(ERR, "consumer index %u\n", (_q)->consumer_index);      \
> +     }                                                               \
> +     BNA_ASSERT((_q)->producer_index == (_q)->consumer_index);      \
> +     if ((_unmap_q)->producer_index != (_unmap_q)->consumer_index) {      \
> +             DPRINTK(ERR, "UnmapQ producer index %u != ", (_unmap_q)->producer_index);      \
> +             DPRINTK(ERR, "consumer index %u\n", (_unmap_q)->consumer_index);      \
> +     }                                                               \
> +     BNA_ASSERT((_unmap_q)->producer_index == \
> +             (_unmap_q)->consumer_index);      \
> +     (_q)->producer_index = 0;       \
> +     (_q)->consumer_index = 0;       \
> +     (_unmap_q)->producer_index = 0; \
> +     (_unmap_q)->consumer_index = 0; \
> +     {       \
> +             u32 _ui;        \
> +             for (_ui = 0; _ui < (_unmap_q)->q_depth; _ui++)         \
> +                     BNA_ASSERT(!(_unmap_q)->unmap_array[_ui].skb);      \
> +     }       \
> +} while (0)

Is there any reason not to write this as a function?  It looks like an
infrequent control operation that shouldn't even be an inline function.

[...]
> +static const struct net_device_ops bnad_netdev_ops = {
> +     .ndo_open                               = bnad_open,
> +     .ndo_stop                               = bnad_stop,
> +     .ndo_start_xmit                 = bnad_start_xmit,
> +     .ndo_get_stats                  = bnad_get_stats,
> +#ifdef HAVE_SET_RX_MODE
> +     .ndo_set_rx_mode                = &bnad_set_rx_mode,
> +#endif

The HAVE_* macros are meant for use by out-of-tree drivers.  There is no
need to test them in in-tree code.

[...]
> +static int bnad_check_module_params(void)
> +{
> +     /* bnad_msix */
> +     if (bnad_msix && bnad_msix != 1)
> +             printk(KERN_WARNING "bna: bnad_msix should be 0 or 1, "
> +                 "%u is invalid, set bnad_msix to 1\n", bnad_msix);
> +
> +     /* bnad_small_large_rxbufs */
> +     if (bnad_small_large_rxbufs && bnad_small_large_rxbufs != 1)
> +             printk(KERN_WARNING "bna: bnad_small_large_rxbufs should be "
> +                 "0 or 1, %u is invalid, set bnad_small_large_rxbufs to 1\n",
> +                 bnad_small_large_rxbufs);
> +     if (bnad_small_large_rxbufs)
> +             bnad_rxqs_per_cq = 2;
> +     else
> +             bnad_rxqs_per_cq = 1;
> +
> +     /* bnad_rxqsets_used */
> +     if (bnad_rxqsets_used > BNAD_MAX_RXQS / bnad_rxqs_per_cq) {
> +             printk(KERN_ERR "bna: the maximum value for bnad_rxqsets_used "
> +                 "is %u, %u is invalid\n",
> +                 BNAD_MAX_RXQS / bnad_rxqs_per_cq, bnad_rxqsets_used);
> +             return -EINVAL;
> +     }

There is a cleaner way to validate and reject module parameter values
which is to define the parameters with module_param_call().

> +static void bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
> +{
> +     u16 to_alloc, alloced, unmap_prod, wi_range;
> +     struct bnad_skb_unmap *unmap_array;
> +     struct bna_rxq_entry *rxent;
> +     struct sk_buff *skb;
> +     dma_addr_t dma_addr;
> +
> +     alloced = 0;
> +     to_alloc = BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
> +         rxqinfo->skb_unmap_q.q_depth);
> +
> +     unmap_array = rxqinfo->skb_unmap_q.unmap_array;
> +     unmap_prod = rxqinfo->skb_unmap_q.producer_index;
> +     BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
> +     BNA_ASSERT(wi_range && wi_range <= rxqinfo->rxq.q.q_depth);
> +
> +     while (to_alloc--) {
> +             if (!wi_range) {
> +                     BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q,
> +                         rxent, wi_range);
> +                     BNA_ASSERT(wi_range &&
> +                         wi_range <= rxqinfo->rxq.q.q_depth);
> +             }
> +#ifdef BNAD_RXBUF_HEADROOM
> +             skb = netdev_alloc_skb(rxqinfo->bnad->netdev,
> +                 rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN);
> +#else
> +             skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
> +                 GFP_ATOMIC);
> +#endif

Why is this conditional?

[...]
> +static irqreturn_t bnad_msix_rx(int irq, void *data)
> +{
> +     struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
> +     struct bnad *bnad = cqinfo->bnad;
> +
> +             if (likely(netif_rx_schedule_prep(bnad->netdev,
> +                     &cqinfo->napi))) {
> +                     bnad_disable_rx_irq(bnad, cqinfo);
> +                     __netif_rx_schedule(bnad->netdev, &cqinfo->napi);
> +             }
> +
> +     return IRQ_HANDLED;
> +}

Indentation is wrong.

[...]
> +static irqreturn_t bnad_isr(int irq, void *data)
> +{
> +     struct net_device *netdev = data;
> +     struct bnad *bnad = netdev_priv(netdev);
> +     u32 intr_status;
> +
> +     spin_lock(&bnad->priv_lock);
> +     bna_intr_status_get(bnad->priv, &intr_status);
> +     spin_unlock(&bnad->priv_lock);
> +
> +     if (!intr_status)
> +             return IRQ_NONE;
> +
> +     DPRINTK(DEBUG, "port %u bnad_isr: 0x%x\n", bnad->bna_id, intr_status);
> +     if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
> +             spin_lock(&bnad->priv_lock);
> +             bna_mbox_err_handler(bnad->priv, intr_status);
> +             spin_unlock(&bnad->priv_lock);
> +             if (BNA_IS_ERR_INTR(intr_status) ||
> +                 !BNA_IS_INTX_DATA_INTR(intr_status))
> +                     goto exit_isr;
> +     }
> +
> +     if (likely(netif_rx_schedule_prep(bnad->netdev,
> +         &bnad->cq_table[0].napi))) {
> +             bnad_disable_txrx_irqs(bnad);
> +             __netif_rx_schedule(bnad->netdev, &bnad->cq_table[0].napi);
> +     }

These functions don't exist any more!

[...]
> +static int bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
> +{
> +     BNA_ASSERT(txq_id < bnad->txq_num);
> +     if (!(bnad->flags & BNAD_F_MSIX))
> +             return 0;
> +     DPRINTK(DEBUG, "port %u requests irq %u for TxQ %u in MSIX mode\n",
> +             bnad->bna_id, bnad->msix_table[txq_id].vector, txq_id);
> +     return request_irq(bnad->msix_table[txq_id].vector,
> +         (irq_handler_t)&bnad_msix_tx, 0, bnad->txq_table[txq_id].name,

Why are you casting this function pointer?  It has the right type
already, and if it didn't then casting wouldn't fix the matter.

> +         &bnad->txq_table[txq_id]);
> +}
> +
> +int bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
> +{
> +     BNA_ASSERT(cq_id < bnad->cq_num);
> +     if (!(bnad->flags & BNAD_F_MSIX))
> +             return 0;
> +     DPRINTK(DEBUG, "port %u requests irq %u for CQ %u in MSIX mode\n",
> +             bnad->bna_id,
> +             bnad->msix_table[bnad->txq_num + cq_id].vector, cq_id);
> +     return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
> +         (irq_handler_t)&bnad_msix_rx, 0, bnad->cq_table[cq_id].name,

Same here.

[...]
> +static void bnad_link_up_cb(void *arg, u8 status)
> +{
> +     struct bnad *bnad = (struct bnad *)arg;
> +     struct net_device *netdev = bnad->netdev;
> +
> +     DPRINTK(INFO, "%s bnad_link_up_cb\n", netdev->name);
> +     if (netif_running(netdev)) {
> +             if (!netif_carrier_ok(netdev) &&
> +                 !test_bit(BNAD_DISABLED, &bnad->state)) {
> +                             printk(KERN_INFO "%s link up\n", netdev->name);
> +                     netif_carrier_on(netdev);
> +                     netif_wake_queue(netdev);
> +                     bnad->stats.netif_queue_wakeup++;
> +             }
> +     }
> +}
> +
> +static void bnad_link_down_cb(void *arg, u8 status)
> +{
> +     struct bnad *bnad = (struct bnad *)arg;
> +     struct net_device *netdev = bnad->netdev;
> +
> +     DPRINTK(INFO, "%s bnad_link_down_cb\n", netdev->name);
> +     if (netif_running(netdev)) {
> +             if (netif_carrier_ok(netdev)) {
> +                     printk(KERN_INFO "%s link down\n", netdev->name);
> +                     netif_carrier_off(netdev);
> +                     netif_stop_queue(netdev);
> +                     bnad->stats.netif_queue_stop++;
> +             }
> +     }
> +}

There is no need to wake/stop the TX queues here; the netdev core
understands that TX queues must be stopped while the link is down.

[...]
> +static void bnad_detach(struct bnad *bnad)
> +{
[...]
> +     /* Wait to make sure Tx and Rx are stopped. */
> +     msleep(1000);
> +     bnad_free_txrx_irqs(bnad);
> +     bnad_sync_mbox_irq(bnad);
> +
> +             bnad_napi_disable(bnad);
> +             bnad_napi_uninit(bnad);
> +
> +     /* Delete the stats timer after synchronize with mbox irq. */
> +     del_timer_sync(&bnad->stats_timer);
> +             netif_tx_disable(bnad->netdev);
> +             netif_carrier_off(bnad->netdev);
> +}

Some incorrect indentation here.

[...]
> +void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
> +{
[...]
> +#if 1
> +     ib_config->control_flags = BNA_IB_CF_INT_ENABLE |
> +         BNA_IB_CF_MASTER_ENABLE;
> +#else
> +     ib_config->control_flags = BNA_IB_CF_INT_ENABLE |
> +         BNA_IB_CF_INTER_PKT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
> +     ib_config->interpkt_count = bnad->rx_interpkt_count;
> +     ib_config->interpkt_timer = bnad->rx_interpkt_timeo;
> +#endif

If you always want to use the first version (#if 1) then get rid of the
second version.

[...]
> +/* Note: bnad_cleanup doesn't not free irqs and queues. */

A double negative can mean a positive, but this is ambiguous.  Either
change the comment to say clearly that it does free irqs and queues, or
remove the comment since the code is clear enough.

> +static void bnad_cleanup(struct bnad *bnad)
> +{
> +     kfree(bnad->rit);
> +     bnad->rit = NULL;
> +     kfree(bnad->txf_table);
> +     bnad->txf_table = NULL;
> +     kfree(bnad->rxf_table);
> +     bnad->rxf_table = NULL;
> +
> +     bnad_free_ibs(bnad);
> +     bnad_free_queues(bnad);
> +}
[...]
> +int bnad_open_locked(struct net_device *netdev)
> +{
> +     struct bnad *bnad = netdev_priv(netdev);
> +     uint i;
> +     int err;
> +
> +     ASSERT_RTNL();
> +     DPRINTK(WARNING, "%s open\n", netdev->name);
> +
> +     if (BNAD_NOT_READY(bnad)) {
> +             DPRINTK(WARNING, "%s is not ready yet (0x%lx)\n",
> +                     netdev->name, bnad->state);
> +             return 0;
> +     }
> +
> +     if (!test_bit(BNAD_DISABLED, &bnad->state)) {
> +             DPRINTK(WARNING, "%s is already opened (0x%lx)\n",
> +                     netdev->name, bnad->state);
> +
> +             return 0;
> +     }

Why are you returning 0 in these error cases?

[...]
> +int bnad_open(struct net_device *netdev)
> +{
> +     struct bnad *bnad = netdev_priv(netdev);
> +     int error = 0;
> +
> +     bnad_lock();
> +     if (!test_bit(BNAD_PORT_DISABLED, &bnad->state))
> +             error = bnad_open_locked(netdev);
> +     bnad_unlock();
> +     return error;
> +}
> +
> +int bnad_stop(struct net_device *netdev)
> +{
> +     int error = 0;
> +
> +     bnad_lock();
> +     error = bnad_stop_locked(netdev);
> +     bnad_unlock();
> +     return error;
> +}

Given that bnad_lock() and bnad_unlock() are defined as doing nothing,
you should merge these with the functions they call.

[...]
> +static int bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
> +{
> +#ifdef NETIF_F_TSO
> +     int err;
> +
> +#ifdef SKB_GSO_TCPV4
> +     /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */

So there is no need to test for it. :-)

[...]
> +int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)

Return type must be netdev_tx_t.
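
i.e. the prototype becomes:

	netdev_tx_t bnad_start_xmit(struct sk_buff *skb,
				    struct net_device *netdev);

and the function returns NETDEV_TX_OK / NETDEV_TX_BUSY rather than
raw integers.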

[...]
> +static void bnad_set_rx_mode(struct net_device *netdev)
> +{
> +     bnad_lock();
> +     bnad_set_rx_mode_locked(netdev);
> +     bnad_unlock();
> +}
[...]
> +static int bnad_set_mac_address(struct net_device *netdev, void *addr)
> +{
> +     int err = 0;
> +
> +     bnad_lock();
> +     err = bnad_set_mac_address_locked(netdev, addr);
> +     bnad_unlock();
> +     return err;
> +
> +}

Can also be merged with the functions they call.

[...]
> +static int bnad_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
> +{
> +     return -EOPNOTSUPP;
> +}

You don't need to define an ioctl() operation at all.

[...]
> +#ifdef CONFIG_NET_POLL_CONTROLLER
> +static void bnad_netpoll(struct net_device *netdev)
> +{
> +     struct bnad *bnad = netdev_priv(netdev);
> +
> +     DPRINTK(INFO, "%s bnad_netpoll\n", netdev->name);
> +     disable_irq(bnad->pcidev->irq);
> +     bnad_isr(bnad->pcidev->irq, netdev);
> +     enable_irq(bnad->pcidev->irq);
> +}
> +#endif

This doesn't look like it will work when the hardware is configured for
MSI-X.

[...]
> +static void bnad_stats_timeo(unsigned long data)
> +{
> +     struct bnad *bnad = (struct bnad *)data;
> +     int i;
> +     struct bnad_rxq_info *rxqinfo;
> +
> +     spin_lock_irq(&bnad->priv_lock);
> +     bna_stats_get(bnad->priv);
> +     spin_unlock_irq(&bnad->priv_lock);
> +
> +     if (bnad->rx_dyn_coalesce_on) {
> +             u8 cls_timer;
> +             struct bnad_cq_info *cq;
> +             for (i = 0; i < bnad->cq_num; i++) {
> +                     cq = &bnad->cq_table[i];
> +
> +                     if ((cq->pkt_rate.small_pkt_cnt == 0)
> +                         && (cq->pkt_rate.large_pkt_cnt == 0))
> +                             continue;
> +
> +                             cls_timer = bna_calc_coalescing_timer(
> +                             bnad->priv, &cq->pkt_rate);
> +
> +                     /*For NAPI version, coalescing timer need to stored*/
> +                     cq->rx_coalescing_timeo = cls_timer;

I can't parse this comment.

[...]
> +static int bnad_priv_init(struct bnad *bnad)
> +{
> +     dma_addr_t dma_addr;
> +     struct bna_dma_addr bna_dma_addr;
> +     char inst_name[16];
> +     int err, i;
> +     struct bfa_pcidev_s pcidev_info;
> +     u32 intr_mask;
> +
> +     DPRINTK(DEBUG, "port %u bnad_priv_init\n", bnad->bna_id);
> +
> +     if (bnad_msix)
> +             bnad->flags |= BNAD_F_MSIX;
> +     bnad_q_num_init(bnad, bnad_rxqsets_used);
> +
> +     bnad->work_flags = 0;
> +     INIT_WORK(&bnad->work, bnad_work);
> +
> +     init_timer(&bnad->stats_timer);
> +     bnad->stats_timer.function = &bnad_stats_timeo;
> +     bnad->stats_timer.data = (unsigned long)bnad;
[...]
> +     init_timer(&bnad->ioc_timer);
> +     bnad->ioc_timer.function = &bnad_ioc_timeout;
> +     bnad->ioc_timer.data = (unsigned long)bnad;

Each of these groups of three statements can be written as one call to
setup_timer().
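
For example (same callbacks and data, just folded into the helper):

	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
		    (unsigned long)bnad);
	setup_timer(&bnad->ioc_timer, bnad_ioc_timeout,
		    (unsigned long)bnad);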

> +     mod_timer(&bnad->ioc_timer, jiffies + HZ * BNA_IOC_TIMER_FREQ / 1000);

It would be clearer to write the timeout as jiffies +
msecs_to_jiffies(BNA_IOC_TIMER_FREQ).  Also, given that
BNA_IOC_TIMER_FREQ is the *period* of the timer, maybe it should be
called BNA_IOC_TIMER_PERIOD.

> +static int __devinit
> +bnad_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidev_id)
> +{
> +     int err, using_dac;
> +     struct net_device *netdev;
> +     struct bnad *bnad;
> +     unsigned long mmio_start, mmio_len;
> +     static u32 bna_id;
> +
> +     DPRINTK(INFO, "bnad_pci_probe(0x%p, 0x%p)\n", pcidev, pcidev_id);
> +
> +     DPRINTK(DEBUG, "PCI func %d\n", PCI_FUNC(pcidev->devfn));
> +     if (!bfad_get_firmware_buf(pcidev)) {
> +             printk(KERN_WARNING "Failed to load Firmware Image!\n");
> +             return 0;

You *must* return an error code here.

[...]
> +     netdev->netdev_ops = &bnad_netdev_ops;
> +     netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
> +#ifdef NETIF_F_IPV6_CSUM
> +     netdev->features |= NETIF_F_IPV6_CSUM;
> +#endif
> +#ifdef NETIF_F_TSO
> +     netdev->features |= NETIF_F_TSO;
> +#endif
> +#ifdef NETIF_F_TSO6
> +     netdev->features |= NETIF_F_TSO6;
> +#endif
> +#ifdef NETIF_F_LRO
> +     netdev->features |= NETIF_F_LRO;
> +#endif
> +#ifdef BNAD_VLAN_FEATURES
> +     netdev->vlan_features = netdev->features;
> +#endif

Get rid of these macro conditions.

> +     if (using_dac)
> +             netdev->features |= NETIF_F_HIGHDMA;
> +     netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
> +         NETIF_F_HW_VLAN_FILTER;
> +
> +     netdev->mem_start = mmio_start;
> +     netdev->mem_end = mmio_start + mmio_len - 1;
> +
> +     bnad_set_ethtool_ops(netdev);
> +
> +     bnad->bna_id = bna_id;
> +     err = bnad_priv_init(bnad);
> +     if (err) {
> +             printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
> +             goto unmap_bar0;
> +     }
> +
> +     BNA_ASSERT(netdev->addr_len == ETH_ALEN);
> +#ifdef ETHTOOL_GPERMADDR
> +     memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
> +#endif

Just put the address in netdev->perm_addr in the first place, and don't
test ETHTOOL_GPERMADDR.

[...]
> +static void __devexit bnad_pci_remove(struct pci_dev *pcidev)
> +{
> +     struct net_device *netdev = pci_get_drvdata(pcidev);
> +     struct bnad *bnad;
> +
> +     DPRINTK(INFO, "%s bnad_pci_remove\n", netdev->name);
> +     if (!netdev)
> +             return;

Surely this would indicate a bug?

[...]
> diff -ruP linux-2.6.32-rc4-orig/drivers/net/bna/bnad.h linux-2.6.32-rc4-mod/drivers/net/bna/bnad.h
> --- linux-2.6.32-rc4-orig/drivers/net/bna/bnad.h        1969-12-31 16:00:00.000000000 -0800
> +++ linux-2.6.32-rc4-mod/drivers/net/bna/bnad.h 2009-10-16 10:30:53.075436000 -0700
[...]
> +#if !defined(CONFIG_INET_LRO) && !defined(CONFIG_INET_LRO_MODULE)
> +#include <net/ip.h>
> +#include <net/tcp.h>
> +#else
> +#include <linux/inet_lro.h>
> +#endif
> +
> +#include "bnad_compat.h"
> +
> +#if !defined(CONFIG_INET_LRO) && !defined(CONFIG_INET_LRO_MODULE)
> +#include "inet_lro.h"
> +#endif

What is this?  You want to use your own copy of inet_lro?

You should really be using GRO instead (which is a lot easier).

[...]
> +#define bnad_lock()
> +#define bnad_unlock()

What's the point of this?

[...]
> +struct bnad {
[...]
> +     struct net_device_stats net_stats;

You don't need this; use the stats in struct net_device.

[...]
> +     unsigned char perm_addr[ETH_ALEN];

Use the perm_addr in struct net_device.

> +     u32 pci_saved_config[16];
[...]

You don't need this; the PCI core saves config registers in struct
pci_dev.

You should rebase this against net-next-2.6 and run
scripts/checkpatch.pl over it before resubmitting.

Ben.

--
Ben Hutchings, Senior Software Engineer, Solarflare Communications
Not speaking for my employer; that's the marketing department's job.
They asked us to note that Solarflare product names are trademarked.


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
  2009-10-16 18:24 Rasesh Mody
@ 2009-10-16 20:20 ` Ben Hutchings
  2009-10-16 23:19   ` Rasesh Mody
  2009-10-20  0:54 ` Herbert Xu
  1 sibling, 1 reply; 30+ messages in thread
From: Ben Hutchings @ 2009-10-16 20:20 UTC (permalink / raw)
  To: Rasesh Mody; +Cc: netdev, amathur

On Fri, 2009-10-16 at 11:24 -0700, Rasesh Mody wrote:
> From: Rasesh Mody <rmody@brocade.com>
> 
> This is patch 1/6 which contains linux driver source for
> Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
> 
> We wish this patch to be considered for inclusion in 2.6.32

I think it's a bit late for that.

[...]
> +#ifdef NETIF_F_TSO
> +#include <net/checksum.h>
> +#endif

NETIF_F_TSO is always defined; remove the check.

[...]
> +#ifdef BNAD_NO_IP_ALIGN
> +#undef NET_IP_ALIGN
> +#define NET_IP_ALIGN	0
> +#endif

Don't redefine standard macros.  Define your own which is set to either
NET_IP_ALIGN or 0 as appropriate.
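
For instance (BNAD_IP_ALIGN is just a suggested name):

	#ifdef BNAD_NO_IP_ALIGN
	#define BNAD_IP_ALIGN	0
	#else
	#define BNAD_IP_ALIGN	NET_IP_ALIGN
	#endif

and then use BNAD_IP_ALIGN wherever the driver needs the padding.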

> +#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
> +
> +#define BNAD_RESET_Q(_bnad, _q, _unmap_q)				\
> +do {									\
> +	if ((_q)->producer_index != (_q)->consumer_index) {      \
> +		DPRINTK(ERR, "Q producer index %u != ",	(_q)->producer_index);      \
> +		DPRINTK(ERR, "consumer index %u\n", (_q)->consumer_index);      \
> +	}								\
> +	BNA_ASSERT((_q)->producer_index == (_q)->consumer_index);      \
> +	if ((_unmap_q)->producer_index != (_unmap_q)->consumer_index) {      \
> +		DPRINTK(ERR, "UnmapQ producer index %u != ", (_unmap_q)->producer_index);      \
> +		DPRINTK(ERR, "consumer index %u\n", (_unmap_q)->consumer_index);      \
> +	}								\
> +	BNA_ASSERT((_unmap_q)->producer_index == \
> +		(_unmap_q)->consumer_index);      \
> +	(_q)->producer_index = 0;	\
> +	(_q)->consumer_index = 0;	\
> +	(_unmap_q)->producer_index = 0;	\
> +	(_unmap_q)->consumer_index = 0;	\
> +	{	\
> +		u32 _ui;	\
> +		for (_ui = 0; _ui < (_unmap_q)->q_depth; _ui++)		\
> +			BNA_ASSERT(!(_unmap_q)->unmap_array[_ui].skb);      \
> +	}	\
> +} while (0)

Is there any reason not to write this as a function?  It looks like an
infrequent control operation that shouldn't even be an inline function.
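
Roughly (the generic queue type name below is a guess from the macro's
usage, and the unused _bnad argument can simply go away):

	static void bnad_reset_q(struct bna_q *q, struct bnad_unmap_q *unmap_q)
	{
		u32 i;

		if (q->producer_index != q->consumer_index)
			DPRINTK(ERR, "Q producer index %u != consumer index %u\n",
				q->producer_index, q->consumer_index);
		BNA_ASSERT(q->producer_index == q->consumer_index);

		if (unmap_q->producer_index != unmap_q->consumer_index)
			DPRINTK(ERR, "UnmapQ producer index %u != consumer index %u\n",
				unmap_q->producer_index, unmap_q->consumer_index);
		BNA_ASSERT(unmap_q->producer_index == unmap_q->consumer_index);

		q->producer_index = 0;
		q->consumer_index = 0;
		unmap_q->producer_index = 0;
		unmap_q->consumer_index = 0;

		for (i = 0; i < unmap_q->q_depth; i++)
			BNA_ASSERT(!unmap_q->unmap_array[i].skb);
	}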

[...]
> +static const struct net_device_ops bnad_netdev_ops = {
> +	.ndo_open				= bnad_open,
> +	.ndo_stop				= bnad_stop,
> +	.ndo_start_xmit			= bnad_start_xmit,
> +	.ndo_get_stats			= bnad_get_stats,
> +#ifdef HAVE_SET_RX_MODE
> +	.ndo_set_rx_mode		= &bnad_set_rx_mode,
> +#endif

The HAVE_* macros are meant for use by out-of-tree drivers.  There is no
need to test them in in-tree code.

[...]
> +static int bnad_check_module_params(void)
> +{
> +	/* bnad_msix */
> +	if (bnad_msix && bnad_msix != 1)
> +		printk(KERN_WARNING "bna: bnad_msix should be 0 or 1, "
> +		    "%u is invalid, set bnad_msix to 1\n", bnad_msix);
> +
> +	/* bnad_small_large_rxbufs */
> +	if (bnad_small_large_rxbufs && bnad_small_large_rxbufs != 1)
> +		printk(KERN_WARNING "bna: bnad_small_large_rxbufs should be "
> +		    "0 or 1, %u is invalid, set bnad_small_large_rxbufs to 1\n",
> +		    bnad_small_large_rxbufs);
> +	if (bnad_small_large_rxbufs)
> +		bnad_rxqs_per_cq = 2;
> +	else
> +		bnad_rxqs_per_cq = 1;
> +
> +	/* bnad_rxqsets_used */
> +	if (bnad_rxqsets_used > BNAD_MAX_RXQS / bnad_rxqs_per_cq) {
> +		printk(KERN_ERR "bna: the maximum value for bnad_rxqsets_used "
> +		    "is %u, %u is invalid\n",
> +		    BNAD_MAX_RXQS / bnad_rxqs_per_cq, bnad_rxqsets_used);
> +		return -EINVAL;
> +	}

There is a cleaner way to validate and reject module parameter values
which is to define the parameters with module_param_call().
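
Sketch, using the stock param_set_uint/param_get_uint helpers:

	static int bnad_msix_set(const char *val, struct kernel_param *kp)
	{
		int err = param_set_uint(val, kp);

		if (err)
			return err;
		return bnad_msix > 1 ? -EINVAL : 0;
	}
	module_param_call(bnad_msix, bnad_msix_set, param_get_uint,
			  &bnad_msix, 0444);

so out-of-range values are rejected at load time instead of being
silently adjusted.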

> +static void bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
> +{
> +	u16 to_alloc, alloced, unmap_prod, wi_range;
> +	struct bnad_skb_unmap *unmap_array;
> +	struct bna_rxq_entry *rxent;
> +	struct sk_buff *skb;
> +	dma_addr_t dma_addr;
> +
> +	alloced = 0;
> +	to_alloc = BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
> +	    rxqinfo->skb_unmap_q.q_depth);
> +
> +	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
> +	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
> +	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
> +	BNA_ASSERT(wi_range && wi_range <= rxqinfo->rxq.q.q_depth);
> +
> +	while (to_alloc--) {
> +		if (!wi_range) {
> +			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q,
> +			    rxent, wi_range);
> +			BNA_ASSERT(wi_range &&
> +			    wi_range <= rxqinfo->rxq.q.q_depth);
> +		}
> +#ifdef BNAD_RXBUF_HEADROOM
> +		skb = netdev_alloc_skb(rxqinfo->bnad->netdev,
> +		    rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN);
> +#else
> +		skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
> +		    GFP_ATOMIC);
> +#endif

Why is this conditional?

[...]
> +static irqreturn_t bnad_msix_rx(int irq, void *data)
> +{
> +	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
> +	struct bnad *bnad = cqinfo->bnad;
> +
> +		if (likely(netif_rx_schedule_prep(bnad->netdev,
> +			&cqinfo->napi))) {
> +			bnad_disable_rx_irq(bnad, cqinfo);
> +			__netif_rx_schedule(bnad->netdev, &cqinfo->napi);
> +		}
> +
> +	return IRQ_HANDLED;
> +}

Indentation is wrong.

[...]
> +static irqreturn_t bnad_isr(int irq, void *data)
> +{
> +	struct net_device *netdev = data;
> +	struct bnad *bnad = netdev_priv(netdev);
> +	u32 intr_status;
> +
> +	spin_lock(&bnad->priv_lock);
> +	bna_intr_status_get(bnad->priv, &intr_status);
> +	spin_unlock(&bnad->priv_lock);
> +
> +	if (!intr_status)
> +		return IRQ_NONE;
> +
> +	DPRINTK(DEBUG, "port %u bnad_isr: 0x%x\n", bnad->bna_id, intr_status);
> +	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
> +		spin_lock(&bnad->priv_lock);
> +		bna_mbox_err_handler(bnad->priv, intr_status);
> +		spin_unlock(&bnad->priv_lock);
> +		if (BNA_IS_ERR_INTR(intr_status) ||
> +		    !BNA_IS_INTX_DATA_INTR(intr_status))
> +			goto exit_isr;
> +	}
> +
> +	if (likely(netif_rx_schedule_prep(bnad->netdev,
> +	    &bnad->cq_table[0].napi))) {
> +		bnad_disable_txrx_irqs(bnad);
> +		__netif_rx_schedule(bnad->netdev, &bnad->cq_table[0].napi);
> +	}

These functions don't exist any more!

[...]
> +static int bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
> +{
> +	BNA_ASSERT(txq_id < bnad->txq_num);
> +	if (!(bnad->flags & BNAD_F_MSIX))
> +		return 0;
> +	DPRINTK(DEBUG, "port %u requests irq %u for TxQ %u in MSIX mode\n",
> +		bnad->bna_id, bnad->msix_table[txq_id].vector, txq_id);
> +	return request_irq(bnad->msix_table[txq_id].vector,
> +	    (irq_handler_t)&bnad_msix_tx, 0, bnad->txq_table[txq_id].name,

Why are you casting this function pointer?  It has the right type
already, and if it didn't then casting wouldn't fix the matter.

> +	    &bnad->txq_table[txq_id]);
> +}
> +
> +int bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
> +{
> +	BNA_ASSERT(cq_id < bnad->cq_num);
> +	if (!(bnad->flags & BNAD_F_MSIX))
> +		return 0;
> +	DPRINTK(DEBUG, "port %u requests irq %u for CQ %u in MSIX mode\n",
> +		bnad->bna_id,
> +		bnad->msix_table[bnad->txq_num + cq_id].vector, cq_id);
> +	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
> +	    (irq_handler_t)&bnad_msix_rx, 0, bnad->cq_table[cq_id].name,

Same here.
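
i.e. just pass the handler directly, e.g. for the TxQ case:

	return request_irq(bnad->msix_table[txq_id].vector, bnad_msix_tx, 0,
	    bnad->txq_table[txq_id].name, &bnad->txq_table[txq_id]);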

[...]
> +static void bnad_link_up_cb(void *arg, u8 status)
> +{
> +	struct bnad *bnad = (struct bnad *)arg;
> +	struct net_device *netdev = bnad->netdev;
> +
> +	DPRINTK(INFO, "%s bnad_link_up_cb\n", netdev->name);
> +	if (netif_running(netdev)) {
> +		if (!netif_carrier_ok(netdev) &&
> +		    !test_bit(BNAD_DISABLED, &bnad->state)) {
> +				printk(KERN_INFO "%s link up\n", netdev->name);
> +			netif_carrier_on(netdev);
> +			netif_wake_queue(netdev);
> +			bnad->stats.netif_queue_wakeup++;
> +		}
> +	}
> +}
> +
> +static void bnad_link_down_cb(void *arg, u8 status)
> +{
> +	struct bnad *bnad = (struct bnad *)arg;
> +	struct net_device *netdev = bnad->netdev;
> +
> +	DPRINTK(INFO, "%s bnad_link_down_cb\n", netdev->name);
> +	if (netif_running(netdev)) {
> +		if (netif_carrier_ok(netdev)) {
> +			printk(KERN_INFO "%s link down\n", netdev->name);
> +			netif_carrier_off(netdev);
> +			netif_stop_queue(netdev);
> +			bnad->stats.netif_queue_stop++;
> +		}
> +	}
> +}

There is no need to wake/stop the TX queues here; the netdev core
understands that TX queues must be stopped while the link is down.

[...]
> +static void bnad_detach(struct bnad *bnad)
> +{
[...]
> +	/* Wait to make sure Tx and Rx are stopped. */
> +	msleep(1000);
> +	bnad_free_txrx_irqs(bnad);
> +	bnad_sync_mbox_irq(bnad);
> +
> +		bnad_napi_disable(bnad);
> +		bnad_napi_uninit(bnad);
> +
> +	/* Delete the stats timer after synchronize with mbox irq. */
> +	del_timer_sync(&bnad->stats_timer);
> +		netif_tx_disable(bnad->netdev);
> +		netif_carrier_off(bnad->netdev);
> +}

Some incorrect indentation here.

[...]
> +void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
> +{
[...]
> +#if 1
> +	ib_config->control_flags = BNA_IB_CF_INT_ENABLE |
> +	    BNA_IB_CF_MASTER_ENABLE;
> +#else
> +	ib_config->control_flags = BNA_IB_CF_INT_ENABLE |
> +	    BNA_IB_CF_INTER_PKT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
> +	ib_config->interpkt_count = bnad->rx_interpkt_count;
> +	ib_config->interpkt_timer = bnad->rx_interpkt_timeo;
> +#endif

If you always want to use the first version (#if 1) then get rid of the
second version.

[...]
> +/* Note: bnad_cleanup doesn't not free irqs and queues. */

A double negative can mean a positive, but this is ambiguous.  Either
change the comment to say clearly that it does free irqs and queues, or
remove the comment since the code is clear enough.

> +static void bnad_cleanup(struct bnad *bnad)
> +{
> +	kfree(bnad->rit);
> +	bnad->rit = NULL;
> +	kfree(bnad->txf_table);
> +	bnad->txf_table = NULL;
> +	kfree(bnad->rxf_table);
> +	bnad->rxf_table = NULL;
> +
> +	bnad_free_ibs(bnad);
> +	bnad_free_queues(bnad);
> +}
[...]
> +int bnad_open_locked(struct net_device *netdev)
> +{
> +	struct bnad *bnad = netdev_priv(netdev);
> +	uint i;
> +	int err;
> +
> +	ASSERT_RTNL();
> +	DPRINTK(WARNING, "%s open\n", netdev->name);
> +
> +	if (BNAD_NOT_READY(bnad)) {
> +		DPRINTK(WARNING, "%s is not ready yet (0x%lx)\n",
> +			netdev->name, bnad->state);
> +		return 0;
> +	}
> +
> +	if (!test_bit(BNAD_DISABLED, &bnad->state)) {
> +		DPRINTK(WARNING, "%s is already opened (0x%lx)\n",
> +			netdev->name, bnad->state);
> +
> +		return 0;
> +	}

Why are you returning 0 in these error cases?

[...]
> +int bnad_open(struct net_device *netdev)
> +{
> +	struct bnad *bnad = netdev_priv(netdev);
> +	int error = 0;
> +
> +	bnad_lock();
> +	if (!test_bit(BNAD_PORT_DISABLED, &bnad->state))
> +		error = bnad_open_locked(netdev);
> +	bnad_unlock();
> +	return error;
> +}
> +
> +int bnad_stop(struct net_device *netdev)
> +{
> +	int error = 0;
> +
> +	bnad_lock();
> +	error = bnad_stop_locked(netdev);
> +	bnad_unlock();
> +	return error;
> +}

Given that bnad_lock() and bnad_unlock() are defined as doing nothing,
you should merge these with the functions they call.

[...]
> +static int bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
> +{
> +#ifdef NETIF_F_TSO
> +	int err;
> +
> +#ifdef SKB_GSO_TCPV4
> +	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */

So there is no need to test for it. :-)

[...]
> +int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)

Return type must be netdev_tx_t.
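
i.e. the prototype should be

static netdev_tx_t bnad_start_xmit(struct sk_buff *skb,
				   struct net_device *netdev)

and the return values should be NETDEV_TX_OK / NETDEV_TX_BUSY rather
than plain integers.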

[...]
> +static void bnad_set_rx_mode(struct net_device *netdev)
> +{
> +	bnad_lock();
> +	bnad_set_rx_mode_locked(netdev);
> +	bnad_unlock();
> +}
[...]
> +static int bnad_set_mac_address(struct net_device *netdev, void *addr)
> +{
> +	int err = 0;
> +
> +	bnad_lock();
> +	err = bnad_set_mac_address_locked(netdev, addr);
> +	bnad_unlock();
> +	return err;
> +
> +}

Can also be merged with the functions they call.

[...]
> +static int bnad_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
> +{
> +	return -EOPNOTSUPP;
> +}

You don't need to define an ioctl() operation at all.

[...]
> +#ifdef CONFIG_NET_POLL_CONTROLLER
> +static void bnad_netpoll(struct net_device *netdev)
> +{
> +	struct bnad *bnad = netdev_priv(netdev);
> +
> +	DPRINTK(INFO, "%s bnad_netpoll\n", netdev->name);
> +	disable_irq(bnad->pcidev->irq);
> +	bnad_isr(bnad->pcidev->irq, netdev);
> +	enable_irq(bnad->pcidev->irq);
> +}
> +#endif

This doesn't look like it will work when the hardware is configured for
MSI-X.
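
A rough, untested sketch of one way to handle both cases (kicking the
per-CQ handlers directly when MSI-X is enabled):

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i;

	if (bnad->flags & BNAD_F_MSIX) {
		/* each CQ has its own vector and handler */
		for (i = 0; i < bnad->cq_num; i++)
			bnad_msix_rx(0, &bnad->cq_table[i]);
	} else {
		disable_irq(bnad->pcidev->irq);
		bnad_isr(bnad->pcidev->irq, netdev);
		enable_irq(bnad->pcidev->irq);
	}
}
#endif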

[...]
> +static void bnad_stats_timeo(unsigned long data)
> +{
> +	struct bnad *bnad = (struct bnad *)data;
> +	int i;
> +	struct bnad_rxq_info *rxqinfo;
> +
> +	spin_lock_irq(&bnad->priv_lock);
> +	bna_stats_get(bnad->priv);
> +	spin_unlock_irq(&bnad->priv_lock);
> +
> +	if (bnad->rx_dyn_coalesce_on) {
> +		u8 cls_timer;
> +		struct bnad_cq_info *cq;
> +		for (i = 0; i < bnad->cq_num; i++) {
> +			cq = &bnad->cq_table[i];
> +
> +			if ((cq->pkt_rate.small_pkt_cnt == 0)
> +			    && (cq->pkt_rate.large_pkt_cnt == 0))
> +				continue;
> +
> +				cls_timer = bna_calc_coalescing_timer(
> +				bnad->priv, &cq->pkt_rate);
> +
> +			/*For NAPI version, coalescing timer need to stored*/
> +			cq->rx_coalescing_timeo = cls_timer;

I can't parse this comment.

[...]
> +static int bnad_priv_init(struct bnad *bnad)
> +{
> +	dma_addr_t dma_addr;
> +	struct bna_dma_addr bna_dma_addr;
> +	char inst_name[16];
> +	int err, i;
> +	struct bfa_pcidev_s pcidev_info;
> +	u32 intr_mask;
> +
> +	DPRINTK(DEBUG, "port %u bnad_priv_init\n", bnad->bna_id);
> +
> +	if (bnad_msix)
> +		bnad->flags |= BNAD_F_MSIX;
> +	bnad_q_num_init(bnad, bnad_rxqsets_used);
> +
> +	bnad->work_flags = 0;
> +	INIT_WORK(&bnad->work, bnad_work);
> +
> +	init_timer(&bnad->stats_timer);
> +	bnad->stats_timer.function = &bnad_stats_timeo;
> +	bnad->stats_timer.data = (unsigned long)bnad;
[...]
> +	init_timer(&bnad->ioc_timer);
> +	bnad->ioc_timer.function = &bnad_ioc_timeout;
> +	bnad->ioc_timer.data = (unsigned long)bnad;

Each of these groups of three statements can be written as one call to
setup_timer().
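
i.e.:

	setup_timer(&bnad->stats_timer, bnad_stats_timeo,
		    (unsigned long)bnad);

and likewise for ioc_timer with bnad_ioc_timeout.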

> +	mod_timer(&bnad->ioc_timer, jiffies + HZ * BNA_IOC_TIMER_FREQ / 1000);

It would be clearer to write the timeout as jiffies +
msecs_to_jiffies(BNA_IOC_TIMER_FREQ).  Also, given that
BNA_IOC_TIMER_FREQ is the *period* of the timer, maybe it should be
called BNA_IOC_TIMER_PERIOD.
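
i.e. something like (using the suggested name):

	mod_timer(&bnad->ioc_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_PERIOD));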

> +static int __devinit
> +bnad_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidev_id)
> +{
> +	int err, using_dac;
> +	struct net_device *netdev;
> +	struct bnad *bnad;
> +	unsigned long mmio_start, mmio_len;
> +	static u32 bna_id;
> +
> +	DPRINTK(INFO, "bnad_pci_probe(0x%p, 0x%p)\n", pcidev, pcidev_id);
> +
> +	DPRINTK(DEBUG, "PCI func %d\n", PCI_FUNC(pcidev->devfn));
> +	if (!bfad_get_firmware_buf(pcidev)) {
> +		printk(KERN_WARNING "Failed to load Firmware Image!\n");
> +		return 0;

You *must* return an error code here.

[...]
> +	netdev->netdev_ops = &bnad_netdev_ops;
> +	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
> +#ifdef NETIF_F_IPV6_CSUM
> +	netdev->features |= NETIF_F_IPV6_CSUM;
> +#endif
> +#ifdef NETIF_F_TSO
> +	netdev->features |= NETIF_F_TSO;
> +#endif
> +#ifdef NETIF_F_TSO6
> +	netdev->features |= NETIF_F_TSO6;
> +#endif
> +#ifdef NETIF_F_LRO
> +	netdev->features |= NETIF_F_LRO;
> +#endif
> +#ifdef BNAD_VLAN_FEATURES
> +	netdev->vlan_features = netdev->features;
> +#endif

Get rid of these macro conditions.

> +	if (using_dac)
> +		netdev->features |= NETIF_F_HIGHDMA;
> +	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
> +	    NETIF_F_HW_VLAN_FILTER;
> +
> +	netdev->mem_start = mmio_start;
> +	netdev->mem_end = mmio_start + mmio_len - 1;
> +
> +	bnad_set_ethtool_ops(netdev);
> +
> +	bnad->bna_id = bna_id;
> +	err = bnad_priv_init(bnad);
> +	if (err) {
> +		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
> +		goto unmap_bar0;
> +	}
> +
> +	BNA_ASSERT(netdev->addr_len == ETH_ALEN);
> +#ifdef ETHTOOL_GPERMADDR
> +	memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
> +#endif

Just put the address in netdev->perm_addr in the first place, and don't
test ETHTOOL_GPERMADDR.

[...]
> +static void __devexit bnad_pci_remove(struct pci_dev *pcidev)
> +{
> +	struct net_device *netdev = pci_get_drvdata(pcidev);
> +	struct bnad *bnad;
> +
> +	DPRINTK(INFO, "%s bnad_pci_remove\n", netdev->name);
> +	if (!netdev)
> +		return;

Surely this would indicate a bug?

[...]
> diff -ruP linux-2.6.32-rc4-orig/drivers/net/bna/bnad.h linux-2.6.32-rc4-mod/drivers/net/bna/bnad.h
> --- linux-2.6.32-rc4-orig/drivers/net/bna/bnad.h        1969-12-31 16:00:00.000000000 -0800
> +++ linux-2.6.32-rc4-mod/drivers/net/bna/bnad.h 2009-10-16 10:30:53.075436000 -0700
[...]
> +#if !defined(CONFIG_INET_LRO) && !defined(CONFIG_INET_LRO_MODULE)
> +#include <net/ip.h>
> +#include <net/tcp.h>
> +#else
> +#include <linux/inet_lro.h>
> +#endif
> +
> +#include "bnad_compat.h"
> +
> +#if !defined(CONFIG_INET_LRO) && !defined(CONFIG_INET_LRO_MODULE)
> +#include "inet_lro.h"
> +#endif

What is this?  You want to use your own copy of inet_lro?

You should really be using GRO instead (which is a lot easier).
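
As a rough sketch (untested), the LRO calls in bnad_poll_cq() would
collapse to something like this, assuming you also set NETIF_F_GRO in
netdev->features:

	if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) && bnad_vlan_strip)
		vlan_gro_receive(&cqinfo->napi, bnad->vlangrp,
		    ntohs(cmpl->vlan_tag), skb);
	else
		napi_gro_receive(&cqinfo->napi, skb);

and then the private inet_lro copy, the lro_flush_all() call and the
net_lro_desc allocations can all go away.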

[...]
> +#define bnad_lock()
> +#define bnad_unlock()

What's the point of this?

[...]
> +struct bnad {
[...]
> +	struct net_device_stats net_stats;

You don't need this; use the stats in struct net_device.

[...]
> +	unsigned char perm_addr[ETH_ALEN];

Use the perm_addr in struct net_device.

> +	u32 pci_saved_config[16];
[...]

You don't need this; the PCI core saves config registers in struct
pci_dev.
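
i.e. a plain pci_save_state()/pci_restore_state() pair on the pci_dev
should be all you need, e.g.:

	pci_save_state(bnad->pcidev);
	/* ... device reset or suspend ... */
	pci_restore_state(bnad->pcidev);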

You should rebase this against net-next-2.6 and run
scripts/checkpatch.pl over it before resubmitting.

Ben.

-- 
Ben Hutchings, Senior Software Engineer, Solarflare Communications
Not speaking for my employer; that's the marketing department's job.
They asked us to note that Solarflare product names are trademarked.


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Subject: [PATCH 1/6] bna: Brocade 10Gb Ethernet device driver
@ 2009-10-16 18:24 Rasesh Mody
  2009-10-16 20:20 ` Ben Hutchings
  2009-10-20  0:54 ` Herbert Xu
  0 siblings, 2 replies; 30+ messages in thread
From: Rasesh Mody @ 2009-10-16 18:24 UTC (permalink / raw)
  To: netdev; +Cc: amathur

From: Rasesh Mody <rmody@brocade.com>

This is patch 1/6 which contains linux driver source for
Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.

We wish this patch to be considered for inclusion in 2.6.32

Signed-off-by: Rasesh Mody <rmody@brocade.com>
---
 bnad.c | 3576 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 bnad.h |  374 ++++++
 2 files changed, 3950 insertions(+)

diff -ruP linux-2.6.32-rc4-orig/drivers/net/bna/bnad.c linux-2.6.32-rc4-mod/drivers/net/bna/bnad.c
--- linux-2.6.32-rc4-orig/drivers/net/bna/bnad.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.32-rc4-mod/drivers/net/bna/bnad.c	2009-10-16 10:30:53.050461000 -0700
@@ -0,0 +1,3576 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+/**
+ *  bnad.c  Brocade 10G PCIe Ethernet driver.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/workqueue.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pm.h>
+#include <linux/random.h>
+
+#ifdef NETIF_F_TSO
+#include <net/checksum.h>
+#endif
+
+
+#include "bnad.h"
+#include "bna_os.h"
+#include "bna_iocll.h"
+#include "bna_intr.h"
+#include "bnad_defs.h"
+
+#ifdef BNAD_NO_IP_ALIGN
+#undef NET_IP_ALIGN
+#define NET_IP_ALIGN	0
+#endif
+
+
+
+#define BNAD_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
+
+#define BNAD_RESET_Q(_bnad, _q, _unmap_q)				\
+do {									\
+	if ((_q)->producer_index != (_q)->consumer_index) {      \
+		DPRINTK(ERR, "Q producer index %u != ",	(_q)->producer_index);      \
+		DPRINTK(ERR, "consumer index %u\n", (_q)->consumer_index);      \
+	}								\
+	BNA_ASSERT((_q)->producer_index == (_q)->consumer_index);      \
+	if ((_unmap_q)->producer_index != (_unmap_q)->consumer_index) {      \
+		DPRINTK(ERR, "UnmapQ producer index %u != ", (_unmap_q)->producer_index);      \
+		DPRINTK(ERR, "consumer index %u\n", (_unmap_q)->consumer_index);      \
+	}								\
+	BNA_ASSERT((_unmap_q)->producer_index == \
+		(_unmap_q)->consumer_index);      \
+	(_q)->producer_index = 0;	\
+	(_q)->consumer_index = 0;	\
+	(_unmap_q)->producer_index = 0;	\
+	(_unmap_q)->consumer_index = 0;	\
+	{	\
+		u32 _ui;	\
+		for (_ui = 0; _ui < (_unmap_q)->q_depth; _ui++)		\
+			BNA_ASSERT(!(_unmap_q)->unmap_array[_ui].skb);      \
+	}	\
+} while (0)
+
+static uint bnad_msix = 1;
+module_param(bnad_msix, uint, 0444);
+MODULE_PARM_DESC(bnad_msix, "Enable MSI-X");
+
+uint bnad_small_large_rxbufs = 1;
+module_param(bnad_small_large_rxbufs, uint, 0444);
+MODULE_PARM_DESC(bnad_small_large_rxbufs, "Enable small/large buffer receive");
+
+static uint bnad_rxqsets_used;
+module_param(bnad_rxqsets_used, uint, 0444);
+MODULE_PARM_DESC(bnad_rxqsets_used, "Number of RxQ sets to be used");
+
+static uint bnad_ipid_mode;
+module_param(bnad_ipid_mode, uint, 0444);
+MODULE_PARM_DESC(bnad_ipid_mode, "0 - Use IP ID 0x0000 - 0x7FFF for LSO; "
+    "1 - Use full range of IP ID for LSO");
+
+uint bnad_txq_depth = BNAD_ENTRIES_PER_TXQ;
+module_param(bnad_txq_depth, uint, 0444);
+MODULE_PARM_DESC(bnad_txq_depth, "Maximum number of entries per TxQ");
+
+uint bnad_rxq_depth = BNAD_ENTRIES_PER_RXQ;
+module_param(bnad_rxq_depth, uint, 0444);
+MODULE_PARM_DESC(bnad_rxq_depth, "Maximum number of entries per RxQ");
+
+static uint bnad_vlan_strip = 1;
+module_param(bnad_vlan_strip, uint, 0444);
+MODULE_PARM_DESC(bnad_vlan_strip, "Let the hardware strip off VLAN header");
+
+static uint bnad_log_level = LOG_WARN_LEVEL;
+module_param(bnad_log_level, uint, 0644);
+MODULE_PARM_DESC(bnad_log_level, "Log level");
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0644);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable auto recovery");
+
+uint bnad_rxqs_per_cq;
+
+static void bnad_disable_msix(struct bnad *bnad);
+static void bnad_free_ibs(struct bnad *bnad);
+static void bnad_set_rx_mode(struct net_device *netdev);
+static void bnad_set_rx_mode_locked(struct net_device *netdev);
+static void bnad_reconfig_vlans(struct bnad *bnad);
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets);
+static int bnad_set_mac_address(struct net_device *netdev, void *addr);
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr);
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu);
+static int bnad_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid);
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid);
+static void bnad_netpoll(struct net_device *netdev);
+
+static const struct net_device_ops bnad_netdev_ops = {
+	.ndo_open				= bnad_open,
+	.ndo_stop				= bnad_stop,
+	.ndo_start_xmit			= bnad_start_xmit,
+	.ndo_get_stats			= bnad_get_stats,
+#ifdef HAVE_SET_RX_MODE
+	.ndo_set_rx_mode		= &bnad_set_rx_mode,
+#endif
+	.ndo_set_multicast_list	= bnad_set_rx_mode,
+	.ndo_set_mac_address	= bnad_set_mac_address,
+	.ndo_change_mtu			= bnad_change_mtu,
+	.ndo_do_ioctl			= bnad_ioctl,
+
+	.ndo_vlan_rx_register	= bnad_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= bnad_netpoll,
+#endif
+};
+static int bnad_check_module_params(void)
+{
+	/* bnad_msix */
+	if (bnad_msix && bnad_msix != 1)
+		printk(KERN_WARNING "bna: bnad_msix should be 0 or 1, "
+		    "%u is invalid, set bnad_msix to 1\n", bnad_msix);
+
+	/* bnad_small_large_rxbufs */
+	if (bnad_small_large_rxbufs && bnad_small_large_rxbufs != 1)
+		printk(KERN_WARNING "bna: bnad_small_large_rxbufs should be "
+		    "0 or 1, %u is invalid, set bnad_small_large_rxbufs to 1\n",
+		    bnad_small_large_rxbufs);
+	if (bnad_small_large_rxbufs)
+		bnad_rxqs_per_cq = 2;
+	else
+		bnad_rxqs_per_cq = 1;
+
+	/* bnad_rxqsets_used */
+	if (bnad_rxqsets_used > BNAD_MAX_RXQS / bnad_rxqs_per_cq) {
+		printk(KERN_ERR "bna: the maximum value for bnad_rxqsets_used "
+		    "is %u, %u is invalid\n",
+		    BNAD_MAX_RXQS / bnad_rxqs_per_cq, bnad_rxqsets_used);
+		return -EINVAL;
+	}
+	if (!BNA_POWER_OF_2(bnad_rxqsets_used)) {
+		printk(KERN_ERR "bna: bnad_rxqsets_used should be power of 2, "
+		    "%u is invalid\n", bnad_rxqsets_used);
+		return -EINVAL;
+	}
+	if (bnad_rxqsets_used > (uint)num_online_cpus())
+		printk(KERN_WARNING "bna: set bnad_rxqsets_used (%u) "
+		    "larger than number of CPUs (%d) may not be helpful\n",
+		    bnad_rxqsets_used, num_online_cpus());
+
+	/* bnad_ipid_mode */
+	if (bnad_ipid_mode && bnad_ipid_mode != 1) {
+		printk(KERN_ERR "bna: bnad_ipid_mode should be 0 or 1, "
+		    "%u is invalid\n", bnad_ipid_mode);
+		return -EINVAL;
+	}
+
+	/* bnad_txq_depth */
+	if (bnad_txq_depth > BNAD_MAX_Q_DEPTH) {
+		printk(KERN_ERR "bna: bnad_txq_depth should be <= %u, "
+		    "%u is invalid\n", BNAD_MAX_Q_DEPTH, bnad_txq_depth);
+		return -EINVAL;
+	}
+	if (!BNA_POWER_OF_2(bnad_txq_depth)) {
+		printk(KERN_ERR "bna: bnad_txq_depth should be power of 2, "
+		    "%u is invalid\n", bnad_txq_depth);
+		return -EINVAL;
+	}
+	if (bnad_txq_depth < BNAD_MIN_Q_DEPTH) {
+		printk(KERN_ERR "bna: bnad_txq_depth should be >= %u, "
+		    "%u is invalid\n", BNAD_MIN_Q_DEPTH, bnad_txq_depth);
+		return -EINVAL;
+	}
+
+	/* bnad_rxq_depth */
+	if (bnad_rxq_depth > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq) {
+		printk(KERN_ERR "bna: bnad_rxq_depth should be <= %u, "
+		    "%u is invalid\n", BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq,
+		    bnad_rxq_depth);
+		return -EINVAL;
+	}
+	if (!BNA_POWER_OF_2(bnad_rxq_depth)) {
+		printk(KERN_ERR "bna: bnad_rxq_depth should be power of 2, "
+		    "%u is invalid\n", bnad_rxq_depth);
+		return -EINVAL;
+	}
+	if (bnad_rxq_depth < BNAD_MIN_Q_DEPTH) {
+		printk(KERN_ERR "bna: bnad_rxq_depth should be >= %u, "
+		    "%u is invalid\n", BNAD_MIN_Q_DEPTH, bnad_rxq_depth);
+		return -EINVAL;
+	}
+
+	/* bnad_vlan_strip */
+	if (bnad_vlan_strip && bnad_vlan_strip != 1)
+		printk(KERN_WARNING "bna: bnad_vlan_strip should be 0 or 1, "
+		    "%u is invalid, set bnad_vlan_strip to 1\n",
+		    bnad_vlan_strip);
+
+	/* bnad_ioc_auto_recover */
+	if (bnad_ioc_auto_recover && bnad_ioc_auto_recover != 1)
+		printk(KERN_WARNING
+			"bna: bnad_ioc_auto_recover should be 0 or 1, "
+		    "%u is invalid, set bnad_ioc_auto_recover to 1\n",
+		    bnad_ioc_auto_recover);
+
+
+	return 0;
+}
+
+u32 bnad_get_msglevel(struct net_device *netdev)
+{
+	return bnad_log_level;
+}
+
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	bnad_log_level = msglevel;
+}
+
+static unsigned int bnad_free_txbufs(struct bnad_txq_info *txqinfo,
+    u16 updated_txq_cons)
+{
+	struct bnad *bnad = txqinfo->bnad;
+	unsigned int sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	wis = BNAD_Q_INDEX_CHANGE(txqinfo->txq.q.consumer_index,
+	    updated_txq_cons, txqinfo->txq.q.q_depth);
+	BNA_ASSERT(wis <=
+	    BNA_QE_IN_USE_CNT(&txqinfo->txq.q, txqinfo->txq.q.q_depth));
+	unmap_array = txqinfo->skb_unmap_q.unmap_array;
+	unmap_cons = txqinfo->skb_unmap_q.consumer_index;
+	prefetch(&unmap_array[unmap_cons + 1]);
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+		BNA_ASSERT(skb);
+		unmap_array[unmap_cons].skb = NULL;
+		BNA_ASSERT(wis >=
+		    BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags));
+		BNA_ASSERT(((txqinfo->skb_unmap_q.producer_index -
+		    unmap_cons) & (txqinfo->skb_unmap_q.q_depth - 1)) >=
+		    1 + skb_shinfo(skb)->nr_frags);
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNAD_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+		    pci_unmap_addr(&unmap_array[unmap_cons], dma_addr),
+		    skb_headlen(skb), PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, txqinfo->skb_unmap_q.q_depth);
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+			    pci_unmap_addr(&unmap_array[unmap_cons], dma_addr),
+			    skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+			    0);
+			BNA_QE_INDX_ADD(unmap_cons, 1,
+			    txqinfo->skb_unmap_q.q_depth);
+			prefetch(&unmap_array[unmap_cons + 1]);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	txqinfo->txq.q.consumer_index = updated_txq_cons;
+	txqinfo->skb_unmap_q.consumer_index = unmap_cons;
+	txqinfo->tx_packets += sent_packets;
+	txqinfo->tx_bytes += sent_bytes;
+	return sent_packets;
+}
+
+static int bnad_lro_get_skb_header(struct sk_buff *skb, void **iphdr,
+    void **tcphdr, u64 *hdr_flags, void *priv)
+{
+	struct bna_cq_entry *cmpl = priv;
+	u32 flags = ntohl(cmpl->flags);
+
+	if ((flags & BNA_CQ_EF_IPV4) && (flags & BNA_CQ_EF_TCP)) {
+		skb_reset_network_header(skb);
+		skb_set_transport_header(skb, ip_hdrlen(skb));
+		*iphdr = ip_hdr(skb);
+		*tcphdr = tcp_hdr(skb);
+		*hdr_flags = LRO_IPV4 | LRO_TCP;
+		return 0;
+	} else {
+		return -1;
+	}
+}
+
+static inline void bnad_disable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv,
+		    &bnad->txq_table[i].ib, 0);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv,
+		    &bnad->cq_table[i].ib, 0);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+}
+
+static inline void bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv,
+		    &bnad->txq_table[i].ib, bnad->tx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->txq_table[i].ib, 0);
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		bna_ib_coalescing_timer_set(bnad->priv,
+		    &bnad->cq_table[i].ib,
+			bnad->cq_table[i].rx_coalescing_timeo);
+		bna_ib_ack(bnad->priv, &bnad->cq_table[i].ib, 0);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static inline void
+bnad_disable_rx_irq(struct bnad *bnad, struct bnad_cq_info *cqinfo)
+{
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib, 0);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+}
+static inline void
+bnad_enable_rx_irq(struct bnad *bnad, struct bnad_cq_info *cqinfo)
+{
+	spin_lock_irq(&bnad->priv_lock);
+
+	bna_ib_coalescing_timer_set(bnad->priv, &cqinfo->ib,
+	    cqinfo->rx_coalescing_timeo);
+	bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static unsigned int bnad_tx(struct bnad *bnad, struct bnad_txq_info *txqinfo)
+{
+	struct net_device *netdev = bnad->netdev;
+	unsigned int sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags))
+		return 0;
+
+	DPRINTK(DEBUG, "%s ", netdev->name);
+	DPRINTK(DEBUG, "TxQ hw consumer index %u\n",
+		*txqinfo->hw_consumer_index);
+	 sent = bnad_free_txbufs(txqinfo,
+	    (u16)(*txqinfo->hw_consumer_index));
+	if (sent) {
+		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
+		    BNA_Q_FREE_COUNT(&txqinfo->txq) >=
+				BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+		bna_ib_ack(bnad->priv, &txqinfo->ib, sent);
+		DPRINTK(DEBUG, "%s ack TxQ IB %u packets\n",
+			netdev->name, sent);
+	} else {
+		bna_ib_ack(bnad->priv, &txqinfo->ib, 0);
+	}
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+
+	return sent;
+}
+
+static irqreturn_t bnad_msix_tx(int irq, void *data)
+{
+	struct bnad_txq_info *txqinfo = (struct bnad_txq_info *)data;
+	struct bnad *bnad = txqinfo->bnad;
+
+
+	bnad_tx(bnad, txqinfo);
+
+	return IRQ_HANDLED;
+}
+
+static void bnad_alloc_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc = BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+	    rxqinfo->skb_unmap_q.q_depth);
+
+	unmap_array = rxqinfo->skb_unmap_q.unmap_array;
+	unmap_prod = rxqinfo->skb_unmap_q.producer_index;
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q, rxent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= rxqinfo->rxq.q.q_depth);
+
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, &rxqinfo->rxq.q,
+			    rxent, wi_range);
+			BNA_ASSERT(wi_range &&
+			    wi_range <= rxqinfo->rxq.q.q_depth);
+		}
+#ifdef BNAD_RXBUF_HEADROOM
+		skb = netdev_alloc_skb(rxqinfo->bnad->netdev,
+		    rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN);
+#else
+		skb = alloc_skb(rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+		    GFP_ATOMIC);
+#endif
+		if (unlikely(!skb)) {
+			rxqinfo->rxbuf_alloc_failed++;
+			goto finishing;
+		}
+#ifndef BNAD_RXBUF_HEADROOM
+		skb->dev = rxqinfo->bnad->netdev;
+#endif
+		skb_reserve(skb, NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr = pci_map_single(rxqinfo->bnad->pcidev, skb->data,
+		    rxqinfo->rxq_config.buffer_size, PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod],
+			dma_addr, dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, rxqinfo->skb_unmap_q.q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		rxqinfo->skb_unmap_q.producer_index = unmap_prod;
+		rxqinfo->rxq.q.producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(&rxqinfo->rxq);
+	}
+}
+
+static inline void bnad_refill_rxq(struct bnad_rxq_info *rxqinfo)
+{
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags)) {
+		if (BNA_QE_FREE_CNT(&rxqinfo->skb_unmap_q,
+		    rxqinfo->skb_unmap_q.q_depth) >>
+		    BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_rxbufs(rxqinfo);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+	}
+}
+
+static unsigned int
+bnad_poll_cq(struct bnad *bnad, struct bnad_cq_info *cqinfo, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_rxq_info *rxqinfo = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	struct bna_pkt_rate *pkt_rt = &cqinfo->pkt_rate;
+
+	prefetch(bnad);
+	prefetch(bnad->netdev);
+	cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+	BNA_ASSERT(wi_range && wi_range <= cqinfo->cq.q.q_depth);
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+		rxqinfo = &bnad->rxq_table[cmpl->rxq_id];
+		unmap_q = &rxqinfo->skb_unmap_q;
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		prefetch(skb->data - NET_IP_ALIGN);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+		    pci_unmap_addr(
+		    &unmap_q->unmap_array[unmap_q->consumer_index],
+		    dma_addr),
+		    rxqinfo->rxq_config.buffer_size, PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		/* XXX May be bad for performance. */
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+		wis++;
+		if (likely(--wi_range)) {
+			next_cmpl = cmpl + 1;
+		} else {
+			BNA_Q_PI_ADD(&cqinfo->cq, wis);
+			wis = 0;
+			next_cmpl = bna_cq_pg_prod_ptr(&cqinfo->cq, &wi_range);
+			BNA_ASSERT(wi_range &&
+			    wi_range <= cqinfo->cq.q.q_depth);
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+			BNA_CQ_EF_FCS_ERROR | BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb_any(skb);
+			rxqinfo->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely(bnad->rx_csum &&
+		    (((flags & BNA_CQ_EF_IPV4) &&
+		    (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+		    (flags & BNA_CQ_EF_IPV6)) &&
+		    (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+		    (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		rxqinfo->rx_packets++;
+		rxqinfo->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlangrp && (flags & BNA_CQ_EF_VLAN) &&
+		    bnad_vlan_strip) {
+			BNA_ASSERT(cmpl->vlan_tag);
+			if (skb->ip_summed == CHECKSUM_UNNECESSARY
+#ifdef NETIF_F_LRO
+			    && (bnad->netdev->features & NETIF_F_LRO)
+#endif
+) {
+				lro_vlan_hwaccel_receive_skb(&cqinfo->lro, skb,
+				    bnad->vlangrp, ntohs(cmpl->vlan_tag), cmpl);
+			} else {
+				vlan_hwaccel_receive_skb(skb, bnad->vlangrp,
+				    ntohs(cmpl->vlan_tag));
+			}
+
+		} else {
+
+			if (skb->ip_summed == CHECKSUM_UNNECESSARY
+#ifdef NETIF_F_LRO
+			    && (bnad->netdev->features & NETIF_F_LRO)
+#endif
+) {
+				lro_receive_skb(&cqinfo->lro, skb, cmpl);
+			} else {
+				netif_receive_skb(skb);
+			}
+
+		}
+
+		bnad->netdev->last_rx = jiffies;
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	lro_flush_all(&cqinfo->lro);
+
+	BNA_Q_PI_ADD(&cqinfo->cq, wis);
+
+	if (likely(rxqinfo)) {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, packets);
+		/* Check the current queue first. */
+		bnad_refill_rxq(rxqinfo);
+
+		/* XXX counters per queue for refill? */
+		if (likely(bnad_small_large_rxbufs)) {
+			/* There are 2 RxQs - small and large buffer queues */
+			unsigned int rxq_id = (rxqinfo->rxq_id ^ 1);
+			bnad_refill_rxq(&bnad->rxq_table[rxq_id]);
+		}
+	} else {
+		bna_ib_ack(bnad->priv, &cqinfo->ib, 0);
+	}
+
+	return packets;
+}
+
+static irqreturn_t bnad_msix_rx(int irq, void *data)
+{
+	struct bnad_cq_info *cqinfo = (struct bnad_cq_info *)data;
+	struct bnad *bnad = cqinfo->bnad;
+
+		if (likely(netif_rx_schedule_prep(bnad->netdev,
+			&cqinfo->napi))) {
+			bnad_disable_rx_irq(bnad, cqinfo);
+			__netif_rx_schedule(bnad->netdev, &cqinfo->napi);
+		}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_msix_err_mbox(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		DPRINTK(DEBUG, "port %d msix err/mbox irq status 0x%x\n",
+			bnad->bna_id, intr_status);
+		bna_mbox_err_handler(bnad->priv, intr_status);
+	} else {
+		DPRINTK(WARNING, "port %d msix err/mbox irq status 0x%x\n",
+			 bnad->bna_id, intr_status);
+	}
+	spin_unlock(&bnad->priv_lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bnad_isr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	u32 intr_status;
+
+	spin_lock(&bnad->priv_lock);
+	bna_intr_status_get(bnad->priv, &intr_status);
+	spin_unlock(&bnad->priv_lock);
+
+	if (!intr_status)
+		return IRQ_NONE;
+
+	DPRINTK(DEBUG, "port %u bnad_isr: 0x%x\n", bnad->bna_id, intr_status);
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		spin_lock(&bnad->priv_lock);
+		bna_mbox_err_handler(bnad->priv, intr_status);
+		spin_unlock(&bnad->priv_lock);
+		if (BNA_IS_ERR_INTR(intr_status) ||
+		    !BNA_IS_INTX_DATA_INTR(intr_status))
+			goto exit_isr;
+	}
+
+	if (likely(netif_rx_schedule_prep(bnad->netdev,
+	    &bnad->cq_table[0].napi))) {
+		bnad_disable_txrx_irqs(bnad);
+		__netif_rx_schedule(bnad->netdev, &bnad->cq_table[0].napi);
+	}
+
+exit_isr:
+	return IRQ_HANDLED;
+}
+
+static int bnad_request_mbox_irq(struct bnad *bnad)
+{
+	int err;
+
+	if (bnad->flags & BNAD_F_MSIX) {
+		DPRINTK(DEBUG,
+			"port %u requests IRQ %u for mailbox in MSI-X mode\n",
+			bnad->bna_id,
+			bnad->msix_table[bnad->msix_num - 1].vector);
+		err = request_irq(bnad->msix_table[bnad->msix_num - 1].vector,
+		    (irq_handler_t)&bnad_msix_err_mbox, 0, bnad->netdev->name,
+		    bnad->netdev);
+	} else {
+		DPRINTK(DEBUG, "port %u requests IRQ %u in INTx mode\n",
+			bnad->bna_id, bnad->pcidev->irq);
+		err = request_irq(bnad->pcidev->irq, (irq_handler_t)&bnad_isr,
+		    IRQF_SHARED, bnad->netdev->name, bnad->netdev);
+	}
+
+	if (err) {
+		dev_err(&bnad->pcidev->dev,
+		    "Request irq for mailbox failed: %d\n", err);
+		return err;
+	}
+
+	if (bnad->flags & BNAD_F_MSIX)
+		bna_mbox_msix_idx_set(bnad->priv, bnad->msix_num - 1);
+
+	bna_mbox_intr_enable(bnad->priv);
+	return 0;
+}
+
+
+static void bnad_sync_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->flags & BNAD_F_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+	synchronize_irq(irq);
+}
+
+static void bnad_free_mbox_irq(struct bnad *bnad)
+{
+	uint irq;
+
+	if (bnad->flags & BNAD_F_MSIX)
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+	else
+		irq = bnad->pcidev->irq;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_mbox_intr_disable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+	free_irq(irq, bnad->netdev);
+}
+
+static int bnad_request_txq_irq(struct bnad *bnad, uint txq_id)
+{
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	if (!(bnad->flags & BNAD_F_MSIX))
+		return 0;
+	DPRINTK(DEBUG, "port %u requests irq %u for TxQ %u in MSIX mode\n",
+		bnad->bna_id, bnad->msix_table[txq_id].vector, txq_id);
+	return request_irq(bnad->msix_table[txq_id].vector,
+	    (irq_handler_t)&bnad_msix_tx, 0, bnad->txq_table[txq_id].name,
+	    &bnad->txq_table[txq_id]);
+}
+
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id)
+{
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	if (!(bnad->flags & BNAD_F_MSIX))
+		return 0;
+	DPRINTK(DEBUG, "port %u requests irq %u for CQ %u in MSIX mode\n",
+		bnad->bna_id,
+		bnad->msix_table[bnad->txq_num + cq_id].vector, cq_id);
+	return request_irq(bnad->msix_table[bnad->txq_num + cq_id].vector,
+	    (irq_handler_t)&bnad_msix_rx, 0, bnad->cq_table[cq_id].name,
+	    &bnad->cq_table[cq_id]);
+}
+
+static void bnad_intx_enable_txrx(struct bnad *bnad)
+{
+	u32 mask;
+	int i;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_intx_disable(bnad->priv, &mask);
+	mask &= ~0xffff;
+	bna_intx_enable(bnad->priv, mask);
+	for (i = 0; i < bnad->ib_num; i++)
+		bna_ib_ack(bnad->priv, bnad->ib_table[i].ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static int bnad_request_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	int i;
+	int err;
+
+	if (!(bnad->flags & BNAD_F_MSIX)) {
+		bnad_intx_enable_txrx(bnad);
+		return 0;
+	}
+
+	entries = bnad->msix_table;
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_request_txq_irq(bnad, i);
+		if (err) {
+			printk(KERN_ERR "%s request irq for TxQ %d failed %d\n",
+			    bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[i].vector,
+				    &bnad->txq_table[i]);
+			}
+			return err;
+		}
+	}
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_request_cq_irq(bnad, i);
+		if (err) {
+			printk(KERN_ERR "%s request irq for CQ %u failed %d\n",
+			    bnad->netdev->name, i, err);
+			while (--i >= 0) {
+				free_irq(entries[bnad->txq_num + i].vector,
+					 &bnad->cq_table[i]);
+			}
+			goto free_txq_irqs;
+		}
+	}
+
+	return 0;
+
+free_txq_irqs:
+	for (i = 0; i < bnad->txq_num; i++)
+		free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+	bnad_disable_msix(bnad);
+
+	return err;
+}
+
+static void bnad_free_txrx_irqs(struct bnad *bnad)
+{
+	struct msix_entry *entries;
+	uint i;
+
+	if (bnad->flags & BNAD_F_MSIX) {
+		entries = bnad->msix_table;
+		for (i = 0; i < bnad->txq_num; i++)
+			free_irq(entries[i].vector, &bnad->txq_table[i]);
+
+		for (i = 0; i < bnad->cq_num; i++)
+			free_irq(entries[bnad->txq_num + i].vector,
+			    &bnad->cq_table[i]);
+	} else {
+		synchronize_irq(bnad->pcidev->irq);
+	}
+}
+
+void bnad_setup_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+
+	BNA_ASSERT(ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_ib_config_set(bnad->priv, ib_entry->ib, ib_id,
+	    &ib_entry->ib_config);
+	/* Start the IB */
+	bna_ib_ack(bnad->priv, ib_entry->ib, 0);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_ibs(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_ib(bnad, bnad->txq_table[i].txq_config.ib_id);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_ib(bnad, bnad->cq_table[i].cq_config.ib_id);
+}
+
+/* These functions are called back with priv_lock held. */
+
+static void bnad_lldp_get_cfg_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_attr_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->lldp_comp_status = status;
+	complete(&bnad->lldp_comp);
+}
+
+static void bnad_cee_get_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_stats_comp_status = status;
+	complete(&bnad->cee_stats_comp);
+}
+
+static void bnad_cee_reset_stats_cb(void *arg, bfa_status_t status)
+{
+	struct bnad *bnad = arg;
+	bnad->cee_reset_stats_status = status;
+	complete(&bnad->cee_reset_stats_comp);
+}
+
+static void bnad_ucast_set_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->ucast_comp_status = status;
+	complete(&bnad->ucast_comp);
+}
+
+static void bnad_q_stop_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = arg;
+
+	bnad->qstop_comp_status = status;
+	complete(&bnad->qstop_comp);
+}
+
+static void bnad_link_up_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct net_device *netdev = bnad->netdev;
+
+	DPRINTK(INFO, "%s bnad_link_up_cb\n", netdev->name);
+	if (netif_running(netdev)) {
+		if (!netif_carrier_ok(netdev) &&
+		    !test_bit(BNAD_DISABLED, &bnad->state)) {
+				printk(KERN_INFO "%s link up\n", netdev->name);
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+			bnad->stats.netif_queue_wakeup++;
+		}
+	}
+}
+
+static void bnad_link_down_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+	struct net_device *netdev = bnad->netdev;
+
+	DPRINTK(INFO, "%s bnad_link_down_cb\n", netdev->name);
+	if (netif_running(netdev)) {
+		if (netif_carrier_ok(netdev)) {
+			printk(KERN_INFO "%s link down\n", netdev->name);
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+			bnad->stats.netif_queue_stop++;
+		}
+	}
+}
+
+static void bnad_stats_get_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	bnad->stats.hw_stats_updates++;
+	if (!test_bit(BNAD_DISABLED, &bnad->state))
+		mod_timer(&bnad->stats_timer, jiffies + HZ);
+}
+
+/* Diagnostics */
+static void bnad_set_diag_lb_cb(void *arg, u8 status)
+{
+	struct bnad_diag_lb_params *dlbp =
+	    (struct bnad_diag_lb_params *)arg;
+
+	dlbp->diag_lb_comp_status = status;
+	DPRINTK(INFO, "bnad_set_diag_lb_cb() for %s %d\n",
+		dlbp->bnad->netdev->name, status);
+	complete(&dlbp->diag_lb_comp);
+}
+
+/* Called with bnad priv_lock held. */
+static void bnad_hw_error(struct bnad *bnad, u8 status)
+{
+	unsigned int irq;
+
+	bna_mbox_intr_disable(bnad->priv);
+	if (bnad->flags & BNAD_F_MSIX) {
+		if (!test_and_set_bit(BNAD_MBOX_IRQ_DISABLED, &bnad->state)) {
+			irq = bnad->msix_table[bnad->txq_num +
+			    bnad->cq_num].vector;
+			DPRINTK(WARNING, "Disabling Mbox IRQ %d for port %d\n",
+				irq, bnad->bna_id);
+			disable_irq_nosync(irq);
+		}
+	}
+
+	bna_cleanup(bnad->priv);
+	bnad->work_flags = BNAD_WF_ERROR;
+	if (!test_bit(BNAD_REMOVED, &bnad->state))
+		schedule_work(&bnad->work);
+}
+
+static void bnad_hw_error_cb(void *arg, u8 status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	DPRINTK(WARNING, "port %d HW error callback %u\n",
+		bnad->bna_id, status);
+
+	bnad_hw_error(bnad, status);
+}
+
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth)
+{
+	/* Q_depth must be power of 2 for macros to work. */
+	BNA_ASSERT(BNA_POWER_OF_2(q_depth));
+	unmap_q->q_depth = q_depth;
+	unmap_q->unmap_array = vmalloc(q_depth *
+	    sizeof(struct bnad_skb_unmap));
+	if (!unmap_q->unmap_array)
+		return -ENOMEM;
+	memset(unmap_q->unmap_array, 0,
+	    q_depth * sizeof(struct bnad_skb_unmap));
+	return 0;
+}
+
+static int bnad_alloc_unmap_queues(struct bnad *bnad)
+{
+	int i, err = 0;
+	struct bnad_txq_info *txqinfo;
+	struct bnad_rxq_info *rxqinfo;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		err = bnad_alloc_unmap_q(&txqinfo->skb_unmap_q,
+		    txqinfo->txq.q.q_depth * 4);
+		DPRINTK(DEBUG, "%s allocating Tx unmap Q %d depth %u\n",
+			bnad->netdev->name, i, txqinfo->txq.q.q_depth * 4);
+		if (err) {
+			DPRINTK(ERR, "%s allocating Tx unmap Q %d failed: %d\n",
+				bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		err = bnad_alloc_unmap_q(&rxqinfo->skb_unmap_q,
+		    rxqinfo->rxq.q.q_depth);
+		DPRINTK(INFO, "%s allocating Rx unmap Q %d depth %u\n",
+			bnad->netdev->name, i, rxqinfo->rxq.q.q_depth);
+		if (err) {
+			DPRINTK(ERR, "%s allocating Rx unmap Q %d failed: %d\n",
+				bnad->netdev->name, i, err);
+			return err;
+		}
+	}
+	return 0;
+}
+
+/* Called with priv_lock. */
+static void bnad_flush_rxbufs(struct bnad_rxq_info *rxqinfo)
+{
+	struct bnad *bnad = rxqinfo->bnad;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 cq_id;
+
+	unmap_q = &rxqinfo->skb_unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BNA_ASSERT(skb);
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+		    pci_unmap_addr(
+		    &unmap_q->unmap_array[unmap_q->consumer_index], dma_addr),
+		    rxqinfo->rxq_config.buffer_size + NET_IP_ALIGN,
+		    PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_Q_CI_ADD(&rxqinfo->rxq, 1);
+	}
+
+	BNAD_RESET_Q(bnad, &rxqinfo->rxq.q, &rxqinfo->skb_unmap_q);
+	cq_id = rxqinfo->rxq_id / bnad_rxqs_per_cq;
+	*bnad->cq_table[cq_id].hw_producer_index = 0;
+}
+
+static int bnad_disable_txq(struct bnad *bnad, u32 txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	WARN_ON(in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+	txqinfo = &bnad->txq_table[txq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	err = bna_txq_stop(bnad->priv, txq_id);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err)
+		goto txq_stop_exit;
+
+	DPRINTK(INFO, "Waiting for %s TxQ %d stop reply\n",
+		bnad->netdev->name, txq_id);
+	wait_for_completion(&bnad->qstop_comp);
+
+	err = bnad->qstop_comp_status;
+txq_stop_exit:
+	if (err)
+		DPRINTK(ERR, "%s bna_txq_stop %d failed %d\n",
+			bnad->netdev->name, txq_id, err);
+	return err;
+}
+
+int bnad_disable_rxqs(struct bnad *bnad, u64 rxq_id_mask)
+{
+	int err;
+
+	struct timeval  tv;
+
+	BNA_ASSERT(!in_interrupt());
+
+	init_completion(&bnad->qstop_comp);
+
+	spin_lock_irq(&bnad->priv_lock);
+	do_gettimeofday(&tv);
+	DPRINTK(DEBUG, "Calling bna_multi_rxq_stop at %ld:%ld\n",
+		tv.tv_sec, tv.tv_usec);
+	err = bna_multi_rxq_stop(bnad->priv, rxq_id_mask);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err)
+		goto rxq_stop_exit;
+
+	DPRINTK(INFO, "Waiting for %s RxQs(0x%llx) stop reply\n",
+		bnad->netdev->name, rxq_id_mask);
+	wait_for_completion(&bnad->qstop_comp);
+
+	do_gettimeofday(&tv);
+	DPRINTK(DEBUG, "bna_multi_rxq_stop returned at %ld:%ld\n",
+		tv.tv_sec, tv.tv_usec);
+	err = bnad->qstop_comp_status;
+rxq_stop_exit:
+	if (err)
+		DPRINTK(ERR, "%s bna_multi_rxq_stop(0x%llx) failed %d\n",
+			bnad->netdev->name, rxq_id_mask, err);
+	return err;
+
+}
+
+static int bnad_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+	    container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	netif_rx_complete(bnad->netdev, napi);
+	bnad->stats.netif_rx_complete++;
+	bnad_enable_rx_irq(bnad, cqinfo);
+	return rcvd;
+}
+
+static int bnad_poll_txrx(struct napi_struct *napi, int budget)
+{
+	struct bnad_cq_info *cqinfo =
+	    container_of(napi, struct bnad_cq_info, napi);
+	struct bnad *bnad = cqinfo->bnad;
+	unsigned int rcvd;
+
+	bnad_tx(bnad, &bnad->txq_table[0]);
+	rcvd = bnad_poll_cq(bnad, cqinfo, budget);
+	if (rcvd == budget)
+		return rcvd;
+	netif_rx_complete(bnad->netdev, napi);
+	bnad->stats.netif_rx_complete++;
+	bnad_enable_txrx_irqs(bnad);
+	return rcvd;
+}
+
+static void bnad_napi_init(struct bnad *bnad)
+{
+	int (*napi_poll)(struct napi_struct *, int);
+	int i;
+
+	if (bnad->flags & BNAD_F_MSIX)
+		napi_poll = bnad_poll_rx;
+	else
+		napi_poll = bnad_poll_txrx;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_add(bnad->netdev, &bnad->cq_table[i].napi,
+		    napi_poll, 64);
+}
+
+static void bnad_napi_enable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_enable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_disable(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		napi_disable(&bnad->cq_table[i].napi);
+}
+
+static void bnad_napi_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < bnad->cq_num; i++)
+		netif_napi_del(&bnad->cq_table[i].napi);
+}
+
+
+static void bnad_detach(struct bnad *bnad)
+{
+	int i;
+
+	ASSERT_RTNL();
+
+	spin_lock_irq(&bnad->priv_lock);
+	if (!test_bit(BNAD_RESETTING, &bnad->state)) {
+		/* Graceful detach */
+
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	} else {
+		/* Error */
+		/* XXX Should not write to registers if RESETTING. */
+
+		bna_txf_disable(bnad->priv, BNAD_TX_FUNC_ID);
+		bna_rxf_disable_old(bnad->priv, BNAD_RX_FUNC_ID);
+
+		for (i = 0; i < bnad->txq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->txq_table[i].ib);
+		for (i = 0; i < bnad->cq_num; i++)
+			bna_ib_disable(bnad->priv, &bnad->cq_table[i].ib);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Wait to make sure Tx and Rx are stopped. */
+	msleep(1000);
+	bnad_free_txrx_irqs(bnad);
+	bnad_sync_mbox_irq(bnad);
+
+		bnad_napi_disable(bnad);
+		bnad_napi_uninit(bnad);
+
+	/* Delete the stats timer after synchronize with mbox irq. */
+	del_timer_sync(&bnad->stats_timer);
+		netif_tx_disable(bnad->netdev);
+		netif_carrier_off(bnad->netdev);
+}
+
+static int bnad_disable(struct bnad *bnad)
+{
+	int err, i;
+	u64 rxq_id_mask = 0;
+
+	ASSERT_RTNL();
+		DPRINTK(INFO, "bring %s link down\n", bnad->netdev->name);
+		spin_lock_irq(&bnad->priv_lock);
+		bna_port_admin(bnad->priv, BNA_DISABLE);
+		spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_detach(bnad);
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			return err;
+	}
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int bnad_sw_reset(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+
+	if (!netif_running(bnad->netdev))
+		return 0;
+
+	err = bnad_stop_locked(netdev);
+	if (err) {
+		DPRINTK(WARNING, "%s sw reset: disable failed %d\n",
+			bnad->netdev->name, err);
+		/* Recoverable */
+		return 0;
+	}
+
+	err = bnad_open_locked(netdev);
+	if (err) {
+		DPRINTK(WARNING, "%s sw reset: enable failed %d\n",
+			bnad->netdev->name, err);
+		return err;
+	}
+
+	return 0;
+}
+
+int bnad_resetting(struct bnad *bnad)
+{
+	rtnl_lock();
+	if (netif_running(bnad->netdev))
+		bnad_stop_locked(bnad->netdev);
+	set_bit(BNAD_RESETTING, &bnad->state);
+	rtnl_unlock();
+	return 0;
+}
+
+int bnad_alloc_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	ib_entry->ib_seg_addr = pci_alloc_consistent(bnad->pcidev,
+	    L1_CACHE_BYTES, &dma_addr);
+	if (!ib_entry->ib_seg_addr)
+		return -ENOMEM;
+	DPRINTK(DEBUG, "%s IB %d dma addr 0x%llx\n",
+		bnad->netdev->name, ib_id, dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &ib_entry->ib_config.ib_seg_addr);
+	return 0;
+}
+static int bnad_alloc_ibs(struct bnad *bnad)
+{
+	uint i;
+	int err;
+
+	bnad->ib_num = bnad->txq_num + bnad->cq_num;
+	bnad->ib_table = kzalloc(bnad->ib_num *
+	    sizeof(struct bnad_ib_entry), GFP_KERNEL);
+	if (!bnad->ib_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->ib_num; i++) {
+		err = bnad_alloc_ib(bnad, i);
+		if (err)
+			goto free_ibs;
+	}
+	return 0;
+
+free_ibs:
+	bnad_free_ibs(bnad);
+	return err;
+}
+
+void bnad_free_ib(struct bnad *bnad, uint ib_id)
+{
+	struct bnad_ib_entry *ib_entry;
+	dma_addr_t dma_addr;
+
+	BNA_ASSERT(bnad->ib_table && ib_id < bnad->ib_num);
+	ib_entry = &bnad->ib_table[ib_id];
+	if (ib_entry->ib_seg_addr) {
+		BNA_GET_DMA_ADDR(&ib_entry->ib_config.ib_seg_addr, dma_addr);
+		pci_free_consistent(bnad->pcidev, L1_CACHE_BYTES,
+		    ib_entry->ib_seg_addr, dma_addr);
+		ib_entry->ib_seg_addr = NULL;
+	}
+}
+
+static void bnad_free_ibs(struct bnad *bnad)
+{
+	uint i;
+
+	if (!bnad->ib_table)
+		return;
+
+	for (i = 0; i < bnad->ib_num; i++)
+		bnad_free_ib(bnad, i);
+	kfree(bnad->ib_table);
+	bnad->ib_table = NULL;
+}
+
+/* Let the caller deal with error - free memory. */
+static int bnad_alloc_q(struct bnad *bnad, struct bna_qpt *qpt,
+	struct bna_q *q, size_t qsize)
+{
+	size_t i;
+	dma_addr_t dma_addr;
+
+	qsize = ALIGN(qsize, PAGE_SIZE);
+	qpt->page_count = qsize >> PAGE_SHIFT;
+	qpt->page_size = PAGE_SIZE;
+
+	DPRINTK(DEBUG, "qpt page count 0x%x, ", qpt->page_count);
+	DPRINTK(DEBUG, "page size 0x%x\n", qpt->page_size);
+
+	qpt->kv_qpt_ptr = pci_alloc_consistent(bnad->pcidev,
+	    qpt->page_count * sizeof(struct bna_dma_addr), &dma_addr);
+	if (!qpt->kv_qpt_ptr)
+		return -ENOMEM;
+	BNA_SET_DMA_ADDR(dma_addr, &qpt->hw_qpt_ptr);
+	DPRINTK(DEBUG, "qpt host addr %p, ", qpt->kv_qpt_ptr);
+	DPRINTK(DEBUG, "dma addr 0x%llx\n", dma_addr);
+
+	q->qpt_ptr = kzalloc(qpt->page_count * sizeof(void *), GFP_KERNEL);
+	if (!q->qpt_ptr)
+		return -ENOMEM;
+	qpt->qpt_ptr = q->qpt_ptr;
+	for (i = 0; i < qpt->page_count; i++) {
+		q->qpt_ptr[i] = pci_alloc_consistent(bnad->pcidev, PAGE_SIZE,
+		    &dma_addr);
+		if (!q->qpt_ptr[i])
+			return -ENOMEM;
+		BNA_SET_DMA_ADDR(dma_addr,
+		    &((struct bna_dma_addr *)qpt->kv_qpt_ptr)[i]);
+
+		DPRINTK(DEBUG, "page %d ", (int)i);
+		DPRINTK(DEBUG, "host addr %p, ", q->qpt_ptr[i]);
+		DPRINTK(DEBUG, "dma addr 0x%llx\n", dma_addr);
+	}
+
+	return 0;
+}
+
+static void
+bnad_free_q(struct bnad *bnad, struct bna_qpt *qpt, struct bna_q *q)
+{
+	int i;
+	dma_addr_t dma_addr;
+
+	if (qpt->kv_qpt_ptr && q->qpt_ptr) {
+		for (i = 0; i < qpt->page_count; i++) {
+			if (q->qpt_ptr[i]) {
+				BNA_GET_DMA_ADDR(
+				    &((struct bna_dma_addr *)
+					qpt->kv_qpt_ptr)[i], dma_addr);
+				pci_free_consistent(bnad->pcidev, PAGE_SIZE,
+				    q->qpt_ptr[i], dma_addr);
+			}
+		}
+	}
+
+	kfree(q->qpt_ptr);
+	qpt->qpt_ptr = q->qpt_ptr = NULL;
+
+	if (qpt->kv_qpt_ptr) {
+		BNA_GET_DMA_ADDR(&qpt->hw_qpt_ptr, dma_addr);
+		pci_free_consistent(bnad->pcidev,
+		    qpt->page_count * sizeof(struct bna_dma_addr),
+		    qpt->kv_qpt_ptr, dma_addr);
+		qpt->kv_qpt_ptr = NULL;
+	}
+}
+
+static void bnad_free_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+	if (txqinfo->skb_unmap_q.unmap_array) {
+		bnad_free_txbufs(txqinfo, txqinfo->txq.q.producer_index);
+		vfree(txqinfo->skb_unmap_q.unmap_array);
+		txqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+	if (rxqinfo->skb_unmap_q.unmap_array) {
+		bnad_flush_rxbufs(rxqinfo);
+		vfree(rxqinfo->skb_unmap_q.unmap_array);
+		rxqinfo->skb_unmap_q.unmap_array = NULL;
+	}
+}
+
+void bnad_free_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo = &bnad->cq_table[cq_id];
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+	vfree(cqinfo->lro.lro_arr);
+	cqinfo->lro.lro_arr = NULL;
+}
+
+static void bnad_free_queues(struct bnad *bnad)
+{
+	uint i;
+
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++)
+			bnad_free_txq(bnad, i);
+		kfree(bnad->txq_table);
+		bnad->txq_table = NULL;
+	}
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++)
+			bnad_free_rxq(bnad, i);
+		kfree(bnad->rxq_table);
+		bnad->rxq_table = NULL;
+	}
+
+	if (bnad->cq_table) {
+		for (i = 0; i < bnad->cq_num; i++)
+			bnad_free_cq(bnad, i);
+		kfree(bnad->cq_table);
+		bnad->cq_table = NULL;
+	}
+}
+
+static int bnad_txq_init(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->txq_table && txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	DPRINTK(DEBUG, "%s allocating TxQ %d\n", bnad->netdev->name, txq_id);
+	err = bnad_alloc_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q,
+	    bnad->txq_depth * sizeof(struct bna_txq_entry));
+	if (err) {
+		bnad_free_q(bnad, &txqinfo->txq_config.qpt, &txqinfo->txq.q);
+		return err;
+	}
+	txqinfo->txq.q.q_depth = bnad->txq_depth;
+	txqinfo->bnad = bnad;
+	txqinfo->txq_config.txf_id = BNAD_TX_FUNC_ID;
+	snprintf(txqinfo->name, sizeof(txqinfo->name), "%s TxQ %d",
+	    bnad->netdev->name, txq_id);
+	return 0;
+}
+
+static int bnad_txqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->txq_table = kzalloc(bnad->txq_num *
+	    sizeof(struct bnad_txq_info), GFP_KERNEL);
+	if (!bnad->txq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_txq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	DPRINTK(DEBUG, "%s allocating RxQ %d\n", bnad->netdev->name, rxq_id);
+	err = bnad_alloc_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q,
+	    bnad->rxq_depth * sizeof(struct bna_rxq_entry));
+	if (err) {
+		bnad_free_q(bnad, &rxqinfo->rxq_config.qpt, &rxqinfo->rxq.q);
+		return err;
+	}
+	rxqinfo->rxq.q.q_depth = bnad->rxq_depth;
+	rxqinfo->bnad = bnad;
+	rxqinfo->rxq_id = rxq_id;
+	rxqinfo->rxq_config.cq_id = rxq_id / bnad_rxqs_per_cq;
+
+	return 0;
+}
+
+static int bnad_rxqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->rxq_table = kzalloc(bnad->rxq_num *
+	    sizeof(struct bnad_rxq_info), GFP_KERNEL);
+	if (!bnad->rxq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->rxq_num; i++) {
+		err = bnad_rxq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int bnad_cq_init(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+	int err;
+
+	BNA_ASSERT(bnad->cq_table && cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	DPRINTK(DEBUG, "%s allocating CQ %d\n", bnad->netdev->name, cq_id);
+	err = bnad_alloc_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q,
+	    bnad->rxq_depth * bnad_rxqs_per_cq * sizeof(struct bna_cq_entry));
+	if (err) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return err;
+	}
+
+	cqinfo->cq.q.q_depth = bnad->rxq_depth * bnad_rxqs_per_cq;
+	cqinfo->bnad = bnad;
+
+	cqinfo->lro.dev = bnad->netdev;
+	cqinfo->lro.features |= LRO_F_NAPI;
+	if (bnad_vlan_strip)
+		cqinfo->lro.features |= LRO_F_EXTRACT_VLAN_ID;
+	cqinfo->lro.ip_summed = CHECKSUM_UNNECESSARY;
+	cqinfo->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+	cqinfo->lro.max_desc = BNAD_LRO_MAX_DESC;
+	cqinfo->lro.max_aggr = BNAD_LRO_MAX_AGGR;
+	/* XXX */
+	cqinfo->lro.frag_align_pad = 0;
+	cqinfo->lro.lro_arr = vmalloc(BNAD_LRO_MAX_DESC *
+	    sizeof(struct net_lro_desc));
+	if (!cqinfo->lro.lro_arr) {
+		bnad_free_q(bnad, &cqinfo->cq_config.qpt, &cqinfo->cq.q);
+		return -ENOMEM;
+	}
+	memset(cqinfo->lro.lro_arr, 0, BNAD_LRO_MAX_DESC *
+	    sizeof(struct net_lro_desc));
+	cqinfo->lro.get_skb_header = bnad_lro_get_skb_header;
+
+	cqinfo->rx_coalescing_timeo = bnad->rx_coalescing_timeo;
+
+	cqinfo->cq_id = cq_id;
+	snprintf(cqinfo->name, sizeof(cqinfo->name), "%s CQ %d",
+	    bnad->netdev->name, cq_id);
+
+	return 0;
+}
+
+static int bnad_cqs_init(struct bnad *bnad)
+{
+	int i, err = 0;
+
+	bnad->cq_table = kzalloc(bnad->cq_num * sizeof(struct bnad_cq_info),
+	    GFP_KERNEL);
+	if (!bnad->cq_table)
+		return -ENOMEM;
+
+	for (i = 0; i < bnad->cq_num; i++) {
+		err = bnad_cq_init(bnad, i);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+static uint bnad_get_qsize(uint qsize_conf, uint mtu)
+{
+	uint qsize;
+
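+	/*
+	 * Scale the configured depth down for jumbo MTUs so the total
+	 * buffer memory stays roughly constant, then round up to a
+	 * power of 2 and enforce the minimum depth.
+	 */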
+	if (mtu > ETH_DATA_LEN) {
+		qsize = qsize_conf / (mtu / ETH_DATA_LEN);
+		if (!BNA_POWER_OF_2(qsize))
+			BNA_TO_POWER_OF_2_HIGH(qsize);
+		if (qsize < BNAD_MIN_Q_DEPTH)
+			qsize = BNAD_MIN_Q_DEPTH;
+	} else
+		qsize = qsize_conf;
+
+	return qsize;
+}
+
+static int bnad_init_queues(struct bnad *bnad)
+{
+	int err;
+
+	if (!(bnad->flags & BNAD_F_TXQ_DEPTH))
+		bnad->txq_depth = bnad_get_qsize(bnad_txq_depth,
+		    bnad->netdev->mtu);
+	if (!(bnad->flags & BNAD_F_RXQ_DEPTH))
+		bnad->rxq_depth = bnad_get_qsize(bnad_rxq_depth,
+		    bnad->netdev->mtu);
+
+	err = bnad_txqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_rxqs_init(bnad);
+	if (err)
+		return err;
+
+	err = bnad_cqs_init(bnad);
+
+	return err;
+}
+
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id)
+{
+	struct bnad_cq_info *cqinfo;
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+
+	BNA_ASSERT(cq_id < bnad->cq_num && ib_id < bnad->ib_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	ib_entry = &bnad->ib_table[ib_id];
+
+	cqinfo->hw_producer_index = (u32 *)(ib_entry->ib_seg_addr);
+	cqinfo->cq_config.ib_id = ib_id;
+	cqinfo->cq_config.ib_seg_index = 0;
+
+	ib_entry->ib = &cqinfo->ib;
+	ib_config = &ib_entry->ib_config;
+	ib_config->coalescing_timer = bnad->rx_coalescing_timeo;
+#if 1
+	ib_config->control_flags = BNA_IB_CF_INT_ENABLE |
+	    BNA_IB_CF_MASTER_ENABLE;
+#else
+	ib_config->control_flags = BNA_IB_CF_INT_ENABLE |
+	    BNA_IB_CF_INTER_PKT_ENABLE | BNA_IB_CF_MASTER_ENABLE;
+	ib_config->interpkt_count = bnad->rx_interpkt_count;
+	ib_config->interpkt_timer = bnad->rx_interpkt_timeo;
+#endif
+	if (bnad->flags & BNAD_F_MSIX) {
+		ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+		ib_config->msix_vector = ib_id;
+	} else
+		ib_config->msix_vector = 1 << ib_id;
+
+	/* Every CQ has its own IB. */
+	ib_config->seg_size = 1;
+	ib_config->index_table_offset = ib_id;
+}
+
+static void bnad_ibs_init(struct bnad *bnad)
+{
+	struct bnad_ib_entry *ib_entry;
+	struct bna_ib_config *ib_config;
+	struct bnad_txq_info *txqinfo;
+
+	int ib_id, i;
+
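+	/*
+	 * Interrupt blocks are handed out in order: one per TxQ first,
+	 * then one per CQ (the loop at the end of this function).
+	 */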
+	ib_id = 0;
+	for (i = 0; i < bnad->txq_num; i++) {
+		txqinfo = &bnad->txq_table[i];
+		ib_entry = &bnad->ib_table[ib_id];
+
+		txqinfo->hw_consumer_index = ib_entry->ib_seg_addr;
+		txqinfo->txq_config.ib_id = ib_id;
+		txqinfo->txq_config.ib_seg_index = 0;
+
+		ib_entry->ib = &txqinfo->ib;
+		ib_config = &ib_entry->ib_config;
+		ib_config->coalescing_timer = bnad->tx_coalescing_timeo;
+		ib_config->control_flags = BNA_IB_CF_INTER_PKT_DMA |
+		    BNA_IB_CF_INT_ENABLE | BNA_IB_CF_COALESCING_MODE |
+		    BNA_IB_CF_MASTER_ENABLE;
+		if (bnad->flags & BNAD_F_MSIX) {
+			ib_config->control_flags |= BNA_IB_CF_MSIX_MODE;
+			ib_config->msix_vector = ib_id;
+		} else
+			ib_config->msix_vector = 1 << ib_id;
+		ib_config->interpkt_count = bnad->tx_interpkt_count;
+
+		/* Every TxQ has its own IB. */
+		ib_config->seg_size = 1;
+		ib_config->index_table_offset = ib_id;
+		ib_id++;
+	}
+
+	for (i = 0; i < bnad->cq_num; i++, ib_id++)
+		bnad_rxib_init(bnad, i, ib_id);
+}
+
+static void bnad_txf_init(struct bnad *bnad, uint txf_id)
+{
+	struct bnad_txf_info *txf_info;
+
+	BNA_ASSERT(bnad->txf_table && txf_id < bnad->txf_num);
+	txf_info = &bnad->txf_table[txf_id];
+	txf_info->txf_id = txf_id;
+	txf_info->txf_config.flags = BNA_TXF_CF_VLAN_WI_BASED |
+	    BNA_TXF_CF_ENABLE;
+}
+
+void bnad_rxf_init(struct bnad *bnad, uint rxf_id, u8 rit_offset, int rss)
+{
+	struct bnad_rxf_info *rxf_info;
+
+	BNA_ASSERT(bnad->rxf_table && rxf_id < bnad->rxf_num);
+	rxf_info = &bnad->rxf_table[rxf_id];
+	rxf_info->rxf_id = rxf_id;
+	rxf_info->rxf_config.rit_offset = rit_offset;
+	rxf_info->rxf_config.mcast_rxq_id = BNAD_MULTICAST_RXQ_ID;
+	if (bnad_small_large_rxbufs)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_SM_LG_RXQ;
+	if (bnad_vlan_strip)
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_VLAN_STRIP;
+	if (rss) {
+		struct bna_rxf_rss *rxf_rss;
+
+		rxf_info->rxf_config.flags |= BNA_RXF_CF_RSS_ENABLE;
+		rxf_rss = &rxf_info->rxf_config.rss;
+		rxf_rss->type = BNA_RSS_V4_TCP | BNA_RSS_V4_IP |
+		    BNA_RSS_V6_TCP | BNA_RSS_V6_IP;
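+		/* cq_num is rounded to a power of 2 in bnad_q_num_init. */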
+		rxf_rss->hash_mask = bnad->cq_num - 1;
+		get_random_bytes(rxf_rss->toeplitz_hash_key,
+		    sizeof(rxf_rss->toeplitz_hash_key));
+	}
+	DPRINTK(DEBUG, "%s RxF %u config flags 0x%x\n",
+		bnad->netdev->name, rxf_id, rxf_info->rxf_config.flags);
+}
+
+static int bnad_init_funcs(struct bnad *bnad)
+{
+	bnad->txf_table = kzalloc(sizeof(struct bnad_txf_info) * bnad->txf_num,
+				  GFP_KERNEL);
+	if (!bnad->txf_table)
+		return -ENOMEM;
+	bnad_txf_init(bnad, BNAD_TX_FUNC_ID);
+
+	bnad->rxf_table = kzalloc(sizeof(struct bnad_rxf_info) * bnad->rxf_num,
+				  GFP_KERNEL);
+	if (!bnad->rxf_table)
+		return -ENOMEM;
+	bnad_rxf_init(bnad, BNAD_RX_FUNC_ID, BNAD_RIT_OFFSET,
+	    (bnad->cq_num > 1) ? 1 : 0);
+	return 0;
+}
+
+static void bnad_setup_txq(struct bnad *bnad, uint txq_id)
+{
+	struct bnad_txq_info *txqinfo;
+
+	BNA_ASSERT(txq_id < bnad->txq_num);
+	txqinfo = &bnad->txq_table[txq_id];
+	txqinfo->txq_config.priority = txq_id;
+	/*  Set wrr_quota properly if multiple priorities/TxQs are enabled. */
+	txqinfo->txq_config.wrr_quota = BNAD_TX_MAX_WRR_QUOTA;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txq_config(bnad->priv, &txqinfo->txq, txq_id,
+	    &txqinfo->txq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo;
+
+	BNA_ASSERT(rxq_id < bnad->rxq_num);
+	rxqinfo = &bnad->rxq_table[rxq_id];
+	/*
+	 * Every RxQ set has 2 RxQs: the first is large buffer RxQ,
+	 * the second is small buffer RxQ.
+	 */
+	if ((rxq_id % bnad_rxqs_per_cq) == 0)
+		rxqinfo->rxq_config.buffer_size =
+		    (bnad_vlan_strip ? VLAN_ETH_HLEN : ETH_HLEN) +
+		    bnad->netdev->mtu + ETH_FCS_LEN;
+	else
+		rxqinfo->rxq_config.buffer_size = BNAD_SMALL_RXBUF_SIZE;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxq_config(bnad->priv, &rxqinfo->rxq, rxq_id,
+	    &rxqinfo->rxq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_setup_cq(struct bnad *bnad, uint cq_id)
+{
+	struct bnad_cq_info *cqinfo;
+
+	BNA_ASSERT(cq_id < bnad->cq_num);
+	cqinfo = &bnad->cq_table[cq_id];
+	spin_lock_irq(&bnad->priv_lock);
+	bna_cq_config(bnad->priv, &cqinfo->cq, cq_id,
+	    &cqinfo->cq_config);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+static void bnad_setup_queues(struct bnad *bnad)
+{
+	uint i;
+
+	for (i = 0; i < bnad->txq_num; i++)
+		bnad_setup_txq(bnad, i);
+
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_setup_rxq(bnad, i);
+
+	for (i = 0; i < bnad->cq_num; i++)
+		bnad_setup_cq(bnad, i);
+}
+
+
+static void bnad_setup_rit(struct bnad *bnad)
+{
+	int i, size;
+
+	size = bnad->cq_num;
+
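+	/*
+	 * One RIT entry per CQ; with small/large buffers enabled each
+	 * entry points at the (large, small) RxQ pair for that CQ.
+	 */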
+	for (i = 0; i < size; i++) {
+		if (bnad_small_large_rxbufs) {
+			bnad->rit[i].large_rxq_id = (i << 1);
+			bnad->rit[i].small_rxq_id = (i << 1) + 1;
+		} else
+			bnad->rit[i].large_rxq_id = i;
+	}
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rit_config_set(bnad->priv, BNAD_RIT_OFFSET,
+	    bnad->rit, size);
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id)
+{
+	struct bnad_rxq_info *rxqinfo = &bnad->rxq_table[rxq_id];
+	u16 rxbufs;
+
+	BNA_ASSERT(bnad->rxq_table && rxq_id < bnad->rxq_num);
+	bnad_alloc_rxbufs(rxqinfo);
+	rxbufs = BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+	    rxqinfo->skb_unmap_q.q_depth);
+	DPRINTK(INFO, "%s allocated %u rx buffers for RxQ %u\n",
+		bnad->netdev->name, rxbufs, rxq_id);
+}
+
+static int bnad_config_hw(struct bnad *bnad)
+{
+	int i, err;
+	u64 rxq_id_mask = 0;
+	struct sockaddr sa;
+	struct net_device *netdev = bnad->netdev;
+
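+	/*
+	 * Quiesce the hardware first (Rx functions, TxQs, RxQs), then
+	 * program the queues, the indirection table and the Tx/Rx
+	 * functions before restoring MAC address, filters and VLANs.
+	 */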
+	spin_lock_irq(&bnad->priv_lock);
+	/* Disable the RxFs until the port is brought up later. */
+	bna_multi_rxf_disable(bnad->priv, (1 << bnad->rxf_num) - 1);
+	spin_unlock_irq(&bnad->priv_lock);
+	for (i = 0; i < bnad->txq_num; i++) {
+		err = bnad_disable_txq(bnad, i);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		rxq_id_mask |= (1 << i);
+	if (rxq_id_mask) {
+		err = bnad_disable_rxqs(bnad, rxq_id_mask);
+		if (err)
+			return err;
+	}
+
+	bnad_setup_queues(bnad);
+
+	bnad_setup_rit(bnad);
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_txf_config_set(bnad->priv, BNAD_TX_FUNC_ID,
+	    &bnad->txf_table->txf_config);
+	for (i = 0; i < bnad->rxf_num; i++) {
+		bna_rxf_config_set(bnad->priv, i,
+		    &bnad->rxf_table[i].rxf_config);
+		bna_rxf_vlan_filter(bnad->priv, i, BNA_ENABLE);
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+
+	/* Mailbox should be enabled before this! */
+	memcpy(sa.sa_data, netdev->dev_addr, netdev->addr_len);
+	bnad_set_mac_address_locked(netdev, &sa);
+
+	spin_lock_irq(&bnad->priv_lock);
+	/* Receive broadcasts */
+	bna_rxf_broadcast(bnad->priv, BNAD_RX_FUNC_ID, BNA_ENABLE);
+
+	bna_mtu_info(bnad->priv, netdev->mtu, bnad);
+	bna_set_pause_config(bnad->priv, &bnad->pause_config, bnad);
+
+	bna_rxf_mcast_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	bna_mcast_mac_reset_list(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	bnad_set_rx_mode_locked(bnad->netdev);
+
+	bnad_reconfig_vlans(bnad);
+
+	bnad_setup_ibs(bnad);
+
+	return 0;
+}
+
+/* Note: bnad_cleanup frees queues but does not free irqs. */
+static void bnad_cleanup(struct bnad *bnad)
+{
+	kfree(bnad->rit);
+	bnad->rit = NULL;
+	kfree(bnad->txf_table);
+	bnad->txf_table = NULL;
+	kfree(bnad->rxf_table);
+	bnad->rxf_table = NULL;
+
+	bnad_free_ibs(bnad);
+	bnad_free_queues(bnad);
+}
+
+/* Should be called with rtnl_lock held. */
+static int bnad_start(struct bnad *bnad)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	err = bnad_alloc_ibs(bnad);
+	if (err)
+		return err;
+
+	err = bnad_init_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad_ibs_init(bnad);
+
+	err = bnad_init_funcs(bnad);
+	if (err)
+		goto finished;
+
+	err = bnad_alloc_unmap_queues(bnad);
+	if (err)
+		goto finished;
+
+	bnad->rit = kzalloc(bnad->cq_num * sizeof(struct bna_rit_entry),
+	    GFP_KERNEL);
+
+	if (!bnad->rit) {
+		err = -ENOMEM;
+		goto finished;
+	}
+
+	err = bnad_config_hw(bnad);
+	if (err)
+		goto finished;
+
+	bnad_napi_init(bnad);
+	bnad_napi_enable(bnad);
+
+	err = bnad_request_txrx_irqs(bnad);
+	if (err) {
+		DPRINTK(ERR, "%s failed to request Tx/Rx irqs: %d\n",
+			bnad->netdev->name, err);
+		goto finished;
+	}
+	return 0;
+
+finished:
+	bnad_cleanup(bnad);
+	return err;
+}
+
+int bnad_open_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	uint i;
+	int err;
+
+	ASSERT_RTNL();
+	DPRINTK(WARNING, "%s open\n", netdev->name);
+
+	if (BNAD_NOT_READY(bnad)) {
+		DPRINTK(WARNING, "%s is not ready yet (0x%lx)\n",
+			netdev->name, bnad->state);
+		return 0;
+	}
+
+	if (!test_bit(BNAD_DISABLED, &bnad->state)) {
+		DPRINTK(WARNING, "%s is already opened (0x%lx)\n",
+			netdev->name, bnad->state);
+
+		return 0;
+	}
+
+	err = bnad_start(bnad);
+	if (err) {
+		DPRINTK(ERR, "%s failed to start %d\n", netdev->name, err);
+		return err;
+	}
+	for (i = 0; i < bnad->rxq_num; i++)
+		bnad_alloc_for_rxq(bnad, i);
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_DISABLED, &bnad->state);
+	DPRINTK(INFO, "%s is opened\n", bnad->netdev->name);
+
+	/* XXX Packets may come in before we bring the port up. */
+	spin_lock_irq(&bnad->priv_lock);
+
+	/* RxF was disabled earlier. */
+	bna_rxf_enable(bnad->priv, BNAD_RX_FUNC_ID);
+	spin_unlock_irq(&bnad->priv_lock);
+
+
+	DPRINTK(INFO, "Bring %s link up\n", netdev->name);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_port_admin(bnad->priv, BNA_ENABLE);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	mod_timer(&bnad->stats_timer, jiffies + HZ);
+
+	return 0;
+}
+
+int bnad_stop_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	ASSERT_RTNL();
+	DPRINTK(WARNING, "%s stop\n", netdev->name);
+
+	if (test_and_set_bit(BNAD_DISABLED, &bnad->state)) {
+		if (BNAD_NOT_READY(bnad))
+			DPRINTK(WARNING, "%s is not ready (0x%lx)\n",
+				netdev->name, bnad->state);
+		else
+			DPRINTK(WARNING, "%s is already stopped (0x%lx)\n",
+				netdev->name, bnad->state);
+		return 0;
+	}
+
+	bnad_disable(bnad);
+	bnad_cleanup(bnad);
+	DPRINTK(INFO, "%s is stopped\n", bnad->netdev->name);
+	return 0;
+}
+
+int bnad_open(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int error = 0;
+
+	bnad_lock();
+	if (!test_bit(BNAD_PORT_DISABLED, &bnad->state))
+		error = bnad_open_locked(netdev);
+	bnad_unlock();
+	return error;
+}
+
+int bnad_stop(struct net_device *netdev)
+{
+	int error = 0;
+
+	bnad_lock();
+	error = bnad_stop_locked(netdev);
+	bnad_unlock();
+	return error;
+}
+
+static int bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+#ifdef NETIF_F_TSO
+	int err;
+
+#ifdef SKB_GSO_TCPV4
+	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 have been defined since 2.6.18. */
+	BNA_ASSERT(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+	    skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6);
+#endif
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err) {
+			bnad->stats.tso_err++;
+			return err;
+		}
+	}
+
+	/*
+	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
+	 * excluding the length field.
+	 */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		/* Do we really need these? */
+		iph->tot_len = 0;
+		iph->check = 0;
+
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(
+		    iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0);
+		bnad->stats.tso4++;
+#ifdef NETIF_F_TSO6
+	} else {
+		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+		BNA_ASSERT(skb->protocol == htons(ETH_P_IPV6));
+		ipv6h->payload_len = 0;
+		tcp_hdr(skb)->check = ~csum_ipv6_magic(
+		    &ipv6h->saddr, &ipv6h->daddr, 0, IPPROTO_TCP, 0);
+		bnad->stats.tso6++;
+#endif
+	}
+
+	return 0;
+#else
+	return -EINVAL;
+#endif
+}
+
+int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_txq_info *txqinfo;
+	struct bna_txq *txq;
+	struct bnad_unmap_q *unmap_q;
+	u16 txq_prod;
+	unsigned int unmap_prod, wis, wis_used, wi_range;
+	unsigned int vectors, vect_id, i, acked;
+	int err;
+	dma_addr_t dma_addr;
+	struct bna_txq_entry *txqent;
+	bna_txq_wi_ctrl_flag_t flags;
+
+	if (unlikely(skb->len <= ETH_HLEN ||
+	    skb->len > BNAD_TX_MAX_DATA_PER_WI)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	txqinfo = &bnad->txq_table[0];
+	txq = &txqinfo->txq;
+	unmap_q = &txqinfo->skb_unmap_q;
+
+	vectors = 1 + skb_shinfo(skb)->nr_frags;
+	if (vectors > BNAD_TX_MAX_VECTORS) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	wis = BNAD_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
+	acked = 0;
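+	/*
+	 * If the TxQ or the unmap queue is short on space, try to reclaim
+	 * completed Tx buffers first; otherwise stop the netif queue and
+	 * re-check, waking the queue again if room has opened up.
+	 */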
+	if (unlikely(wis > BNA_Q_FREE_COUNT(txq) ||
+	    vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+		if ((u16)(*txqinfo->hw_consumer_index) !=
+		    txq->q.consumer_index &&
+		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+			acked = bnad_free_txbufs(txqinfo,
+			    (u16)(*txqinfo->hw_consumer_index));
+			bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+			DPRINTK(DEBUG, "%s ack TxQ IB %u packets\n",
+				netdev->name, acked);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+		} else {
+			netif_stop_queue(netdev);
+		}
+
+		smp_mb();
+		/*
+		 * Check again to deal with race condition between
+		 * netif_stop_queue here, and netif_wake_queue in
+		 * interrupt handler which is not inside netif tx lock.
+		 */
+		if (likely(wis > BNA_Q_FREE_COUNT(txq) ||
+		    vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+			bnad->stats.netif_queue_stop++;
+			return NETDEV_TX_BUSY;
+		} else {
+			netif_wake_queue(netdev);
+		}
+	}
+
+	unmap_prod = unmap_q->producer_index;
+	wis_used = 1;
+	vect_id = 0;
+	flags = 0;
+
+	txq_prod = txq->q.producer_index;
+	BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent, wi_range);
+	BNA_ASSERT(wi_range && wi_range <= txq->q.q_depth);
+	txqent->hdr.wi.reserved = 0;
+	txqent->hdr.wi.num_vectors = vectors;
+	txqent->hdr.wi.opcode = htons((skb_is_gso(skb) ?
+	    BNA_TXQ_WI_SEND_LSO : BNA_TXQ_WI_SEND));
+
+	if (bnad_ipid_mode)
+		flags |= BNA_TXQ_WI_CF_IPID_MODE;
+
+	if (bnad->vlangrp && vlan_tx_tag_present(skb)) {
+		u16 vlan_tag = (u16)vlan_tx_tag_get(skb);
+		if ((vlan_tag >> 13) & 0x7)
+			flags |= BNA_TXQ_WI_CF_INS_PRIO;
+		if (vlan_tag & VLAN_VID_MASK)
+			flags |= BNA_TXQ_WI_CF_INS_VLAN;
+		txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+	} else
+		txqent->hdr.wi.vlan_tag = 0;
+
+	if (skb_is_gso(skb)) {
+		err = bnad_tso_prepare(bnad, skb);
+		if (err) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
+		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+		txqent->hdr.wi.l4_hdr_size_n_offset = htons(
+		    BNA_TXQ_WI_L4_HDR_N_OFFSET(tcp_hdrlen(skb) >> 2,
+		    skb_transport_offset(skb)));
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = 0;
+
+		txqent->hdr.wi.lso_mss = 0;
+
+		if (skb->protocol == htons(ETH_P_IP))
+			proto = ip_hdr(skb)->protocol;
+#ifdef NETIF_F_IPV6_CSUM
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* XXX the nexthdr may not be TCP immediately. */
+			proto = ipv6_hdr(skb)->nexthdr;
+		}
+#endif
+		if (proto == IPPROTO_TCP) {
+			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset = htons(
+			    BNA_TXQ_WI_L4_HDR_N_OFFSET(0,
+			    skb_transport_offset(skb)));
+			bnad->stats.tcpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+			    skb_transport_offset(skb) + tcp_hdrlen(skb));
+		} else if (proto == IPPROTO_UDP) {
+			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+			txqent->hdr.wi.l4_hdr_size_n_offset = htons(
+			    BNA_TXQ_WI_L4_HDR_N_OFFSET(0,
+			    skb_transport_offset(skb)));
+			bnad->stats.udpcsum_offload++;
+			BNA_ASSERT(skb_headlen(skb) >=
+			    skb_transport_offset(skb) + sizeof(struct udphdr));
+		} else {
+			err = skb_checksum_help(skb);
+			bnad->stats.csum_help++;
+			if (err) {
+				dev_kfree_skb(skb);
+				bnad->stats.csum_help_err++;
+				return NETDEV_TX_OK;
+			}
+		}
+	} else {
+		txqent->hdr.wi.lso_mss = 0;
+		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+	}
+
+	txqent->hdr.wi.flags = htons(flags);
+
+	txqent->hdr.wi.frame_length = htonl(skb->len);
+
+	unmap_q->unmap_array[unmap_prod].skb = skb;
+	BNA_ASSERT(skb_headlen(skb) <= BNAD_TX_MAX_DATA_PER_VECTOR);
+	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+	    PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+	    dma_addr);
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
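+	/*
+	 * Map each fragment into its own Tx vector; a work item holds at
+	 * most BNAD_TX_MAX_VECTORS_PER_WI vectors, so an extension work
+	 * item is chained in whenever another one is needed.
+	 */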
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		if (++vect_id == BNAD_TX_MAX_VECTORS_PER_WI) {
+			vect_id = 0;
+			if (--wi_range)
+				txqent++;
+			else {
+				BNA_QE_INDX_ADD(txq_prod, wis_used,
+				    txq->q.q_depth);
+				wis_used = 0;
+				BNA_TXQ_QPGE_PTR_GET(txq_prod, &txq->q, txqent,
+				    wi_range);
+				BNA_ASSERT(wi_range &&
+				    wi_range <= txq->q.q_depth);
+			}
+			wis_used++;
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+		}
+
+		BNA_ASSERT(frag->size <= BNAD_TX_MAX_DATA_PER_VECTOR);
+		txqent->vector[vect_id].length = htons(frag->size);
+		BNA_ASSERT(unmap_q->unmap_array[unmap_prod].skb == NULL);
+		dma_addr = pci_map_page(bnad->pcidev, frag->page,
+		    frag->page_offset, frag->size, PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+		    dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+	}
+
+	unmap_q->producer_index = unmap_prod;
+	BNA_QE_INDX_ADD(txq_prod, wis_used, txq->q.q_depth);
+	txq->q.producer_index = txq_prod;
+
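+	/* Make the work items visible before ringing the TxQ doorbell. */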
+	smp_mb();
+	bna_txq_prod_indx_doorbell(txq);
+	netdev->trans_start = jiffies;
+
+	if ((u16)(*txqinfo->hw_consumer_index) !=
+	    txq->q.consumer_index &&
+	    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags)) {
+		acked = bnad_free_txbufs(txqinfo,
+		    (u16)(*txqinfo->hw_consumer_index));
+		bna_ib_ack(bnad->priv, &txqinfo->ib, acked);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &txqinfo->flags);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+struct net_device_stats *bnad_get_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &bnad->net_stats;
+	struct cna_stats_mac_rx *rxstats = &bnad->hw_stats->mac_rx_stats;
+	struct cna_stats_mac_tx *txstats = &bnad->hw_stats->mac_tx_stats;
+	int i;
+
+	memset(net_stats, 0, sizeof(*net_stats));
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			net_stats->rx_packets += bnad->rxq_table[i].rx_packets;
+			net_stats->rx_bytes += bnad->rxq_table[i].rx_bytes;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			net_stats->tx_packets += bnad->txq_table[i].tx_packets;
+			net_stats->tx_bytes += bnad->txq_table[i].tx_bytes;
+		}
+	}
+	net_stats->rx_errors = rxstats->rx_fcs_error +
+	    rxstats->rx_alignment_error + rxstats->rx_frame_length_error +
+	    rxstats->rx_code_error + rxstats->rx_undersize;
+	net_stats->tx_errors = txstats->tx_fcs_error + txstats->tx_undersize;
+	net_stats->rx_dropped = rxstats->rx_drop;
+	net_stats->tx_dropped = txstats->tx_drop;
+	net_stats->multicast = rxstats->rx_multicast;
+	net_stats->collisions = txstats->tx_total_collision;
+
+	net_stats->rx_length_errors = rxstats->rx_frame_length_error;
+	net_stats->rx_crc_errors = rxstats->rx_fcs_error;
+	net_stats->rx_frame_errors = rxstats->rx_alignment_error;
+	/* receiver FIFO overrun */
+	net_stats->rx_fifo_errors =
+	    bnad->hw_stats->rxf_stats[0].frame_drops;
+
+	return net_stats;
+}
+
+void bnad_reset_stats(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_rxq_info *rxqinfo;
+	struct bnad_txq_info *txqinfo;
+	int i;
+	memset(&bnad->stats, 0, sizeof(bnad->stats));
+
+	if (bnad->rxq_table) {
+		for (i = 0; i < bnad->rxq_num; i++) {
+			rxqinfo = &bnad->rxq_table[i];
+			rxqinfo->rx_packets = 0;
+			rxqinfo->rx_bytes = 0;
+			rxqinfo->rx_packets_with_error = 0;
+			rxqinfo->rxbuf_alloc_failed = 0;
+		}
+	}
+	if (bnad->txq_table) {
+		for (i = 0; i < bnad->txq_num; i++) {
+			txqinfo = &bnad->txq_table[i];
+			txqinfo->tx_packets = 0;
+			txqinfo->tx_bytes = 0;
+		}
+	}
+}
+
+static void bnad_set_rx_mode_locked(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	int err;
+	unsigned long irq_flags;
+
+	if (BNAD_NOT_READY(bnad))
+		return;
+
+	spin_lock_irqsave(&bnad->priv_lock, irq_flags);
+	if (netdev->flags & IFF_PROMISC) {
+		if (!(bnad->flags & BNAD_F_PROMISC)) {
+			bna_rxf_promiscuous(bnad->priv,
+			    BNAD_RX_FUNC_ID, BNA_ENABLE);
+			bnad->flags |= BNAD_F_PROMISC;
+		}
+	} else {
+		if (bnad->flags & BNAD_F_PROMISC) {
+			bna_rxf_promiscuous(bnad->priv,
+			    BNAD_RX_FUNC_ID, BNA_DISABLE);
+			bnad->flags &= ~BNAD_F_PROMISC;
+		}
+	}
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		if (!(bnad->flags & BNAD_F_ALLMULTI)) {
+			bna_rxf_mcast_filter(bnad->priv,
+			    BNAD_RX_FUNC_ID, BNA_DISABLE);
+			bnad->flags |= BNAD_F_ALLMULTI;
+		}
+	} else {
+		if (bnad->flags & BNAD_F_ALLMULTI) {
+			bna_rxf_mcast_filter(bnad->priv,
+			    BNAD_RX_FUNC_ID, BNA_ENABLE);
+			bnad->flags &= ~BNAD_F_ALLMULTI;
+		}
+	}
+	spin_unlock_irqrestore(&bnad->priv_lock, irq_flags);
+
+	if (netdev->mc_count) {
+		u8 *mcaddr_list;
+		u8 bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+		struct dev_mc_list *mc;
+		int i;
+
+		mcaddr_list = kzalloc((netdev->mc_count + 1) *
+		    (ETH_ALEN * sizeof(u8)), GFP_ATOMIC);
+		if (!mcaddr_list)
+			return;
+		memcpy(&mcaddr_list[0], bcast_addr, ETH_ALEN * sizeof(u8));
+
+		mc = netdev->mc_list;
+		for (i = 1; mc && i < netdev->mc_count + 1; i++, mc = mc->next)
+			memcpy(&mcaddr_list[i * ETH_ALEN], mc->dmi_addr,
+				ETH_ALEN * sizeof(u8));
+
+		spin_lock_irqsave(&bnad->priv_lock, irq_flags);
+		err = bna_rxf_mcast_mac_set_list(bnad->priv, BNAD_RX_FUNC_ID,
+		    (const u8 *)mcaddr_list, netdev->mc_count + 1);
+		spin_unlock_irqrestore(&bnad->priv_lock, irq_flags);
+
+		kfree(mcaddr_list);
+	}
+}
+
+static void bnad_set_rx_mode(struct net_device *netdev)
+{
+	bnad_lock();
+	bnad_set_rx_mode_locked(netdev);
+	bnad_unlock();
+}
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id,
+    u8 *mac_ptr, unsigned int cmd)
+{
+	int err = 0;
+	enum bna_status_e (*ucast_mac_func)(struct bna_dev_s *bna_dev,
+		unsigned int rxf_id, const u8 *mac_addr_ptr) = NULL;
+
+	WARN_ON(in_interrupt());
+	if (!is_valid_ether_addr(mac_ptr))
+		return -EINVAL;
+
+	switch (cmd) {
+	case BNAD_UCAST_MAC_SET:
+		ucast_mac_func = bna_rxf_ucast_mac_set;
+		break;
+	case BNAD_UCAST_MAC_ADD:
+		ucast_mac_func = bna_rxf_ucast_mac_add;
+		break;
+	case BNAD_UCAST_MAC_DEL:
+		ucast_mac_func = bna_rxf_ucast_mac_del;
+		break;
+	}
+
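+	/*
+	 * Serialize unicast MAC commands with the BNAD_SET_UCAST bit and
+	 * wait for the completion callback before returning.
+	 */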
+	while (test_and_set_bit(BNAD_SET_UCAST, &bnad->state))
+		msleep(1);
+	init_completion(&bnad->ucast_comp);
+	spin_lock_irq(&bnad->priv_lock);
+	err = ucast_mac_func(bnad->priv, rxf_id, (const u8 *)mac_ptr);
+	spin_unlock_irq(&bnad->priv_lock);
+	if (err)
+		goto ucast_mac_exit;
+
+	DPRINTK(INFO, "Waiting for %s MAC operation %d reply\n",
+		bnad->netdev->name, cmd);
+	wait_for_completion(&bnad->ucast_comp);
+	err = bnad->ucast_comp_status;
+ucast_mac_exit:
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_SET_UCAST, &bnad->state);
+	if (err) {
+		printk(KERN_INFO
+		    "%s unicast MAC address command %d failed: %d\n",
+		    bnad->netdev->name, cmd, err);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int bnad_set_mac_address_locked(struct net_device *netdev, void *addr)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int err;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (!BNAD_NOT_READY(bnad)) {
+		err = bnad_ucast_mac(bnad, BNAD_RX_FUNC_ID, (u8 *)sa->sa_data,
+		    BNAD_UCAST_MAC_SET);
+		if (err)
+			return err;
+	}
+
+	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+	return 0;
+}
+
+static int bnad_set_mac_address(struct net_device *netdev, void *addr)
+{
+	int err = 0;
+
+	bnad_lock();
+	err = bnad_set_mac_address_locked(netdev, addr);
+	bnad_unlock();
+	return err;
+
+}
+
+static int bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int err = 0;
+
+	WARN_ON(in_interrupt());
+
+	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+		return -EINVAL;
+
+	bnad_lock();
+
+	netdev->mtu = new_mtu;
+
+	err = bnad_sw_reset(netdev);
+
+	bnad_unlock();
+
+	return err;
+}
+
+static int bnad_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	return -EOPNOTSUPP;
+}
+
+static void
+bnad_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	bnad_lock();
+	bnad->vlangrp = grp;
+	bnad_unlock();
+}
+
+static void bnad_vlan_rx_add_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	unsigned long irq_flags;
+
+	DPRINTK(INFO, "%s add vlan %u\n", netdev->name, vid);
+	bnad_lock();
+	if (BNAD_NOT_READY(bnad)) {
+		bnad_unlock();
+		return;
+	}
+	spin_lock_irqsave(&bnad->priv_lock, irq_flags);
+	bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID, (unsigned int)vid);
+	spin_unlock_irqrestore(&bnad->priv_lock, irq_flags);
+	bnad_unlock();
+}
+
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	unsigned long irq_flags;
+
+	DPRINTK(INFO, "%s remove vlan %u\n", netdev->name, vid);
+	bnad_lock();
+	if (BNAD_NOT_READY(bnad)) {
+		bnad_unlock();
+		return;
+	}
+	spin_lock_irqsave(&bnad->priv_lock, irq_flags);
+	bna_rxf_vlan_del(bnad->priv, BNAD_RX_FUNC_ID, (unsigned int)vid);
+	spin_unlock_irqrestore(&bnad->priv_lock, irq_flags);
+	bnad_unlock();
+}
+
+static void bnad_reconfig_vlans(struct bnad *bnad)
+{
+	u16 vlan_id;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_rxf_vlan_del_all(bnad->priv, BNAD_RX_FUNC_ID);
+	if (bnad->vlangrp) {
+		for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
+			if (vlan_group_get_device(bnad->vlangrp, vlan_id))
+				bna_rxf_vlan_add(bnad->priv, BNAD_RX_FUNC_ID,
+				    (unsigned int)vlan_id);
+		}
+	}
+	spin_unlock_irq(&bnad->priv_lock);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnad_netpoll(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+
+	DPRINTK(INFO, "%s bnad_netpoll\n", netdev->name);
+	disable_irq(bnad->pcidev->irq);
+	bnad_isr(bnad->pcidev->irq, netdev);
+	enable_irq(bnad->pcidev->irq);
+}
+#endif
+
+static void bnad_q_num_init(struct bnad *bnad, uint rxqsets)
+{
+	bnad->txq_num = BNAD_TXQ_NUM;
+	bnad->txf_num = 1;
+
+	if (bnad->flags & BNAD_F_MSIX) {
+		if (rxqsets) {
+			bnad->cq_num = rxqsets;
+			if (bnad->cq_num > BNAD_MAX_CQS)
+				bnad->cq_num = BNAD_MAX_CQS;
+		} else
+			bnad->cq_num = min((uint)num_online_cpus(),
+			    (uint)BNAD_MAX_RXQSETS_USED);
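+		/* The RSS hash mask requires a power-of-2 CQ count. */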
+		if (!BNA_POWER_OF_2(bnad->cq_num))
+			BNA_TO_POWER_OF_2(bnad->cq_num);
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+
+		bnad->rxf_num = 1;
+		bnad->msix_num = bnad->txq_num + bnad->cq_num +
+		    BNAD_MSIX_ERR_MAILBOX_NUM;
+	} else {
+		bnad->cq_num = 1;
+		bnad->rxq_num = bnad->cq_num * bnad_rxqs_per_cq;
+		bnad->rxf_num = 1;
+		bnad->msix_num = 0;
+	}
+}
+
+static void bnad_enable_msix(struct bnad *bnad)
+{
+	int i, ret;
+
+	if (!(bnad->flags & BNAD_F_MSIX) || bnad->msix_table)
+		return;
+
+	bnad->msix_table = kzalloc(
+	    bnad->msix_num * sizeof(struct msix_entry), GFP_KERNEL);
+	if (!bnad->msix_table)
+		goto intx_mode;
+
+	for (i = 0; i < bnad->msix_num; i++)
+		bnad->msix_table[i].entry = i;
+
+	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+	    bnad->msix_num);
+	if (ret > 0) {
+		/* Not enough MSI-X vectors. */
+		int rxqsets = ret;
+
+		dev_err(&bnad->pcidev->dev,
+		    "Tried to get %d MSI-X vectors, only got %d\n",
+		    bnad->msix_num, ret);
+		BNA_TO_POWER_OF_2(rxqsets);
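+		/*
+		 * Shrink the number of RxQ sets until the required vector
+		 * count fits within what the system granted.
+		 */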
+		while (bnad->msix_num > ret && rxqsets) {
+			bnad_q_num_init(bnad, rxqsets);
+			rxqsets >>= 1;
+		}
+		if (bnad->msix_num <= ret) {
+			ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+			    bnad->msix_num);
+			if (ret) {
+				dev_err(&bnad->pcidev->dev,
+				    "Enabling MSI-X failed: %d\n", ret);
+				goto intx_mode;
+			}
+		} else {
+			dev_err(&bnad->pcidev->dev,
+			    "Enabling MSI-X failed: limited (%d) vectors\n",
+			    ret);
+			goto intx_mode;
+		}
+	} else if (ret < 0) {
+		dev_err(&bnad->pcidev->dev, "Enabling MSI-X failed: %d\n", ret);
+		goto intx_mode;
+	}
+
+	dev_info(&bnad->pcidev->dev,
+	    "Enabling MSI-X succeeded with %d vectors, %s\n", bnad->msix_num,
+	    (bnad->cq_num > 1) ? "RSS is enabled" : "RSS is not enabled");
+	return;
+
+intx_mode:
+	dev_warn(&bnad->pcidev->dev, "Switching to INTx mode with no RSS\n");
+	kfree(bnad->msix_table);
+	bnad->msix_table = NULL;
+	bnad->flags &= ~BNAD_F_MSIX;
+	bnad_q_num_init(bnad, 0);
+}
+
+static void bnad_disable_msix(struct bnad *bnad)
+{
+	if ((bnad->flags & BNAD_F_MSIX) && bnad->msix_table) {
+		pci_disable_msix(bnad->pcidev);
+		kfree(bnad->msix_table);
+		bnad->msix_table = NULL;
+		bnad->flags &= ~BNAD_F_MSIX;
+	}
+}
+
+static void bnad_error(struct bnad *bnad)
+{
+	DPRINTK(INFO, "%s bnad_error\n", bnad->netdev->name);
+
+	rtnl_lock();
+	set_bit(BNAD_RESETTING, &bnad->state);
+	if (!test_and_set_bit(BNAD_DISABLED, &bnad->state)) {
+		bnad_detach(bnad);
+		bnad_cleanup(bnad);
+		DPRINTK(WARNING, "%s is disabled upon error\n",
+			bnad->netdev->name);
+	}
+	rtnl_unlock();
+}
+
+static void bnad_resume_after_reset(struct bnad *bnad)
+{
+	int err;
+	struct net_device *netdev = bnad->netdev;
+
+	DPRINTK(WARNING, "port %d resumes after reset\n", bnad->bna_id);
+
+	rtnl_lock();
+	clear_bit(BNAD_RESETTING, &bnad->state);
+
+	bna_port_mac_get(bnad->priv, (u8 *)bnad->perm_addr);
+	BNA_ASSERT(netdev->addr_len == sizeof(bnad->perm_addr));
+#ifdef ETHTOOL_GPERMADDR
+	memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+#endif
+	if (is_zero_ether_addr(netdev->dev_addr))
+		memcpy(netdev->dev_addr, bnad->perm_addr, netdev->addr_len);
+
+	if (netif_running(bnad->netdev)) {
+		err = bnad_open_locked(bnad->netdev);
+		if (err)
+			DPRINTK(ERR, "%s bnad_open failed after reset: %d\n",
+				bnad->netdev->name, err);
+	}
+	rtnl_unlock();
+}
+
+static void bnad_work(struct work_struct *work)
+{
+	struct bnad *bnad = container_of(work, struct bnad, work);
+	unsigned long work_flags;
+
+	DPRINTK(INFO, "port %u bnad_work flags 0x%x\n",
+		bnad->bna_id, bnad->work_flags);
+
+	spin_lock_irq(&bnad->priv_lock);
+	work_flags = bnad->work_flags;
+	bnad->work_flags = 0;
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (work_flags & BNAD_WF_ERROR) {
+		DPRINTK(INFO, "port %u bnad_work: BNAD_WF_ERROR\n",
+			bnad->bna_id);
+		bnad_error(bnad);
+	}
+
+	if (work_flags & BNAD_WF_RESETDONE) {
+		DPRINTK(INFO, "port %u bnad_work: BNAD_WF_RESETDONE\n",
+			bnad->bna_id);
+		bnad_resume_after_reset(bnad);
+	}
+}
+
+static void bnad_stats_timeo(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	int i;
+	struct bnad_rxq_info *rxqinfo;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_stats_get(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (bnad->rx_dyn_coalesce_on) {
+		u8 cls_timer;
+		struct bnad_cq_info *cq;
+		for (i = 0; i < bnad->cq_num; i++) {
+			cq = &bnad->cq_table[i];
+
+			if ((cq->pkt_rate.small_pkt_cnt == 0)
+			    && (cq->pkt_rate.large_pkt_cnt == 0))
+				continue;
+
+			cls_timer = bna_calc_coalescing_timer(bnad->priv,
+			    &cq->pkt_rate);
+
+			/* For NAPI, the coalescing timer needs to be stored. */
+			cq->rx_coalescing_timeo = cls_timer;
+
+			bna_ib_coalescing_timer_set(bnad->priv, &cq->ib,
+			    cls_timer);
+		}
+	}
+
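+	/*
+	 * Refill any RxQ whose count of posted buffers has dropped below
+	 * the refill threshold.
+	 */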
+	for (i = 0; i < bnad->rxq_num; i++) {
+		rxqinfo = &bnad->rxq_table[i];
+		if (!(BNA_QE_IN_USE_CNT(&rxqinfo->skb_unmap_q,
+		    rxqinfo->skb_unmap_q.q_depth) >>
+		     BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) {
+			DPRINTK(INFO, "%s: RxQ %d more buffers to allocate\n",
+				bnad->netdev->name, i);
+			if (test_and_set_bit(BNAD_RXQ_REFILL, &rxqinfo->flags))
+				continue;
+			bnad_alloc_rxbufs(rxqinfo);
+			smp_mb__before_clear_bit();
+			clear_bit(BNAD_RXQ_REFILL, &rxqinfo->flags);
+		}
+	}
+}
+
+static void bnad_free_ioc_mem(struct bnad *bnad)
+{
+	enum bna_dma_mem_type i;
+
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		if (bnad->ioc_meminfo[i].kva && bnad->ioc_meminfo[i].dma)
+			pci_free_consistent(bnad->pcidev,
+			    bnad->ioc_meminfo[i].len, bnad->ioc_meminfo[i].kva,
+			    *(dma_addr_t *)&bnad->ioc_meminfo[i].dma);
+		else if (bnad->ioc_meminfo[i].kva)
+			vfree(bnad->ioc_meminfo[i].kva);
+		bnad->ioc_meminfo[i].kva = NULL;
+	}
+}
+
+/* The following IOC callback functions are called with priv_lock held. */
+
+void bna_iocll_enable_cbfn(void *arg, enum bfa_status status)
+{
+	struct bnad *bnad = arg;
+
+	DPRINTK(WARNING, "port %u IOC enable callback, status %d\n",
+		bnad->bna_id, status);
+
+	bnad->ioc_comp_status = status;
+	complete(&bnad->ioc_comp);
+
+	if (!status) {
+		bnad->work_flags |= BNAD_WF_RESETDONE;
+		if (!test_bit(BNAD_REMOVED, &bnad->state))
+			schedule_work(&bnad->work);
+	}
+}
+
+void bna_iocll_disable_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	DPRINTK(WARNING, "port %u IOC disable callback\n",
+		bnad->bna_id);
+	complete(&bnad->ioc_comp);
+}
+
+void bna_iocll_hbfail_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+
+	DPRINTK(ERR, "port %u IOC HBFail callback\n", bnad->bna_id);
+	bnad_hw_error(bnad, BFA_STATUS_IOC_FAILURE);
+}
+
+void bna_iocll_reset_cbfn(void *arg)
+{
+	struct bnad *bnad = arg;
+	u32 int_status, int_mask;
+	unsigned int irq;
+
+	DPRINTK(WARNING, "port %u IOC reset callback\n", bnad->bna_id);
+
+	/* Clear the status */
+	bna_intr_status_get(bnad->priv, &int_status);
+
+	if (bnad->flags & BNAD_F_MSIX) {
+		if (test_and_clear_bit(BNAD_MBOX_IRQ_DISABLED, &bnad->state)) {
+			irq = bnad->msix_table[bnad->txq_num +
+				bnad->cq_num].vector;
+			DPRINTK(WARNING, "Enabling Mbox IRQ %d for port %d\n",
+				irq, bnad->bna_id);
+			enable_irq(irq);
+		}
+	}
+
+	int_mask = ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS);
+	bna_intx_enable(bnad->priv, int_mask);
+}
+
+static void bnad_ioc_timeout(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_timer(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	if (!test_bit(BNAD_REMOVED, &bnad->state))
+		mod_timer(&bnad->ioc_timer,
+		    jiffies + HZ * BNA_IOC_TIMER_FREQ / 1000);
+}
+
+s32
+bnad_cee_attach(struct bnad *bnad)
+{
+	u8 *dma_kva;
+	dma_addr_t dma_pa;
+	struct bfa_cee_s *cee = &bnad->cee;
+
+	memset(cee, 0, sizeof(struct bfa_cee_s));
+
+	/* Allocate memory for DMA. */
+	dma_kva = pci_alloc_consistent(bnad->pcidev, bfa_cee_meminfo(),
+			    &dma_pa);
+	if (dma_kva == NULL)
+		return -ENOMEM;
+
+	/*Ugly... need to remove once CAL is fixed.*/
+	((struct bna_dev_s *)bnad->priv)->cee = cee;
+
+	bnad->cee_cbfn.get_attr_cbfn = bnad_cee_get_attr_cb;
+	bnad->cee_cbfn.get_stats_cbfn = bnad_cee_get_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = bnad_cee_reset_stats_cb;
+	bnad->cee_cbfn.reset_stats_cbfn = NULL;
+
+	/* Invoke the CEE attach function. */
+	bfa_cee_attach(cee, &bnad->priv->ioc, bnad,
+	    bnad->trcmod, bnad->logmod);
+	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
+	return 0;
+}
+
+static void
+bnad_cee_detach(struct bnad *bnad)
+{
+	struct bfa_cee_s *cee = &bnad->cee;
+	if (cee->attr_dma.kva) {
+		pci_free_consistent(bnad->pcidev, bfa_cee_meminfo(),
+		    cee->attr_dma.kva, cee->attr_dma.pa);
+	}
+	bfa_cee_detach(&bnad->cee);
+}
+
+
+static int bnad_priv_init(struct bnad *bnad)
+{
+	dma_addr_t dma_addr;
+	struct bna_dma_addr bna_dma_addr;
+	char inst_name[16];
+	int err, i;
+	struct bfa_pcidev_s pcidev_info;
+	u32 intr_mask;
+
+	DPRINTK(DEBUG, "port %u bnad_priv_init\n", bnad->bna_id);
+
+	if (bnad_msix)
+		bnad->flags |= BNAD_F_MSIX;
+	bnad_q_num_init(bnad, bnad_rxqsets_used);
+
+	bnad->work_flags = 0;
+	INIT_WORK(&bnad->work, bnad_work);
+
+	init_timer(&bnad->stats_timer);
+	bnad->stats_timer.function = &bnad_stats_timeo;
+	bnad->stats_timer.data = (unsigned long)bnad;
+
+	bnad->tx_coalescing_timeo = BNAD_TX_COALESCING_TIMEO;
+	bnad->tx_interpkt_count = BNAD_TX_INTERPKT_COUNT;
+
+	bnad->rx_coalescing_timeo = BNAD_RX_COALESCING_TIMEO;
+	bnad->rx_interpkt_count = BNAD_RX_INTERPKT_COUNT;
+	bnad->rx_interpkt_timeo = BNAD_RX_INTERPKT_TIMEO;
+	bnad->rx_dyn_coalesce_on = BNA_TRUE;
+
+	bnad->rx_csum = 1;
+	bnad->pause_config.tx_pause = 0;
+	bnad->pause_config.rx_pause = 0;
+
+	/* XXX could be vmalloc? */
+	bnad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
+	if (!bnad->trcmod) {
+		DPRINTK(ERR, "port %u failed allocating trace buffer!\n",
+			bnad->bna_id);
+		return -ENOMEM;
+	}
+	bfa_trc_init(bnad->trcmod);
+
+	bnad->logmod = NULL;
+	sprintf(inst_name, "%u", bnad->bna_id);
+
+	bnad->aen = NULL;
+	INIT_LIST_HEAD(&bnad->file_q);
+	INIT_LIST_HEAD(&bnad->file_free_q);
+	for (i = 0; i < BNAD_AEN_MAX_APPS; i++) {
+		bfa_q_qe_init(&bnad->file_buf[i].qe);
+		list_add_tail(&bnad->file_buf[i].qe, &bnad->file_free_q);
+	}
+
+	bnad->priv = kzalloc(bna_get_handle_size(), GFP_KERNEL);
+	if (!bnad->priv) {
+		DPRINTK(ERR, "port %u failed allocating memory for bna\n",
+			bnad->bna_id);
+		err = -ENOMEM;
+		goto free_trcmod;
+	}
+	bnad->priv_stats = pci_alloc_consistent(bnad->pcidev,
+	    BNA_HW_STATS_SIZE, &dma_addr);
+	if (!bnad->priv_stats) {
+		DPRINTK(ERR, "port %u failed allocating memory for bna stats\n",
+			bnad->bna_id);
+		err = -ENOMEM;
+		goto free_priv_mem;
+	}
+	pci_unmap_addr_set(bnad, priv_stats_dma, dma_addr);
+	DPRINTK(DEBUG, "port %u priv_stats dma addr 0x%llx\n",
+		bnad->bna_id, (unsigned long long)dma_addr);
+
+	BNA_SET_DMA_ADDR(dma_addr, &bna_dma_addr);
+	bna_init(bnad->priv, (void *)bnad->bar0, bnad->priv_stats,
+	    bna_dma_addr, bnad->trcmod);
+	bna_all_stats_get(bnad->priv, &bnad->hw_stats);
+	spin_lock_init(&bnad->priv_lock);
+	bnad->priv_cbfn.ucast_set_cb = bnad_ucast_set_cb;
+	bnad->priv_cbfn.txq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.rxq_stop_cb = bnad_q_stop_cb;
+	bnad->priv_cbfn.link_up_cb = bnad_link_up_cb;
+	bnad->priv_cbfn.link_down_cb = bnad_link_down_cb;
+	bnad->priv_cbfn.stats_get_cb = bnad_stats_get_cb;
+	bnad->priv_cbfn.hw_error_cb = bnad_hw_error_cb;
+	bnad->priv_cbfn.lldp_get_cfg_cb = bnad_lldp_get_cfg_cb;
+	/* Diagnostics */
+	bnad->priv_cbfn.set_diag_lb_cb = bnad_set_diag_lb_cb;
+
+	bna_register_callback(bnad->priv, &bnad->priv_cbfn, bnad);
+
+	bna_iocll_meminfo(bnad->priv, bnad->ioc_meminfo);
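+	/*
+	 * Firmware trace memory only needs to be CPU-visible, so it is
+	 * vmalloc'ed; the other IOC segments use DMA-coherent memory.
+	 */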
+	for (i = 0; i < BNA_MEM_T_MAX; i++) {
+		if (!bnad->ioc_meminfo[i].len)
+			continue;
+		switch (i) {
+		case BNA_KVA_MEM_T_FWTRC:
+			bnad->ioc_meminfo[i].kva = vmalloc(
+			    bnad->ioc_meminfo[i].len);
+			break;
+		default:
+			bnad->ioc_meminfo[i].kva = pci_alloc_consistent(
+			    bnad->pcidev, bnad->ioc_meminfo[i].len,
+			    (dma_addr_t *)&bnad->ioc_meminfo[i].dma);
+
+			break;
+		}
+		if (!bnad->ioc_meminfo[i].kva) {
+			DPRINTK(ERR,
+				"port %u failed allocating %u bytes "
+				"of memory for IOC\n",
+				bnad->bna_id, bnad->ioc_meminfo[i].len);
+			err = -ENOMEM;
+			goto free_ioc_mem;
+		}
+	}
+
+	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+	pcidev_info.device_id = bnad->pcidev->device;
+	pcidev_info.pci_bar_kva = bnad->bar0;
+	bna_iocll_attach(bnad->priv, bnad, bnad->ioc_meminfo,
+	    &pcidev_info, bnad->trcmod, bnad->aen, bnad->logmod);
+
+	err = bnad_cee_attach(bnad);
+	if (err) {
+		DPRINTK(ERR, "port %u cee_attach failed: %d\n",
+			bnad->bna_id, err);
+		goto iocll_detach;
+	}
+
+	if (bnad->flags & BNAD_F_MSIX)
+		bnad_enable_msix(bnad);
+	else
+		dev_info(&bnad->pcidev->dev, "Working in INTx mode, no RSS\n");
+	bna_intx_disable(bnad->priv, &intr_mask);
+	err = bnad_request_mbox_irq(bnad);
+	if (err)
+		goto disable_msix;
+
+	init_completion(&bnad->ioc_comp);
+	DPRINTK(DEBUG, "port %u enabling IOC ...\n", bnad->bna_id);
+	spin_lock_irq(&bnad->priv_lock);
+	bna_iocll_enable(bnad->priv);
+	spin_unlock_irq(&bnad->priv_lock);
+
+	init_timer(&bnad->ioc_timer);
+	bnad->ioc_timer.function = &bnad_ioc_timeout;
+	bnad->ioc_timer.data = (unsigned long)bnad;
+	mod_timer(&bnad->ioc_timer, jiffies + HZ * BNA_IOC_TIMER_FREQ / 1000);
+
+	DPRINTK(DEBUG, "port %u waiting for IOC ready.\n", bnad->bna_id);
+	wait_for_completion(&bnad->ioc_comp);
+	if (!bnad->ioc_comp_status) {
+		DPRINTK(INFO, "port %u IOC is enabled.\n", bnad->bna_id);
+		bna_port_mac_get(bnad->priv,
+		    (u8 *)bnad->perm_addr);
+	} else {
+		DPRINTK(ERR, "port %u enabling IOC failed: %d\n",
+			bnad->bna_id, bnad->ioc_comp_status);
+		set_bit(BNAD_RESETTING, &bnad->state);
+	}
+
+	return 0;
+
+disable_msix:
+	bnad_disable_msix(bnad);
+	bnad_cee_detach(bnad);
+iocll_detach:
+	bna_iocll_detach(bnad->priv);
+free_ioc_mem:
+	bnad_free_ioc_mem(bnad);
+	bna_uninit(bnad->priv);
+	pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE, bnad->priv_stats,
+	    pci_unmap_addr(bnad, priv_stats_dma));
+	bnad->priv_stats = NULL;
+free_priv_mem:
+	kfree(bnad->priv);
+	bnad->priv = NULL;
+free_trcmod:
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+
+	return err;
+}
+
+static void bnad_priv_uninit(struct bnad *bnad)
+{
+	int i;
+	enum bna_status_e err;
+
+	if (bnad->priv) {
+		DPRINTK(INFO, "port %u disabling IOC ...\n", bnad->bna_id);
+		init_completion(&bnad->ioc_comp);
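+		/*
+		 * Retry the disable for up to 10 seconds while the IOC
+		 * reports busy.
+		 */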
+		for (i = 0; i < 10; i++) {
+			spin_lock_irq(&bnad->priv_lock);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err || err == BNA_BUSY);
+			if (!err)
+				break;
+			msleep(1000);
+		}
+		if (err) {
+			/* Probably firmware crashed. */
+			DPRINTK(INFO,
+				"bna_iocll_disable failed, "
+				"cleaning up and trying again\n");
+			spin_lock_irq(&bnad->priv_lock);
+			bna_cleanup(bnad->priv);
+			err = bna_iocll_disable(bnad->priv);
+			spin_unlock_irq(&bnad->priv_lock);
+			BNA_ASSERT(!err);
+		}
+		wait_for_completion(&bnad->ioc_comp);
+		set_bit(BNAD_IOC_DISABLED, &bnad->state);
+		DPRINTK(INFO, "port %u IOC is disabled\n", bnad->bna_id);
+
+		set_bit(BNAD_REMOVED, &bnad->state);
+		/* Stop the timer after disabling IOC. */
+		del_timer_sync(&bnad->ioc_timer);
+		bnad_free_ioc_mem(bnad);
+		bna_iocll_detach(bnad->priv);
+
+		flush_scheduled_work();
+		bnad_free_mbox_irq(bnad);
+		bnad_disable_msix(bnad);
+
+		bnad_cee_detach(bnad);
+
+		bna_uninit(bnad->priv);
+		if (bnad->priv_stats) {
+			pci_free_consistent(bnad->pcidev, BNA_HW_STATS_SIZE,
+			    bnad->priv_stats,
+			    pci_unmap_addr(bnad, priv_stats_dma));
+			bnad->priv_stats = NULL;
+		}
+		kfree(bnad->priv);
+		bnad->priv = NULL;
+	}
+	BNA_ASSERT(list_empty(&bnad->file_q));
+	kfree(bnad->trcmod);
+	bnad->trcmod = NULL;
+}
+
+static struct pci_device_id bnad_pci_id_table[] = {
+	{
+	 .vendor = PCI_VENDOR_ID_BROCADE,
+	 .device = PCI_DEVICE_ID_BROCADE_CATAPULT,
+	 .subvendor = PCI_ANY_ID,
+	 .subdevice = PCI_ANY_ID,
+	 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+	 .class_mask = 0xffff00
+	},
+	{0, 0}
+};
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static int __devinit
+bnad_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidev_id)
+{
+	int err, using_dac;
+	struct net_device *netdev;
+	struct bnad *bnad;
+	unsigned long mmio_start, mmio_len;
+	static u32 bna_id;
+
+	DPRINTK(INFO, "bnad_pci_probe(0x%p, 0x%p)\n", pcidev, pcidev_id);
+
+	DPRINTK(DEBUG, "PCI func %d\n", PCI_FUNC(pcidev->devfn));
+	if (!bfad_get_firmware_buf(pcidev)) {
+		printk(KERN_WARNING "Failed to load Firmware Image!\n");
+		return -ENODEV;
+	}
+
+	err = pci_enable_device(pcidev);
+	if (err) {
+		dev_err(&pcidev->dev, "pci_enable_device failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pcidev, BNAD_NAME);
+	if (err) {
+		dev_err(&pcidev->dev, "pci_request_regions failed: %d\n", err);
+		goto disable_device;
+	}
+
+	if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+		DPRINTK(INFO, "64bit DMA mask\n");
+	} else {
+		err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+		if (err) {
+			err = pci_set_consistent_dma_mask(pcidev,
+							  DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pcidev->dev,
+				    "set 32bit consistent DMA mask failed: %d\n",
+				    err);
+				goto release_regions;
+			}
+		}
+		using_dac = 0;
+		DPRINTK(INFO, "32bit DMA mask\n");
+	}
+
+	pci_set_master(pcidev);
+
+	netdev = alloc_etherdev(sizeof(struct bnad));
+	if (!netdev) {
+		dev_err(&pcidev->dev, "alloc_etherdev failed\n");
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	SET_MODULE_OWNER(netdev);
+	SET_NETDEV_DEV(netdev, &pcidev->dev);
+	pci_set_drvdata(pcidev, netdev);
+
+	bnad = netdev_priv(netdev);
+	set_bit(BNAD_DISABLED, &bnad->state);
+	bnad->netdev = netdev;
+	bnad->pcidev = pcidev;
+	mmio_start = pci_resource_start(pcidev, 0);
+	mmio_len = pci_resource_len(pcidev, 0);
+	bnad->bar0 = ioremap_nocache(mmio_start, mmio_len);
+	if (!bnad->bar0) {
+		dev_err(&pcidev->dev, "ioremap for bar0 failed\n");
+		err = -ENOMEM;
+		goto free_devices;
+	}
+	DPRINTK(INFO, "bar0 mapped to %p, len %lu\n", bnad->bar0, mmio_len);
+
+	netdev->netdev_ops = &bnad_netdev_ops;
+	netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+#ifdef NETIF_F_IPV6_CSUM
+	netdev->features |= NETIF_F_IPV6_CSUM;
+#endif
+#ifdef NETIF_F_TSO
+	netdev->features |= NETIF_F_TSO;
+#endif
+#ifdef NETIF_F_TSO6
+	netdev->features |= NETIF_F_TSO6;
+#endif
+#ifdef NETIF_F_LRO
+	netdev->features |= NETIF_F_LRO;
+#endif
+#ifdef BNAD_VLAN_FEATURES
+	netdev->vlan_features = netdev->features;
+#endif
+	if (using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+	    NETIF_F_HW_VLAN_FILTER;
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len - 1;
+
+	bnad_set_ethtool_ops(netdev);
+
+	bnad->bna_id = bna_id;
+	err = bnad_priv_init(bnad);
+	if (err) {
+		printk(KERN_ERR "port %u init failed: %d\n", bnad->bna_id, err);
+		goto unmap_bar0;
+	}
+
+	BNA_ASSERT(netdev->addr_len == ETH_ALEN);
+#ifdef ETHTOOL_GPERMADDR
+	memcpy(netdev->perm_addr, bnad->perm_addr, netdev->addr_len);
+#endif
+	memcpy(netdev->dev_addr, bnad->perm_addr, netdev->addr_len);
+
+
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+	err = register_netdev(netdev);
+	if (err) {
+		printk(KERN_ERR "port %u register_netdev failed: %d\n",
+		    bnad->bna_id, err);
+		goto bnad_device_uninit;
+	}
+
+
+	bna_id++;
+	return 0;
+
+bnad_device_uninit:
+	bnad_priv_uninit(bnad);
+unmap_bar0:
+	iounmap(bnad->bar0);
+free_devices:
+	pci_set_drvdata(pcidev, NULL);
+	free_netdev(netdev);
+release_regions:
+	pci_release_regions(pcidev);
+disable_device:
+	pci_disable_device(pcidev);
+
+	return err;
+}
+
+static void __devexit bnad_pci_remove(struct pci_dev *pcidev)
+{
+	struct net_device *netdev = pci_get_drvdata(pcidev);
+	struct bnad *bnad;
+
+	if (!netdev)
+		return;
+	DPRINTK(INFO, "%s bnad_pci_remove\n", netdev->name);
+	bnad = netdev_priv(netdev);
+
+
+	unregister_netdev(netdev);
+
+	bnad_priv_uninit(bnad);
+	iounmap(bnad->bar0);
+	pci_set_drvdata(pcidev, NULL);
+	free_netdev(netdev);
+	pci_release_regions(pcidev);
+	pci_disable_device(pcidev);
+}
+
+static struct pci_driver bnad_pci_driver = {
+	.name = BNAD_NAME,
+	.id_table = bnad_pci_id_table,
+	.probe    = bnad_pci_probe,
+	.remove   = __devexit_p(bnad_pci_remove),
+};
+
+static int __init bnad_module_init(void)
+{
+	int err;
+
+	printk(KERN_INFO "Brocade 10G Ethernet driver %s\n", bfa_version);
+	DPRINTK(INFO, "Module bna is loaded at 0x%p\n",
+		__this_module.module_core);
+	err = bnad_check_module_params();
+	if (err)
+		return err;
+
+	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+
+	return pci_register_driver(&bnad_pci_driver);
+}
+
+static void __exit bnad_module_exit(void)
+{
+	pci_unregister_driver(&bnad_pci_driver);
+
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
+
diff -ruP linux-2.6.32-rc4-orig/drivers/net/bna/bnad.h linux-2.6.32-rc4-mod/drivers/net/bna/bnad.h
--- linux-2.6.32-rc4-orig/drivers/net/bna/bnad.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.32-rc4-mod/drivers/net/bna/bnad.h	2009-10-16 10:30:53.075436000 -0700
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _BNAD_H_
+#define _BNAD_H_
+
+#include <cee/bfa_cee.h>
+#include "bna.h"
+
+#if !defined(CONFIG_INET_LRO) && !defined(CONFIG_INET_LRO_MODULE)
+#include <net/ip.h>
+#include <net/tcp.h>
+#else
+#include <linux/inet_lro.h>
+#endif
+
+#include "bnad_compat.h"
+
+#if !defined(CONFIG_INET_LRO) && !defined(CONFIG_INET_LRO_MODULE)
+#include "inet_lro.h"
+#endif
+
+#define BNAD_LRO_MAX_DESC	8
+#define BNAD_LRO_MAX_AGGR	64
+
+
+#define BNAD_MAX_Q_DEPTH	0x10000
+#define BNAD_MIN_Q_DEPTH	0x200
+
+#define BNAD_TXQ_NUM		1
+#define BNAD_TX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_TXQ	2048
+
+#define BNAD_MAX_RXQS		64
+#define BNAD_MAX_RXQSETS_USED	16
+#define BNAD_RX_FUNC_ID		0
+#define BNAD_ENTRIES_PER_RXQ	2048
+
+#define BNAD_MAX_CQS		64
+#define BNAD_MAX_RXQS_PER_CQ	2
+
+#define BNAD_MSIX_ERR_MAILBOX_NUM	1
+
+#define BNAD_INTX_MAX_IB_NUM	16
+#define BNAD_INTX_IB_NUM	2	/* 1 for Tx, 1 for Rx */
+#define BNAD_INTX_TX_IB_ID	0
+#define BNAD_INTX_RX_IB_ID	1
+
+#define BNAD_QUEUE_NAME_SIZE	16
+
+#define BNAD_JUMBO_MTU		9000
+
+#define BNAD_COALESCING_TIMER_UNIT	5	/* 5us */
+#define BNAD_MAX_COALESCING_TIMEO	0xFF 	/* in 5us units */
+#define BNAD_MAX_INTERPKT_COUNT		0xFF
+#define BNAD_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
+
+#define BNAD_TX_COALESCING_TIMEO	20	/* 20 * 5 = 100us */
+#define BNAD_TX_INTERPKT_COUNT		32
+
+#define BNAD_RX_COALESCING_TIMEO	12	/* 12 * 5 = 60us */
+#define BNAD_RX_INTERPKT_COUNT		6
+#define BNAD_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
+
+#define BNAD_SMALL_RXBUF_SIZE	128
+
+#define BNAD_RIT_OFFSET		0
+#define BNAD_MULTICAST_RXQ_ID	0
+
+#define BNAD_NETIF_WAKE_THRESHOLD	8
+
+#define BNAD_TX_MAX_VECTORS		255
+#define BNAD_TX_MAX_VECTORS_PER_WI	4
+#define BNAD_TX_MAX_DATA_PER_WI		0xFFFFFF	/* 24 bits */
+#define BNAD_TX_MAX_DATA_PER_VECTOR	0x3FFF		/* 14 bits */
+#define BNAD_TX_MAX_WRR_QUOTA		0xFFF		/* 12 bits */
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
+
+#define BNAD_CQ_PROCESS_LIMIT		512
+
+#define BNAD_NOT_READY(_bnad)	test_bit(BNAD_RESETTING, &(_bnad)->state)
+
+#define BNAD_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)	\
+    (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define bnad_lock()
+#define bnad_unlock()
+
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_ct;
+extern u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+
+struct bnad_skb_unmap {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+    u32    producer_index;
+    u32    consumer_index;
+    struct bnad_skb_unmap *unmap_array;
+    u32	q_depth;
+};
+
+struct bnad_ib_entry {
+	struct bna_ib *ib;
+	void *ib_seg_addr;
+	struct bna_ib_config ib_config;
+};
+
+struct bnad_txq_info {
+	unsigned long flags;
+#define BNAD_TXQ_FREE_SENT	0
+	struct bna_txq txq;
+	struct bna_ib ib;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct bnad *bnad;
+	volatile u32 *hw_consumer_index;
+	struct bna_txq_config txq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+#ifdef DEBUG_TX
+	u32 max_tso;
+	u32 tx_vectors[32];
+#endif
+} ____cacheline_aligned;
+
+struct bnad_rxq_info {
+	unsigned long flags;
+#define BNAD_RXQ_REFILL		0
+	struct bna_rxq rxq;
+	struct bnad_unmap_q skb_unmap_q;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_packets_with_error;
+	u64 rxbuf_alloc_failed;
+	struct bnad *bnad;
+	u32 rxq_id;
+	struct bna_rxq_config rxq_config;
+}  ____cacheline_aligned;
+
+struct bnad_cq_info {
+	struct bna_cq cq;
+	struct bna_ib ib;
+	struct bnad *bnad;
+	struct bna_pkt_rate pkt_rate;
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	volatile u32 *hw_producer_index;
+	struct net_lro_mgr  lro;
+	struct napi_struct napi;
+	u32 cq_id;
+	struct bna_cq_config cq_config;
+	char name[BNAD_QUEUE_NAME_SIZE];
+}  ____cacheline_aligned;
+
+struct bnad_txf_info {
+	u32 txf_id;
+	struct bna_txf_config txf_config;
+};
+
+struct bnad_rxf_info {
+	u32 rxf_id;
+	struct bna_rxf_config rxf_config;
+};
+
+enum bnad_ucast_cmd {
+	BNAD_UCAST_MAC_SET,
+	BNAD_UCAST_MAC_ADD,
+	BNAD_UCAST_MAC_DEL
+};
+
+struct bnad_diag_lb_params {
+	struct bnad *bnad;
+	struct completion diag_lb_comp;
+	int diag_lb_comp_status;
+	int diag_lb_link_state;
+#define BNAD_DIAG_LB_LS_UNKNOWN	-1
+#define BNAD_DIAG_LB_LS_UP	 0
+#define BNAD_DIAG_LB_LS_DOWN	 1
+};
+
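+/*
+ * Asynchronous Event Notification (AEN) bookkeeping -- presumably one
+ * entry per application consuming events from this adapter.
+ */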
+#define BNAD_AEN_MAX_APPS 8
+struct bnad_aen_file_s {
+	struct list_head  qe;
+	struct bnad *bnad;
+	s32 ri;
+	s32 app_id;
+};
+
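+/* Per-adapter driver state. */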
+struct bnad {
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	struct bna_dev_s *priv;
+
+	unsigned long state;
+#define BNAD_DISABLED		0
+#define BNAD_RESETTING		1
+#define BNAD_REMOVED		2
+#define BNAD_SET_UCAST		4
+#define BNAD_IOC_DISABLED	5
+#define BNAD_PORT_DISABLED	6
+#define BNAD_MBOX_IRQ_DISABLED	7
+
+	unsigned int flags;
+#define BNAD_F_MSIX		0x01
+#define BNAD_F_PROMISC		0x02
+#define BNAD_F_ALLMULTI		0x04
+#define BNAD_F_WOL		0x08
+#define BNAD_F_TXQ_DEPTH	0x10
+#define BNAD_F_RXQ_DEPTH	0x20
+
+	uint txq_num;
+	uint txq_depth;
+	struct bnad_txq_info *txq_table;
+	uint rxq_num;
+	uint rxq_depth;
+	struct bnad_rxq_info *rxq_table;
+	uint cq_num;
+	struct bnad_cq_info *cq_table;
+
+	struct vlan_group *vlangrp;
+
+	u32 rx_csum;
+
+	uint msix_num;
+	struct msix_entry *msix_table;
+
+	uint ib_num;
+	struct bnad_ib_entry *ib_table;
+
+	struct bna_rit_entry *rit;		/* RxQ Indirection Table */
+
+	spinlock_t priv_lock ____cacheline_aligned;
+
+	uint txf_num;
+	struct bnad_txf_info *txf_table;
+	uint rxf_num;
+	struct bnad_rxf_info *rxf_table;
+
+	struct timer_list stats_timer;
+	struct net_device_stats net_stats;
+
+	u8 tx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 tx_interpkt_count;
+
+	u8 rx_coalescing_timeo;	/* Unit is 5usec. */
+	u8 rx_interpkt_count;
+	u8 rx_interpkt_timeo;	/* 4 bits, unit is 0.5usec. */
+	u8 rx_dyn_coalesce_on;	/* Rx Dynamic Intr Moderation Flag */
+	u8 ref_count;
+	u8 lldp_comp_status;
+	u8 cee_stats_comp_status;
+	u8 cee_reset_stats_status;
+	u8 ucast_comp_status;
+	u8 qstop_comp_status;
+	u16 rsvd_2;
+	int ioc_comp_status;
+
+	struct bna_pause_config pause_config;
+
+	struct bna_stats *hw_stats;
+	struct bnad_drv_stats stats;
+
+	struct work_struct work;
+	unsigned int work_flags;
+#define BNAD_WF_ERROR		0x1
+#define BNAD_WF_RESETDONE	0x2
+
+	struct completion lldp_comp;
+	struct completion cee_stats_comp;
+	struct completion cee_reset_stats_comp;
+	struct completion ucast_comp;
+	struct completion qstop_comp;
+	struct completion ioc_comp;
+
+	u32 bna_id;
+	u8 __iomem *bar0;			/* registers */
+	unsigned char perm_addr[ETH_ALEN];
+	u32 pci_saved_config[16];
+
+	void *priv_stats;
+	DECLARE_PCI_UNMAP_ADDR(priv_stats_dma)
+
+	struct bfa_trc_mod_s *trcmod;
+	struct bfa_log_mod_s *logmod;
+	struct bfa_aen_s *aen;
+	struct bnad_aen_file_s file_buf[BNAD_AEN_MAX_APPS];
+	struct list_head file_q;
+	struct list_head file_free_q;
+	struct bna_meminfo ioc_meminfo[BNA_MEM_T_MAX];
+	struct timer_list ioc_timer;
+
+	struct bna_mbox_cbfn priv_cbfn;
+
+	char adapter_name[64];
+	char port_name[64];
+
+	/* Diagnostics */
+	struct bna_diag_lb_pkt_stats *lb_stats;
+	struct bnad_diag_lb_params *dlbp;
+
+	/* CEE Stuff */
+	struct bfa_cee_cbfn_s cee_cbfn;
+	struct bfa_cee_s cee;
+
+	struct list_head list_entry;
+};
+
+extern uint bnad_rxqs_per_cq;
+extern uint bnad_rxq_depth;
+extern uint bnad_txq_depth;
+extern uint bnad_small_large_rxbufs;
+
+extern struct list_head bnad_list;
+
+int bnad_open(struct net_device *netdev);
+int bnad_stop(struct net_device *netdev);
+int bnad_stop_locked(struct net_device *netdev);
+int bnad_open_locked(struct net_device *netdev);
+int bnad_sw_reset(struct net_device *netdev);
+int bnad_resetting(struct bnad *bnad);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_ioctl_init(void);
+void bnad_ioctl_exit(void);
+struct net_device_stats *bnad_get_stats(struct net_device *netdev);
+void bnad_reset_stats(struct net_device *netdev);
+
+int bnad_ucast_mac(struct bnad *bnad, unsigned int rxf_id,
+		   u8 *mac_ptr, unsigned int cmd);
+int bnad_rxq_init(struct bnad *bnad, uint rxq_id);
+void bnad_setup_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_alloc_for_rxq(struct bnad *bnad, uint rxq_id);
+void bnad_free_rxq(struct bnad *bnad, uint rxq_id);
+int bnad_cq_init(struct bnad *bnad, uint cq_id);
+void bnad_setup_cq(struct bnad *bnad, uint cq_id);
+int bnad_alloc_ib(struct bnad *bnad, uint ib_id);
+void bnad_setup_ib(struct bnad *bnad, uint ib_id);
+void bnad_rxib_init(struct bnad *bnad, uint cq_id, uint ib_id);
+void bnad_free_ib(struct bnad *bnad, uint ib_id);
+int bnad_request_cq_irq(struct bnad *bnad, uint cq_id);
+u32 bnad_get_msglevel(struct net_device *netdev);
+void bnad_set_msglevel(struct net_device *netdev, u32 msglevel);
+int bnad_alloc_unmap_q(struct bnad_unmap_q *unmap_q, u32 q_depth);
+int bnad_disable_rxq(struct bnad *bnad, u32 rxq_id);
+void bnad_free_cq(struct bnad *bnad, uint cq_id);
+void bnad_add_to_list(struct bnad *bnad);
+void bnad_remove_from_list(struct bnad *bnad);
+struct bnad *get_bnadev(int bna_id);
+/* For diagnostics */
+int bnad_diag_lb_rx(struct bnad *bnad, struct sk_buff *skb);
+int bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _BNAD_H_ */
