* [PATCH] PA Semi PWRficient Ethernet driver
@ 2007-01-29  6:08 Olof Johansson
  2007-01-29 18:22 ` Stephen Hemminger
                   ` (3 more replies)
  0 siblings, 4 replies; 23+ messages in thread
From: Olof Johansson @ 2007-01-29  6:08 UTC (permalink / raw)
  To: jgarzik; +Cc: netdev

Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)

Basic enablement, will be complemented with performance enhancements
over time. PHY support will be added as well.

This patch still uses the numerical PCI vendor id; it will be replaced
when the pci_ids.h change goes in (same as the other currently pending
drivers).

Signed-off-by: Olof Johansson <olof@lixom.net>


Index: merge/drivers/net/Kconfig
===================================================================
--- merge.orig/drivers/net/Kconfig
+++ merge/drivers/net/Kconfig
@@ -2348,6 +2348,13 @@ config QLA3XXX
 	  To compile this driver as a module, choose M here: the module
 	  will be called qla3xxx.
 
+config PASEMI_MAC
+	tristate "PA Semi 1/10Gbit MAC"
+	depends on PPC64 && PCI
+	help
+	  This driver supports the on-chip 1/10Gbit Ethernet controller on
+	  PA Semi's PWRficient line of chips.
+
 endmenu
 
 #
Index: merge/drivers/net/Makefile
===================================================================
--- merge.orig/drivers/net/Makefile
+++ merge/drivers/net/Makefile
@@ -196,6 +196,7 @@ obj-$(CONFIG_SMC91X) += smc91x.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
 
 obj-$(CONFIG_MACB) += macb.o
 
Index: merge/drivers/net/pasemi_mac.c
===================================================================
--- /dev/null
+++ merge/drivers/net/pasemi_mac.c
@@ -0,0 +1,797 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/checksum.h>
+
+#include "pasemi_mac.h"
+
+#define INITIAL_RX_RING_SIZE 512
+#define INITIAL_TX_RING_SIZE 512
+
+#define BUF_SIZE 2048
+
+#define PAS_DMA_MAX_IF     40
+#define PAS_DMA_MAX_RXCH   8
+#define PAS_DMA_MAX_TXCH   8
+
+/* XXXOJN these should come out of the device tree some day */
+#define PAS_DMA_CAP_BASE   0xe00d0040
+#define PAS_DMA_CAP_SIZE   0x100
+#define PAS_DMA_COM_BASE   0xe00d0100
+#define PAS_DMA_COM_SIZE   0x100
+
+static irqreturn_t pasemi_mac_tx_intr(int, void *);
+static irqreturn_t pasemi_mac_rx_intr(int, void *);
+static int pasemi_mac_clean_tx(struct pasemi_mac *mac);
+static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit);
+
+static struct pasdma_status *dma_status;
+
+static int pasemi_set_mac_addr(struct pasemi_mac *mac)
+{
+	struct pci_dev *pdev = mac->pdev;
+	struct device_node *dn = pci_device_to_OF_node(pdev);
+	const u8 *maddr;
+	u8 addr[6];
+
+	if (!dn) {
+		dev_dbg(&pdev->dev,
+			  "No device node for mac, not configuring\n");
+		return -ENOENT;
+	}
+
+	maddr = get_property(dn, "mac-address", NULL);
+	if (maddr == NULL) {
+		dev_warn(&pdev->dev,
+			 "no mac address in device tree, not configuring\n");
+		return -ENOENT;
+	}
+
+	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
+		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
+		dev_warn(&pdev->dev,
+			 "can't parse mac address, not configuring\n");
+		return -EINVAL;
+	}
+
+	memcpy(mac->mac_addr, addr, sizeof(addr));
+	return 0;
+}
+
+static void pasemi_mac_setup_rx_resources(struct net_device *dev)
+{
+	struct pasemi_mac_rxring *ring;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	int chan_id = mac->dma_rxch;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+
+	ring->count = INITIAL_RX_RING_SIZE;
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer)*ring->count,
+				  GFP_KERNEL);
+
+	/* Allocate descriptors */
+	ring->desc = (void *)__get_free_pages(GFP_KERNEL,
+				      get_order(ring->count *
+				      sizeof(struct pas_dma_xct_descr)));
+	ring->dma = virt_to_phys(ring->desc);
+	memset(ring->desc, 0, ring->count * sizeof(struct pas_dma_xct_descr));
+
+	ring->buffers = (void *)__get_free_pages(GFP_KERNEL,
+					 get_order(ring->count * sizeof(u64)));
+	ring->buf_dma = virt_to_phys(ring->buffers);
+	memset(ring->buffers, 0, ring->count * sizeof(u64));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
+			       PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
+			       PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
+			       PAS_DMA_RXCHAN_BASEU_SIZ(INITIAL_RX_RING_SIZE >> 2));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
+			       PAS_DMA_RXCHAN_CFG_HBU(1));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
+			       PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
+			       PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
+			       PAS_DMA_RXINT_BASEU_SIZ(INITIAL_RX_RING_SIZE >> 3));
+
+	ring->next_to_fill = 0; ring->next_to_clean = 0;
+	mac->rx = ring;
+}
+
+
+static void pasemi_mac_setup_tx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	u32 val;
+	int chan_id = mac->dma_txch;
+	struct pasemi_mac_txring *ring;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+
+	ring->count = INITIAL_TX_RING_SIZE;
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer)*ring->count,
+				  GFP_KERNEL);
+	/* Allocate descriptors */
+	ring->desc = (void *)__get_free_pages(GFP_KERNEL,
+				      get_order(ring->count *
+				      sizeof(struct pas_dma_xct_descr)));
+	ring->dma = virt_to_phys(ring->desc);
+
+	memset(ring->desc, 0, ring->count * sizeof(struct pas_dma_xct_descr));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
+			       PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
+	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
+	val |= PAS_DMA_TXCHAN_BASEU_SIZ(INITIAL_TX_RING_SIZE >> 2);
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
+			       PAS_DMA_TXCHAN_CFG_TY_IFACE |
+			       PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
+			       PAS_DMA_TXCHAN_CFG_UP |
+			       PAS_DMA_TXCHAN_CFG_WT(2));
+
+	ring->next_to_use = 0; ring->next_to_clean = 0;
+	mac->tx = ring;
+}
+
+static noinline void pasemi_mac_free_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < mac->tx->count; i++) {
+		if (INFO(mac->tx, i).dma) {
+			pr_debug("cleaning tx %d, dma addr %lx\n", i, INFO(mac->tx, i).dma);
+			if (INFO(mac->tx, i).skb)
+				dev_kfree_skb_any(INFO(mac->tx, i).skb);
+			INFO(mac->tx, i).dma = 0;
+			INFO(mac->tx, i).skb = 0;
+			DESCR(mac->tx, i).mactx = 0;
+			DESCR(mac->tx, i).ptr = 0;
+		}
+	}
+
+	/* Add free of all data structures here */
+	free_pages((unsigned long)mac->tx->desc, get_order(
+			mac->tx->count * sizeof(struct pas_dma_xct_descr)));
+
+	kfree(mac->tx);
+	mac->tx = NULL;
+
+	for (i = 0; i < mac->rx->count; i++) {
+		if (INFO(mac->rx, i).dma) {
+			pr_debug("cleaning rx %d, dma addr %lx\n", i, INFO(mac->rx, i).dma);
+			if (INFO(mac->rx, i).skb)
+				dev_kfree_skb_any(INFO(mac->rx, i).skb);
+			INFO(mac->rx, i).dma = 0;
+			INFO(mac->rx, i).skb = 0;
+			DESCR(mac->rx, i).macrx = 0;
+			DESCR(mac->rx, i).ptr = 0;
+		}
+	}
+
+	free_pages((unsigned long)mac->rx->desc, get_order(mac->rx->count *
+		   sizeof(struct pas_dma_xct_descr)));
+
+	free_pages((unsigned long)mac->rx->buffers,
+		   get_order(mac->rx->count * sizeof(u64)));
+
+	kfree(mac->rx);
+	mac->rx = NULL;
+}
+
+static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	dma_addr_t dma;
+	struct sk_buff *skb;
+	int start = mac->rx->next_to_fill;
+	int count;
+
+	count = ((mac->rx->next_to_clean & ~7) + mac->rx->count -
+		 mac->rx->next_to_fill) % mac->rx->count;
+
+	if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0)) {
+		pr_debug("first time fill, clean %d fill %d\n",
+			 mac->rx->next_to_clean, mac->rx->next_to_fill);
+		count = mac->rx->count - 8;
+	}
+
+	/* Limit so we don't go into the last cache line */
+	count -= 8;
+
+	if (count <= 0)
+		return;
+
+	for (i = start; i < start+count; i++) {
+		skb = dev_alloc_skb(BUF_SIZE);
+
+		if (!skb)
+			return;
+
+		skb->dev = dev;
+
+		dma = virt_to_phys(skb->data);
+		INFO(mac->rx, i).skb = skb;
+		INFO(mac->rx, i).dma = dma;
+		BUFF(mac->rx, i) = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
+
+		pr_debug("Adding buffer slot %d, addr %lx len %x raw %lx @%p (DESCR @%p)\n",
+		       i, dma, BUF_SIZE, XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma), &DESCR(mac->rx, i),
+			&BUFF(mac->rx, i));
+
+	}
+
+	wmb();
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
+			       count);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_INCR(mac->dma_if),
+			       count);
+
+	mac->rx->next_to_fill += count;
+}
+
+static int pasemi_mac_open(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int flags;
+	int ret;
+
+	pr_debug("pasemi_mac_open\n");
+
+	/* enable rx section */
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
+			       PAS_DMA_COM_RXCMD_EN);
+
+	/* enable tx section */
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
+			       PAS_DMA_COM_TXCMD_EN);
+
+	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
+		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
+		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);
+
+	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
+		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
+
+	flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
+			       PAS_IOB_DMA_RXCH_CFG_CNTTH(30));
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
+
+	pasemi_mac_setup_rx_resources(dev);
+	pasemi_mac_setup_tx_resources(dev);
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
+			       PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
+			       PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));
+
+	/* enable rx if */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+			       PAS_DMA_RXINT_RCMDSTA_EN);
+
+	/* enable rx channel */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+			       PAS_DMA_RXCHAN_CCMDSTA_EN |
+			       PAS_DMA_RXCHAN_CCMDSTA_DU);
+
+	/* enable tx channel */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+			       PAS_DMA_TXCHAN_TCMDSTA_EN);
+
+	pasemi_mac_replenish_rx_ring(dev);
+
+	netif_start_queue(dev);
+	netif_poll_enable(dev);
+
+	ret = request_irq(128 + mac->dma_txch, &pasemi_mac_tx_intr,
+			  IRQF_DISABLED, "pasemi_mac tx", dev);
+	if (ret)
+		printk("request_irq of irq %d failed: %d\n",
+		       mac->dma_pdev->irq + mac->dma_txch, ret);
+
+	ret = request_irq(128 + 20 + mac->dma_rxch, &pasemi_mac_rx_intr,
+			  IRQF_DISABLED, "pasemi_mac rx", dev);
+	if (ret)
+		printk("request_irq of irq %d failed: %d\n",
+		       mac->dma_pdev->irq + 20 + mac->dma_rxch, ret);
+
+	return 0;
+}
+
+static int pasemi_mac_close(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int stat;
+
+	netif_stop_queue(dev);
+
+	/* Clean out any pending buffers */
+	pasemi_mac_clean_tx(mac);
+	pasemi_mac_clean_rx(mac, mac->rx->count);
+
+	/* Disable interface */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+			       PAS_DMA_TXCHAN_TCMDSTA_ST);
+	pci_write_config_dword(mac->dma_pdev,
+		      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+		      PAS_DMA_RXINT_RCMDSTA_ST);
+	pci_write_config_dword(mac->dma_pdev,
+		      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+		      PAS_DMA_RXCHAN_CCMDSTA_ST);
+
+	do {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+				      &stat);
+	} while (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT);
+
+	do {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+				      &stat);
+	} while (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT);
+
+	do {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+				      &stat);
+	} while (stat & PAS_DMA_RXINT_RCMDSTA_ACT);
+
+	/* Then, disable the channel. This must be done separately from
+	 * stopping, since you can't disable when active.
+	 */
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+
+	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
+	free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev);
+
+	/* Free resources */
+	pasemi_mac_free_resources(dev);
+
+	return 0;
+}
+
+static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	struct pasemi_mac_txring *txring;
+	u64 flags;
+	dma_addr_t map;
+
+	if (mac->tx->next_to_clean+mac->tx->count == mac->tx->next_to_use)
+		pasemi_mac_clean_tx(mac);
+
+	mac->stats.tx_packets++;
+	mac->stats.tx_bytes += skb->len;
+
+	txring = mac->tx;
+
+	flags = XCT_MACTX_O | XCT_MACTX_ST |
+		XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		switch (skb->nh.iph->protocol) {
+		case IPPROTO_TCP:
+			flags |= XCT_MACTX_CSUM_TCP;
+			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
+			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+			break;
+		case IPPROTO_UDP:
+			flags |= XCT_MACTX_CSUM_UDP;
+			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
+			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+			break;
+		}
+	}
+
+	map = virt_to_phys(skb->data);
+
+	DESCR(txring, txring->next_to_use).mactx = flags |
+						XCT_MACTX_LLEN(skb->len);
+	DESCR(txring, txring->next_to_use).ptr = XCT_PTR_LEN(skb->len) |
+						XCT_PTR_ADDR(map);
+	INFO(txring, txring->next_to_use).dma = map;
+	INFO(txring, txring->next_to_use).skb = skb;
+	/* XXXOJN Deal with fragmented packets when larger MTU is supported */
+
+	txring->next_to_use++;
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
+
+	return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+
+	return &mac->stats;
+}
+
+static void pasemi_mac_set_rx_mode(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int flags;
+
+	return;
+
+	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
+
+	/* Set promiscuous */
+	if (dev->flags & IFF_PROMISC)
+		flags |= PAS_MAC_CFG_PCFG_PR;
+	else
+		flags &= ~PAS_MAC_CFG_PCFG_PR;
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
+}
+
+
+static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
+{
+	int i, j;
+	struct pas_dma_xct_descr descr;
+	struct pasemi_mac_buffer *info;
+	struct sk_buff *skb;
+	unsigned int len;
+	int start;
+	int count;
+	dma_addr_t dma;
+
+	start = mac->rx->next_to_clean;
+	count = 0;
+
+	for (i = start; i < start+mac->rx->count && count < limit; i++) {
+		rmb();
+		mb();
+		descr = DESCR(mac->rx, i);
+		if (!(descr.macrx & XCT_MACRX_O))
+			break;
+
+		count++;
+
+		info = NULL;
+
+		/* We have to scan for our skb since there's no way
+		 * to back-map them from the descriptor, and if we
+		 * have several receive channels then they might not
+		 * show up in the same order as they were put on the
+		 * interface ring.
+		 */
+
+		dma = (descr.ptr & XCT_PTR_ADDR_M);
+		for (j = start; j < start+mac->rx->count; j++)
+			if (INFO(mac->rx, j).dma == dma) {
+				info = &INFO(mac->rx, j);
+				break;
+			}
+
+		BUG_ON(!info);
+
+		skb = info->skb;
+
+		len = (descr.macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
+
+		skb_put(skb, len);
+
+		skb->protocol = eth_type_trans(skb, mac->netdev);
+
+		if ((descr.macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
+			skb->ip_summed = CHECKSUM_COMPLETE;
+			skb->csum = (descr.macrx & XCT_MACRX_CSUM_M) >>
+					   XCT_MACRX_CSUM_S;
+		} else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		mac->stats.rx_bytes += len;
+		mac->stats.rx_packets++;
+
+		netif_receive_skb(skb);
+
+		DESCR(mac->rx, i).ptr = 0;
+		DESCR(mac->rx, i).macrx = 0;
+		info->dma = 0;
+		info->skb = 0;
+		mb();
+	}
+
+	mac->rx->next_to_clean += count;
+	pasemi_mac_replenish_rx_ring(mac->netdev);
+
+	return count;
+}
+
+static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
+{
+	int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+	int start;
+	int count;
+
+	start = mac->tx->next_to_clean;
+	count = 0;
+
+	for (i = start; i < mac->tx->next_to_use; i++) {
+		dp = &DESCR(mac->tx, i);
+		if (!dp || (dp->mactx & XCT_MACTX_O))
+			break;
+
+		count++;
+
+		info = &INFO(mac->tx, i);
+
+		dev_kfree_skb_irq(info->skb);
+		info->skb = NULL;
+		info->dma = 0;
+		dp->mactx = 0;
+		dp->ptr = 0;
+	}
+	mac->tx->next_to_clean += count;
+	return count;
+}
+
+
+static int pasemi_mac_poll(struct net_device *dev, int *budget)
+{
+	int pkts, limit = min(*budget, dev->quota);
+	struct pasemi_mac *mac = netdev_priv(dev);
+
+	pkts = pasemi_mac_clean_rx(mac, limit);
+
+	if (pkts < limit) {
+		/* all done, no more packets present */
+		netif_rx_complete(dev);
+
+		/* re-enable receive interrupts */
+		pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+				       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
+		return 0;
+	} else {
+		/* used up our quantum, so reschedule */
+		dev->quota -= pkts;
+		*budget -= pkts;
+		return 1;
+	}
+}
+
+
+static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int reg;
+
+	netif_rx_schedule(dev);
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));
+
+	reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
+	      PAS_IOB_DMA_RXCH_RESET_DINTC;
+	if (*mac->rx_status & PAS_STATUS_TIMER)
+		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
+
+	pci_write_config_dword(mac->iob_pdev,
+			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
+
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int reg;
+
+	pasemi_mac_clean_tx(mac);
+
+	reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
+	if (*mac->tx_status & PAS_STATUS_TIMER)
+		reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
+			       reg);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit
+pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int index = 0;
+	struct net_device *dev;
+	struct pasemi_mac *mac;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pasemi_mac: Could not enable device.\n");
+		return -ENODEV;
+	}
+	dev = alloc_etherdev(sizeof(struct pasemi_mac));
+	if (dev == NULL) {
+		dev_err(&pdev->dev,
+			"pasemi_mac: Could not allocate ethernet device.\n");
+		return -ENODEV;
+	}
+	SET_MODULE_OWNER(dev);
+
+	pci_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	mac = netdev_priv(dev);
+	memset(mac, 0, sizeof(struct pasemi_mac));
+
+	mac->pdev = pdev;
+	mac->netdev = dev;
+	mac->dma_pdev = pci_find_device(0x1959, 0xa007, NULL);
+	mac->iob_pdev = pci_find_device(0x1959, 0xa001, NULL);
+
+	if (!mac->iob_pdev) {
+		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
+		return -ENODEV;
+	}
+
+	/* These should come out of the device tree eventually */
+	mac->dma_txch = index;
+	mac->dma_rxch = index;
+
+	/* We probe GMAC before XAUI, but the DMA interfaces are
+	 * in XAUI, GMAC order.
+	 */
+	if (index < 4)
+		mac->dma_if = index + 2;
+	else
+		mac->dma_if = index - 4;
+	index++;
+
+	switch (pdev->device) {
+	case 0xa005:
+		mac->type = MAC_TYPE_GMAC;
+		break;
+	case 0xa006:
+		mac->type = MAC_TYPE_XAUI;
+		break;
+	default:
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* get mac addr from device tree */
+	if (pasemi_set_mac_addr(mac)) {
+		err = -ENODEV;
+		goto out;
+	}
+	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
+
+	strcpy(dev->name, "eth%d");
+
+	dev->open = pasemi_mac_open;
+	dev->stop = pasemi_mac_close;
+	dev->hard_start_xmit = pasemi_mac_start_tx;
+	dev->get_stats = pasemi_mac_get_stats;
+	dev->set_multicast_list = pasemi_mac_set_rx_mode;
+	dev->weight = 64;
+	dev->poll = pasemi_mac_poll;
+	dev->features = NETIF_F_HW_CSUM;
+
+	/* The dma status structure is located in the I/O bridge, and
+	 * is cache coherent.
+	 */
+	if (!dma_status)
+		/* XXXOJN This should come from the device tree */
+		dma_status = __ioremap(0xfd800000, 0x1000, 0);
+
+	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
+	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
+
+	err = register_netdev(dev);
+
+	if (err)
+		printk("register_netdev failed with error %d\n", err);
+	else
+		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
+		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
+		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
+		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+	return err;
+
+out:
+	printk(KERN_ERR "pasemi_mac: init failed\n");
+
+	pci_disable_device(pdev);
+	free_netdev(dev);
+	return err;
+}
+
+static struct pci_device_id pasemi_mac_pci_tbl[] = {
+	{ PCI_DEVICE(0x1959, 0xa005) },
+	{ PCI_DEVICE(0x1959, 0xa006) },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
+
+static struct pci_driver pasemi_mac_driver = {
+	.name = "pasemi_mac",
+	.id_table = pasemi_mac_pci_tbl,
+	.probe = pasemi_mac_probe,
+};
+
+static void __exit pasemi_mac_cleanup_module(void)
+{
+	pci_unregister_driver(&pasemi_mac_driver);
+}
+
+int pasemi_mac_init_module(void)
+{
+	return pci_module_init(&pasemi_mac_driver);
+}
+module_init(pasemi_mac_init_module);
+module_exit(pasemi_mac_cleanup_module);
Index: merge/drivers/net/pasemi_mac.h
===================================================================
--- /dev/null
+++ merge/drivers/net/pasemi_mac.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright (C) 2006 PA Semi, Inc
+ *
+ * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and
+ * hardware register layouts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef PASEMI_MAC_H
+#define PASEMI_MAC_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+struct pasemi_mac_txring {
+	struct pas_dma_xct_descr	*desc;
+	dma_addr_t	 dma;
+	unsigned int	 size;
+	unsigned int	 count;
+	unsigned int	 next_to_use;
+	unsigned int	 next_to_clean;
+	unsigned short	 last_count;
+	struct pasemi_mac_buffer *desc_info;
+};
+
+struct pasemi_mac_rxring {
+	struct pas_dma_xct_descr	*desc;	/* RX channel descriptor ring */
+	dma_addr_t	 dma;
+	u64		*buffers;	/* RX interface buffer ring */
+	dma_addr_t	 buf_dma;
+	unsigned int	 size;
+	unsigned int	 count;
+	unsigned int	 next_to_fill;
+	unsigned int	 next_to_clean;
+	unsigned short	 last_count;
+	struct pasemi_mac_buffer *desc_info;
+};
+
+/* Number of unused descriptors, considering ring wraparounds */
+#define PASEMI_MAC_DESC_UNUSED(ring) ((((ring)->next_to_clean >		\
+					(ring)->next_to_use) ?		\
+					  0 :				\
+					  (ring)->count) +		\
+					  (ring)->next_to_clean -	\
+					  (ring)->next_to_use - 1)
+
+#define DESCR(ring, i) ((ring)->desc[i % ((ring)->count)])
+#define BUFF(ring, i) ((ring)->buffers[i % ((ring)->count)])
+#define INFO(ring, i) ((ring)->desc_info[i % ((ring)->count)])
+
+struct pasemi_mac {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct pci_dev *dma_pdev;
+	struct pci_dev *iob_pdev;
+	struct net_device_stats stats;
+
+	/* Pointer to the cacheable per-channel status registers */
+	uint64_t	*rx_status;
+	uint64_t	*tx_status;
+
+	uint8_t		type;
+#define MAC_TYPE_GMAC	1
+#define MAC_TYPE_XAUI	2
+	uint32_t	dma_txch;
+	uint32_t	dma_if;
+	uint32_t	dma_rxch;
+
+	uint8_t		mac_addr[6];
+
+	struct timer_list	rxtimer;
+
+	struct pasemi_mac_txring *tx;
+	struct pasemi_mac_rxring *rx;
+};
+
+struct pasemi_mac_buffer {
+	struct sk_buff *skb;
+	dma_addr_t	dma;
+};
+
+
+
+#define PAS_MAC_CFG_PCFG		0x80
+#define    PAS_MAC_CFG_PCFG_PE		0x80000000
+#define    PAS_MAC_CFG_PCFG_CE		0x40000000
+#define    PAS_MAC_CFG_PCFG_BU		0x20000000
+#define    PAS_MAC_CFG_PCFG_TT		0x10000000
+#define    PAS_MAC_CFG_PCFG_TSR_M	0x0c000000
+#define    PAS_MAC_CFG_PCFG_TSR_10M	0x00000000
+#define    PAS_MAC_CFG_PCFG_TSR_100M	0x04000000
+#define    PAS_MAC_CFG_PCFG_TSR_1G	0x08000000
+#define    PAS_MAC_CFG_PCFG_TSR_10G	0x0c000000
+#define    PAS_MAC_CFG_PCFG_T24		0x02000000
+#define    PAS_MAC_CFG_PCFG_PR		0x01000000
+#define    PAS_MAC_CFG_PCFG_CRO_M	0x00ff0000
+#define    PAS_MAC_CFG_PCFG_CRO_S	16
+#define    PAS_MAC_CFG_PCFG_IPO_M	0x0000ff00
+#define    PAS_MAC_CFG_PCFG_IPO_S	8
+#define    PAS_MAC_CFG_PCFG_S1		0x00000080
+#define    PAS_MAC_CFG_PCFG_IO_M	0x00000060
+#define    PAS_MAC_CFG_PCFG_IO_MAC	0x00000000
+#define    PAS_MAC_CFG_PCFG_IO_OFF	0x00000020
+#define    PAS_MAC_CFG_PCFG_IO_IND_ETH	0x00000040
+#define    PAS_MAC_CFG_PCFG_IO_IND_IP	0x00000060
+#define    PAS_MAC_CFG_PCFG_LP		0x00000010
+#define    PAS_MAC_CFG_PCFG_TS		0x00000008
+#define    PAS_MAC_CFG_PCFG_HD		0x00000004
+#define    PAS_MAC_CFG_PCFG_SPD_M	0x00000003
+#define    PAS_MAC_CFG_PCFG_SPD_10M	0x00000000
+#define    PAS_MAC_CFG_PCFG_SPD_100M	0x00000001
+#define    PAS_MAC_CFG_PCFG_SPD_1G	0x00000002
+#define    PAS_MAC_CFG_PCFG_SPD_10G	0x00000003
+#define PAS_MAC_CFG_TXP			0x98
+#define    PAS_MAC_CFG_TXP_FCF		0x01000000
+#define    PAS_MAC_CFG_TXP_FCE		0x00800000
+#define    PAS_MAC_CFG_TXP_FC		0x00400000
+#define    PAS_MAC_CFG_TXP_FPC_M	0x00300000
+#define    PAS_MAC_CFG_TXP_FPC_S	20
+#define    PAS_MAC_CFG_TXP_FPC(x)	(((x) << PAS_MAC_CFG_TXP_FPC_S) & PAS_MAC_CFG_TXP_FPC_M)
+#define    PAS_MAC_CFG_TXP_RT		0x00080000
+#define    PAS_MAC_CFG_TXP_BL		0x00040000
+#define    PAS_MAC_CFG_TXP_SL_M		0x00030000
+#define    PAS_MAC_CFG_TXP_SL_S		16
+#define    PAS_MAC_CFG_TXP_SL(x)	(((x) << PAS_MAC_CFG_TXP_SL_S) & PAS_MAC_CFG_TXP_SL_M)
+#define    PAS_MAC_CFG_TXP_COB_M	0x0000f000
+#define    PAS_MAC_CFG_TXP_COB_S	12
+#define    PAS_MAC_CFG_TXP_COB(x)	(((x) << PAS_MAC_CFG_TXP_COB_S) & PAS_MAC_CFG_TXP_COB_M)
+#define    PAS_MAC_CFG_TXP_TIFT_M	0x00000f00
+#define    PAS_MAC_CFG_TXP_TIFT_S	8
+#define    PAS_MAC_CFG_TXP_TIFT(x)	(((x) << PAS_MAC_CFG_TXP_TIFT_S) & PAS_MAC_CFG_TXP_TIFT_M)
+#define    PAS_MAC_CFG_TXP_TIFG_M	0x000000ff
+#define    PAS_MAC_CFG_TXP_TIFG_S	0
+#define    PAS_MAC_CFG_TXP_TIFG(x)	(((x) << PAS_MAC_CFG_TXP_TIFG_S) & PAS_MAC_CFG_TXP_TIFG_M)
+
+#define PAS_MAC_IPC_CHNL		0x208
+#define    PAS_MAC_IPC_CHNL_DCHNO_M	0x003f0000
+#define    PAS_MAC_IPC_CHNL_DCHNO_S	16
+#define    PAS_MAC_IPC_CHNL_DCHNO(x)	(((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \
+					 PAS_MAC_IPC_CHNL_DCHNO_M)
+#define    PAS_MAC_IPC_CHNL_BCH_M	0x0000003f
+#define    PAS_MAC_IPC_CHNL_BCH_S	0
+#define    PAS_MAC_IPC_CHNL_BCH(x)	(((x) << PAS_MAC_IPC_CHNL_BCH_S) & \
+					 PAS_MAC_IPC_CHNL_BCH_M)
+
+/* All these registers live in the PCI configuration space for the DMA PCI
+ * device. Use the normal PCI config access functions for them.
+ */
+
+#define PAS_DMA_COM_TXCMD	0x100	/* Transmit Command Register  */
+#define    PAS_DMA_COM_TXCMD_EN		0x00000001 /* enable */
+#define PAS_DMA_COM_TXSTA	0x104	/* Transmit Status Register   */
+#define    PAS_DMA_COM_TXSTA_ACT	0x00000001 /* active */
+#define PAS_DMA_COM_RXCMD	0x108	/* Receive Command Register   */
+#define    PAS_DMA_COM_RXCMD_EN		0x00000001 /* enable */
+#define PAS_DMA_COM_RXSTA	0x10c	/* Receive Status Register    */
+#define    PAS_DMA_COM_RXSTA_ACT	0x00000001 /* active */
+
+
+#define _PAS_DMA_RXINT_STRIDE		0x20
+#define PAS_DMA_RXINT_RCMDSTA(i)	(0x200+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_RCMDSTA_EN	0x00000001
+#define    PAS_DMA_RXINT_RCMDSTA_ST	0x00000002
+#define    PAS_DMA_RXINT_RCMDSTA_OO	0x00000100
+#define    PAS_DMA_RXINT_RCMDSTA_BP	0x00000200
+#define    PAS_DMA_RXINT_RCMDSTA_DR	0x00000400
+#define    PAS_DMA_RXINT_RCMDSTA_BT	0x00000800
+#define    PAS_DMA_RXINT_RCMDSTA_TB	0x00001000
+#define    PAS_DMA_RXINT_RCMDSTA_ACT	0x00010000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_M	0xfffe0000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_S	17
+#define PAS_DMA_RXINT_INCR(i)		(0x210+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_INCR_INCR_M	0x0000ffff
+#define    PAS_DMA_RXINT_INCR_INCR_S	0
+#define    PAS_DMA_RXINT_INCR_INCR(x)	((x) & 0x0000ffff)
+#define PAS_DMA_RXINT_BASEL(i)		(0x218+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEL_BRBL(x)	((x) & ~0x3f)
+#define PAS_DMA_RXINT_BASEU(i)		(0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEU_BRBH(x)	((x) & 0xfff)
+#define    PAS_DMA_RXINT_BASEU_SIZ_M	0x3fff0000	/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXINT_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXINT_BASEU_SIZ(x)	(((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
+					 PAS_DMA_RXINT_BASEU_SIZ_M)
+
+
+#define _PAS_DMA_TXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_TXCHAN_TCMDSTA	0x300	/* Command / Status		*/
+#define _PAS_DMA_TXCHAN_CFG	0x304	/* Configuration		*/
+#define _PAS_DMA_TXCHAN_DSCRBU	0x308	/* Descriptor BU Allocation	*/
+#define _PAS_DMA_TXCHAN_INCR	0x310	/* Descriptor increment		*/
+#define _PAS_DMA_TXCHAN_CNT	0x314	/* Descriptor count/offset	*/
+#define _PAS_DMA_TXCHAN_BASEL	0x318	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_TXCHAN_BASEU	0x31c	/*			(high)	*/
+#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_TCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ACT	0x00010000	/* Active */
+#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_CFG_TY_IFACE	0x00000000	/* Type = interface */
+#define    PAS_DMA_TXCHAN_CFG_TATTR_M	0x0000003c
+#define    PAS_DMA_TXCHAN_CFG_TATTR_S	2
+#define    PAS_DMA_TXCHAN_CFG_TATTR(x)	(((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
+					 PAS_DMA_TXCHAN_CFG_TATTR_M)
+#define    PAS_DMA_TXCHAN_CFG_WT_M	0x000001c0
+#define    PAS_DMA_TXCHAN_CFG_WT_S	6
+#define    PAS_DMA_TXCHAN_CFG_WT(x)	(((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
+					 PAS_DMA_TXCHAN_CFG_WT_M)
+#define    PAS_DMA_TXCHAN_CFG_CF	0x00001000	/* Clean first line */
+#define    PAS_DMA_TXCHAN_CFG_CL	0x00002000	/* Clean last line */
+#define    PAS_DMA_TXCHAN_CFG_UP	0x00004000	/* update tx descr when sent */
+#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_TXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_TXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_TXCHAN_BASEU_SIZ_M)
+
+#define _PAS_DMA_RXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_RXCHAN_CCMDSTA	0x800	/* Command / Status		*/
+#define _PAS_DMA_RXCHAN_CFG	0x804	/* Configuration		*/
+#define _PAS_DMA_RXCHAN_INCR	0x810	/* Descriptor increment		*/
+#define _PAS_DMA_RXCHAN_CNT	0x814	/* Descriptor count/offset	*/
+#define _PAS_DMA_RXCHAN_BASEL	0x818	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_RXCHAN_BASEU	0x81c	/*			(high)	*/
+#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ACT	0x00010000	/* Active */
+#define    PAS_DMA_RXCHAN_CCMDSTA_DU	0x00020000
+#define PAS_DMA_RXCHAN_CFG(c)     (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CFG_HBU_M	0x00000380
+#define    PAS_DMA_RXCHAN_CFG_HBU_S	7
+#define    PAS_DMA_RXCHAN_CFG_HBU(x)	(((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
+					 PAS_DMA_RXCHAN_CFG_HBU_M)
+#define PAS_DMA_RXCHAN_INCR(c)    (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEL(c)   (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_RXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_RXCHAN_BASEU(c)   (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_RXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_RXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_RXCHAN_BASEU_SIZ_M)
+
+/* status register layout in IOB region, at 0xfb800000 */
+struct pasdma_status {
+	uint64_t rx_sta[64];
+	uint64_t tx_sta[20];
+};
+
+#define    PAS_STATUS_PCNT_M		0x000000000000ffff
+#define    PAS_STATUS_PCNT_S		0
+#define    PAS_STATUS_DCNT_M		0x00000000ffff0000
+#define    PAS_STATUS_DCNT_S		16
+#define    PAS_STATUS_BPCNT_M		0x0000ffff00000000
+#define    PAS_STATUS_BPCNT_S		32
+#define    PAS_STATUS_TIMER		0x1000000000000000
+#define    PAS_STATUS_ERROR		0x2000000000000000
+#define    PAS_STATUS_SOFT		0x4000000000000000
+#define    PAS_STATUS_INT		0x8000000000000000
+
+#define PAS_IOB_DMA_RXCH_CFG(i)		(0x1100 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_TXCH_CFG(i)		(0x1200 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_RXCH_STAT(i)	(0x1300 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_TXCH_STAT(i)	(0x1400 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_RXCH_RESET(i)	(0x1500 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S	0
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_RXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_RXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_RXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_RXCH_RESET_PINTC		0x00000001
+#define PAS_IOB_DMA_TXCH_RESET(i)	(0x1600 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S	0
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_TXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_TXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_TXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_TXCH_RESET_PINTC		0x00000001
+
+#define PAS_IOB_DMA_COM_TIMEOUTCFG		0x1700
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M	0x00ffffff
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S	0
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)	(((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
+						 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
+
+struct pas_dma_xct_descr {
+	union {
+ 		u64	mactx;
+#define XCT_MACTX_T		0x8000000000000000
+#define XCT_MACTX_ST		0x4000000000000000
+#define XCT_MACTX_NORES		0x0000000000000000
+#define XCT_MACTX_8BRES		0x1000000000000000
+#define XCT_MACTX_24BRES	0x2000000000000000
+#define XCT_MACTX_40BRES	0x3000000000000000
+#define XCT_MACTX_I		0x0800000000000000
+#define XCT_MACTX_O		0x0400000000000000
+#define XCT_MACTX_E		0x0200000000000000
+#define XCT_MACTX_VLAN_M	0x0180000000000000
+#define XCT_MACTX_VLAN_NOP	0x0000000000000000
+#define XCT_MACTX_VLAN_REMOVE	0x0080000000000000
+#define XCT_MACTX_VLAN_INSERT   0x0100000000000000
+#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000
+#define XCT_MACTX_CRC_M		0x0060000000000000
+#define XCT_MACTX_CRC_NOP	0x0000000000000000
+#define XCT_MACTX_CRC_INSERT	0x0020000000000000
+#define XCT_MACTX_CRC_PAD	0x0040000000000000
+#define XCT_MACTX_CRC_REPLACE	0x0060000000000000
+#define XCT_MACTX_SS		0x0010000000000000
+#define XCT_MACTX_LLEN_M	0x00007fff00000000
+#define XCT_MACTX_LLEN_S	32ull
+#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & XCT_MACTX_LLEN_M)
+#define XCT_MACTX_IPH_M		0x00000000f8000000
+#define XCT_MACTX_IPH_S		27ull
+#define XCT_MACTX_IPH(x)	((((long)(x)) << XCT_MACTX_IPH_S) & XCT_MACTX_IPH_M)
+#define XCT_MACTX_IPO_M		0x0000000007c00000
+#define XCT_MACTX_IPO_S		22ull
+#define XCT_MACTX_IPO(x)	((((long)(x)) << XCT_MACTX_IPO_S) & XCT_MACTX_IPO_M)
+#define XCT_MACTX_CSUM_M	0x0000000000000060
+#define XCT_MACTX_CSUM_NOP	0x0000000000000000
+#define XCT_MACTX_CSUM_TCP	0x0000000000000040
+#define XCT_MACTX_CSUM_UDP	0x0000000000000060
+#define XCT_MACTX_V6		0x0000000000000010
+#define XCT_MACTX_C		0x0000000000000004
+#define XCT_MACTX_AL2		0x0000000000000002
+		u64	macrx;
+#define XCT_MACRX_T		0x8000000000000000
+#define XCT_MACRX_ST		0x4000000000000000
+#define XCT_MACRX_NORES		0x0000000000000000
+#define XCT_MACRX_8BRES		0x1000000000000000
+#define XCT_MACRX_24BRES	0x2000000000000000
+#define XCT_MACRX_40BRES	0x3000000000000000
+#define XCT_MACRX_O		0x0400000000000000
+#define XCT_MACRX_E		0x0200000000000000
+#define XCT_MACRX_FF		0x0100000000000000
+#define XCT_MACRX_PF		0x0080000000000000
+#define XCT_MACRX_OB		0x0040000000000000
+#define XCT_MACRX_OD		0x0020000000000000
+#define XCT_MACRX_FS		0x0010000000000000
+#define XCT_MACRX_NB_M		0x000fc00000000000
+#define XCT_MACRX_NB_S		46ULL
+#define XCT_MACRX_NB(x)		((((long)(x)) << XCT_MACRX_NB_S) & XCT_MACRX_NB_M)
+#define XCT_MACRX_LLEN_M	0x00003fff00000000
+#define XCT_MACRX_LLEN_S	32ULL
+#define XCT_MACRX_LLEN(x)	((((long)(x)) << XCT_MACRX_LLEN_S) & XCT_MACRX_LLEN_M)
+#define XCT_MACRX_CRC		0x0000000080000000
+#define XCT_MACRX_LEN_M		0x0000000060000000
+#define XCT_MACRX_LEN_TOOSHORT	0x0000000020000000
+#define XCT_MACRX_LEN_BELOWMIN	0x0000000040000000
+#define XCT_MACRX_LEN_TRUNC	0x0000000060000000
+#define XCT_MACRX_CAST_M	0x0000000018000000
+#define XCT_MACRX_CAST_UNI	0x0000000000000000
+#define XCT_MACRX_CAST_MULTI	0x0000000008000000
+#define XCT_MACRX_CAST_BROAD	0x0000000010000000
+#define XCT_MACRX_CAST_PAUSE	0x0000000018000000
+#define XCT_MACRX_VLC_M		0x0000000006000000
+#define XCT_MACRX_FM		0x0000000001000000
+#define XCT_MACRX_HTY_M		0x0000000000c00000
+#define XCT_MACRX_HTY_IPV4_OK	0x0000000000000000
+#define XCT_MACRX_HTY_IPV6 	0x0000000000400000
+#define XCT_MACRX_HTY_IPV4_BAD	0x0000000000800000
+#define XCT_MACRX_HTY_NONIP	0x0000000000c00000
+#define XCT_MACRX_IPP_M		0x00000000003f0000
+#define XCT_MACRX_IPP_S		16
+#define XCT_MACRX_CSUM_M	0x000000000000ffff
+#define XCT_MACRX_CSUM_S	0
+	};
+	union {
+		u64	ptr;
+#define XCT_PTR_T		0x8000000000000000
+#define XCT_PTR_LEN_M		0x7ffff00000000000
+#define XCT_PTR_LEN_S		44
+#define XCT_PTR_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & XCT_PTR_LEN_M)
+#define XCT_PTR_ADDR_M		0x00000fffffffffff
+#define XCT_PTR_ADDR_S		0
+#define XCT_PTR_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & XCT_PTR_ADDR_M)
+		u64	rxb;
+#define XCT_RXB_LEN_M		0x0ffff00000000000
+#define XCT_RXB_LEN_S		44
+#define XCT_RXB_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & XCT_PTR_LEN_M)
+#define XCT_RXB_ADDR_M		0x00000fffffffffff
+#define XCT_RXB_ADDR_S		0
+#define XCT_RXB_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & XCT_PTR_ADDR_M)
+	};
+};
+
+#endif /* PASEMI_MAC_H */

* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-29  6:08 [PATCH] PA Semi PWRficient Ethernet driver Olof Johansson
@ 2007-01-29 18:22 ` Stephen Hemminger
  2007-01-30  1:41   ` Olof Johansson
  2007-01-29 22:35 ` Francois Romieu
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 23+ messages in thread
From: Stephen Hemminger @ 2007-01-29 18:22 UTC (permalink / raw)
  To: Olof Johansson; +Cc: jgarzik, netdev

Basic initialization and setup comments.

> +static int pasemi_mac_open(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int flags;
> +	int ret;
> +
> +	pr_debug("pasemi_mac_open\n");

dev_dbg() ?

> +
> +	/* enable rx section */
> +	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
> +			       PAS_DMA_COM_RXCMD_EN);
> +
> +	/* enable tx section */
> +	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
> +			       PAS_DMA_COM_TXCMD_EN);
> +
> +	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
> +		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
> +		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);
> +
> +	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);
> +
> +	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
> +		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
> +
> +	flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
> +
> +	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
> +			       PAS_IOB_DMA_RXCH_CFG_CNTTH(30));
> +
> +	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
> +			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
> +
> +	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
> +
> +	pasemi_mac_setup_rx_resources(dev);
> +	pasemi_mac_setup_tx_resources(dev);
> +
> +	pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
> +			       PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
> +			       PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));
> +
> +	/* enable rx if */
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
> +			       PAS_DMA_RXINT_RCMDSTA_EN);
> +
> +	/* enable rx channel */
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
> +			       PAS_DMA_RXCHAN_CCMDSTA_EN |
> +			       PAS_DMA_RXCHAN_CCMDSTA_DU);
> +
> +	/* enable tx channel */
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
> +			       PAS_DMA_TXCHAN_TCMDSTA_EN);
> +
> +	pasemi_mac_replenish_rx_ring(dev);
> +
> +	netif_start_queue(dev);
> +	netif_poll_enable(dev);
> +
> +	ret = request_irq(128 + mac->dma_txch, &pasemi_mac_tx_intr,
> +			  IRQF_DISABLED, "pasemi_mac tx", dev);

Shouldn't you get the IRQ value from PCI config?
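
(Your free_irq() calls in pasemi_mac_close() already use

	mac->dma_pdev->irq + mac->dma_txch

so presumably request_irq() should be handed the same value, rather than
the hard-coded 128; just a guess from reading the patch.)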

> +	if (ret)
> +		printk("request_irq of irq %d failed: %d\n",
> +		       mac->dma_pdev->irq + mac->dma_txch, ret);
> +
> +	ret = request_irq(128 + 20 + mac->dma_rxch, &pasemi_mac_rx_intr,
> +			  IRQF_DISABLED, "pasemi_mac rx", dev);
> +	if (ret)
> +		printk("request_irq of irq %d failed: %d\n",
> +		       mac->dma_pdev->irq + 20 + mac->dma_rxch, ret);

You need to return the error code and unwind the other request_irq.
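
An untested sketch, keeping your hard-coded irq numbers for now: the second
request_irq() failure should drop the first irq and propagate the error.

	ret = request_irq(128 + 20 + mac->dma_rxch, &pasemi_mac_rx_intr,
			  IRQF_DISABLED, "pasemi_mac rx", dev);
	if (ret) {
		dev_err(&mac->pdev->dev,
			"request_irq of irq %d failed: %d\n",
			128 + 20 + mac->dma_rxch, ret);
		/* undo the tx irq we took just above */
		free_irq(128 + mac->dma_txch, dev);
		return ret;
	}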

> +	return 0;
> +}
> +
> +static int pasemi_mac_close(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int stat;
> +
> +	netif_stop_queue(dev);
> +
> +	/* Clean out any pending buffers */
> +	pasemi_mac_clean_tx(mac);
> +	pasemi_mac_clean_rx(mac, mac->rx->count);
> +
> +	/* Disable interface */
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
> +			       PAS_DMA_TXCHAN_TCMDSTA_ST);
> +	pci_write_config_dword(mac->dma_pdev,
> +		      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
> +		      PAS_DMA_RXINT_RCMDSTA_ST);
> +	pci_write_config_dword(mac->dma_pdev,
> +		      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
> +		      PAS_DMA_RXCHAN_CCMDSTA_ST);
> +
> +	do {
> +		pci_read_config_dword(mac->dma_pdev,
> +				      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
> +				      &stat);
> +	} while (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT);
> +
> +	do {
> +		pci_read_config_dword(mac->dma_pdev,
> +				      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
> +				      &stat);
> +	} while (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT);
> +
> +	do {
> +		pci_read_config_dword(mac->dma_pdev,
> +				      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
> +				      &stat);
> +	} while (stat & PAS_DMA_RXINT_RCMDSTA_ACT);
> +
> +	/* Then, disable the channel. This must be done separately from
> +	 * stopping, since you can't disable when active.
> +	 */
> +
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
> +
> +	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
> +	free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev);
> +
> +	/* Free resources */
> +	pasemi_mac_free_resources(dev);
> +
> +	return 0;
> +}
> +
> +static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	struct pasemi_mac_txring *txring;
> +	u64 flags;
> +	dma_addr_t map;
> +
> +	if (mac->tx->next_to_clean+mac->tx->count == mac->tx->next_to_use)
> +		pasemi_mac_clean_tx(mac);
> +
> +	mac->stats.tx_packets++;
> +	mac->stats.tx_bytes += skb->len;
> +
> +	txring = mac->tx;
> +
> +	flags = XCT_MACTX_O | XCT_MACTX_ST |
> +		XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
> +
> +	if (skb->ip_summed == CHECKSUM_PARTIAL) {
> +		switch (skb->nh.iph->protocol) {
> +		case IPPROTO_TCP:
> +			flags |= XCT_MACTX_CSUM_TCP;
> +			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
> +			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
> +			break;
> +		case IPPROTO_UDP:
> +			flags |= XCT_MACTX_CSUM_UDP;
> +			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
> +			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
> +			break;
> +		}
> +	}
> +
> +	map = virt_to_phys(skb->data);
> +
> +	DESCR(txring, txring->next_to_use).mactx = flags |
> +						XCT_MACTX_LLEN(skb->len);
> +	DESCR(txring, txring->next_to_use).ptr = XCT_PTR_LEN(skb->len) |
> +						XCT_PTR_ADDR(map);
> +	INFO(txring, txring->next_to_use).dma = map;
> +	INFO(txring, txring->next_to_use).skb = skb;
> +	/* XXXOJN Deal with fragmented packets when larger MTU is supported */
> +
> +	txring->next_to_use++;
> +
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);

You need to handle flow control and do netif_stop_queue/netif_wake_queue
when your transmit ring gets full.
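
Roughly (untested, reusing the PASEMI_MAC_DESC_UNUSED() macro from your
header; the exact threshold is a guess):

	/* in start_tx, once the descriptor has been handed to hardware */
	if (PASEMI_MAC_DESC_UNUSED(txring) == 0)
		netif_stop_queue(dev);

	/* in clean_tx, after descriptors have been reclaimed */
	if (netif_queue_stopped(mac->netdev) &&
	    PASEMI_MAC_DESC_UNUSED(mac->tx) > 0)
		netif_wake_queue(mac->netdev);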

> +
> +	return NETDEV_TX_OK;
> +}
> +
> +static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +
> +	return &mac->stats;
> +}
> +
> +static void pasemi_mac_set_rx_mode(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int flags;
> +
> +	return;
> +
> +	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
> +
> +	/* Set promiscuous */
> +	if (dev->flags & IFF_PROMISC)
> +		flags |= PAS_MAC_CFG_PCFG_PR;
> +	else
> +		flags &= ~PAS_MAC_CFG_PCFG_PR;
> +
> +	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
> +}
> +
> +
> +static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
> +{
> +	int i, j;
> +	struct pas_dma_xct_descr descr;
> +	struct pasemi_mac_buffer *info;
> +	struct sk_buff *skb;
> +	unsigned int len;
> +	int start;
> +	int count;
> +	dma_addr_t dma;
> +
> +	start = mac->rx->next_to_clean;
> +	count = 0;
> +
> +	for (i = start; i < start+mac->rx->count && count < limit; i++) {
> +		rmb();
> +		mb();
> +		descr = DESCR(mac->rx, i);
> +		if (!(descr.macrx & XCT_MACRX_O))
> +			break;
> +
> +		count++;
> +
> +		info = NULL;
> +
> +		/* We have to scan for our skb since there's no way
> +		 * to back-map them from the descriptor, and if we
> +		 * have several receive channels then they might not
> +		 * show up in the same order as they were put on the
> +		 * interface ring.
> +		 */
> +
> +		dma = (descr.ptr & XCT_PTR_ADDR_M);
> +		for (j = start; j < start+mac->rx->count; j++)
> +			if (INFO(mac->rx, j).dma == dma) {
> +				info = &INFO(mac->rx, j);
> +				break;
> +			}
> +
> +		BUG_ON(!info);
> +
> +		skb = info->skb;
> +
> +		len = (descr.macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
> +
> +		skb_put(skb, len);
> +
> +		skb->protocol = eth_type_trans(skb, mac->netdev);
> +
> +		if ((descr.macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
> +			skb->ip_summed = CHECKSUM_COMPLETE;
> +			skb->csum = (descr.macrx & XCT_MACRX_CSUM_M) >>
> +					   XCT_MACRX_CSUM_S;
> +		} else
> +			skb->ip_summed = CHECKSUM_NONE;
> +
> +		mac->stats.rx_bytes += len;
> +		mac->stats.rx_packets++;
> +
> +		netif_receive_skb(skb);
> +
> +		DESCR(mac->rx, i).ptr = 0;
> +		DESCR(mac->rx, i).macrx = 0;
> +		info->dma = 0;
> +		info->skb = 0;
> +		mb();
> +	}
> +
> +	mac->rx->next_to_clean += count;
> +	pasemi_mac_replenish_rx_ring(mac->netdev);
> +
> +	return count;
> +}
> +
> +static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
> +{
> +	int i;
> +	struct pasemi_mac_buffer *info;
> +	struct pas_dma_xct_descr *dp;
> +	int start;
> +	int count;
> +
> +	start = mac->tx->next_to_clean;
> +	count = 0;
> +
> +	for (i = start; i < mac->tx->next_to_use; i++) {
> +		dp = &DESCR(mac->tx, i);
> +		if (!dp || (dp->mactx & XCT_MACTX_O))
> +			break;
> +
> +		count++;
> +
> +		info = &INFO(mac->tx, i);
> +
> +		dev_kfree_skb_irq(info->skb);
> +		info->skb = NULL;
> +		info->dma = 0;
> +		dp->mactx = 0;
> +		dp->ptr = 0;
> +	}
> +	mac->tx->next_to_clean += count;
> +	return count;
> +}
> +
> +
> +static int pasemi_mac_poll(struct net_device *dev, int *budget)
> +{
> +	int pkts, limit = min(*budget, dev->quota);
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +
> +	pkts = pasemi_mac_clean_rx(mac, limit);
> +
> +	if (pkts < limit) {
> +		/* all done, no more packets present */
> +		netif_rx_complete(dev);
> +
> +		/* re-enable receive interrupts */
> +		pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
> +				       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
> +		return 0;
> +	} else {
> +		/* used up our quantum, so reschedule */
> +		dev->quota -= pkts;
> +		*budget -= pkts;
> +		return 1;
> +	}
> +}
> +
> +
> +static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
> +{
> +	struct net_device *dev = data;
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int reg;
> +
> +	netif_rx_schedule(dev);
> +	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
> +			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));
> +
> +	reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
> +	      PAS_IOB_DMA_RXCH_RESET_DINTC;
> +	if (*mac->rx_status & PAS_STATUS_TIMER)
> +		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
> +
> +	pci_write_config_dword(mac->iob_pdev,
> +			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
> +
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
> +{
> +	struct net_device *dev = data;
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int reg;
> +
> +	pasemi_mac_clean_tx(mac);
> +
> +	reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
> +	if (*mac->tx_status & PAS_STATUS_TIMER)
> +		reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;
> +
> +	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
> +			       reg);
> +
> +	return IRQ_HANDLED;
> +}

To do shared IRQs properly you need to check whether this is your device's
IRQ or not. Maybe by reading a config value?
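
Since the per-channel status word is already mapped, something like the
following at the top of each handler might do (untested; assumes the INT
bit is only set while this channel has an interrupt pending):

	/* not ours if this channel has nothing pending */
	if (!(*mac->rx_status & PAS_STATUS_INT))
		return IRQ_NONE;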

> +static int __devinit
> +pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
> +{
> +	static int index = 0;
> +	struct net_device *dev;
> +	struct pasemi_mac *mac;
> +	int err;
> +
> +	err = pci_enable_device(pdev);
> +	if (err) {
> +		dev_err(&pdev->dev, "pasemi_mac: Could not enable device.\n");
> +		return -ENODEV;

Please return err instead. This allows propagating a possibly
useful value.

> +	}
> +	dev = alloc_etherdev(sizeof(struct pasemi_mac));
> +	if (dev == NULL) {
> +		dev_err(&pdev->dev,
> +			"pasemi_mac: Could not allocate ethernet device.\n");
> +		return -ENODEV;
	return -ENOMEM;

> +	}
> +	SET_MODULE_OWNER(dev);
> +
> +	pci_set_drvdata(pdev, dev);
> +	SET_NETDEV_DEV(dev, &pdev->dev);
> +
> +	mac = netdev_priv(dev);
> +	memset(mac, 0, sizeof(struct pasemi_mac));

Unnecessary, alloc_etherdev already zeros.

> +	mac->pdev = pdev;
> +	mac->netdev = dev;
> +	mac->dma_pdev = pci_find_device(0x1959, 0xa007, NULL);

Do not use pci_find_device in new code; see the comment in the kernel docs.
Use pci_get_device instead. You are not checking for error values.
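
I.e. something along these lines (untested; remember the matching
pci_dev_put() calls on the error and remove paths):

	mac->dma_pdev = pci_get_device(0x1959, 0xa007, NULL);
	if (!mac->dma_pdev) {
		dev_err(&pdev->dev, "Can't find DMA controller\n");
		err = -ENODEV;
		goto out;
	}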

> +	mac->iob_pdev = pci_find_device(0x1959, 0xa001, NULL);
> +
> +	if (!mac->iob_pdev) {
> +		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
> +		return -ENODEV;
> +	}
> +
> +	/* These should come out of the device tree eventually */
> +	mac->dma_txch = index;
> +	mac->dma_rxch = index;
> +
> +	/* We probe GMAC before XAUI, but the DMA interfaces are
> +	 * in XAUI, GMAC order.
> +	 */
> +	if (index < 4)
> +		mac->dma_if = index + 2;
> +	else
> +		mac->dma_if = index - 4;
> +	index++;
> +
> +	switch (pdev->device) {
> +	case 0xa005:
> +		mac->type = MAC_TYPE_GMAC;
> +		break;
> +	case 0xa006:
> +		mac->type = MAC_TYPE_XAUI;
> +		break;
> +	default:
> +		err = -ENODEV;
> +		goto out;
> +	}
> +
> +	/* get mac addr from device tree */
> +	if (pasemi_set_mac_addr(mac)) {
> +		err = -ENODEV;
> +		goto out;
> +	}
> +	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
> +
> +	strcpy(dev->name, "eth%d");

alloc_etherdev already sets this for dev->name


> +	dev->open = pasemi_mac_open;
> +	dev->stop = pasemi_mac_close;
> +	dev->hard_start_xmit = pasemi_mac_start_tx;
> +	dev->get_stats = pasemi_mac_get_stats;
> +	dev->set_multicast_list = pasemi_mac_set_rx_mode;
> +	dev->weight = 64;
> +	dev->poll = pasemi_mac_poll;
> +	dev->features = NETIF_F_HW_CSUM;
> +
> +	/* The dma status structure is located in the I/O bridge, and
> +	 * is cache coherent.
> +	 */
> +	if (!dma_status)
> +		/* XXXOJN This should come from the device tree */
> +		dma_status = __ioremap(0xfd800000, 0x1000, 0);
> +
> +	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
> +	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
> +
> +	err = register_netdev(dev);
> +
> +	if (err)
> +		printk("register_netdev failed with error %d\n", err);

You are leaking netdevice, and all your pci setup needs to
be undone.
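
Something like the usual unwind idiom (just a sketch, not the exact fix):

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "register_netdev failed: %d\n", err);
                goto out_free;
        }
        return 0;

out_free:
        free_netdev(dev);
        pci_disable_device(pdev);
        return err;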

> +	else
> +		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
> +		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
> +		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
> +		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
> +		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
> +		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
> +
> +	return err;
> +
> +out:
> +	printk(KERN_ERR "pasemi_mac: init failed\n");

You need to unwind the pci_ stuff.

> +	pci_disable_device(pdev);
> +	free_netdev(dev);
> +	return err;
> +}
> +
> +static struct pci_device_id pasemi_mac_pci_tbl[] = {
> +	{ PCI_DEVICE(0x1959, 0xa005) },
> +	{ PCI_DEVICE(0x1959, 0xa006) },
> +	{ 0 }
> +};
> +
> +MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
> +
> +static struct pci_driver pasemi_mac_driver = {
> +	.name = "pasemi_mac",
> +	.id_table = pasemi_mac_pci_tbl,
> +	.probe = pasemi_mac_probe,

Don't you need a remove routine?

> +};
> +
> +static void __exit pasemi_mac_cleanup_module(void)
> +{
> +	pci_unregister_driver(&pasemi_mac_driver);
> +}
> +
> +int pasemi_mac_init_module(void)
> +{
> +	return pci_module_init(&pasemi_mac_driver);

pci_module_init is marked as obsolete; please use pci_register_driver.

> +}
> +module_init(pasemi_mac_init_module);
> +module_exit(pasemi_mac_cleanup_module);


-- 
Stephen Hemminger <shemminger@linux-foundation.org>


* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-29  6:08 [PATCH] PA Semi PWRficient Ethernet driver Olof Johansson
  2007-01-29 18:22 ` Stephen Hemminger
@ 2007-01-29 22:35 ` Francois Romieu
  2007-01-30  1:41   ` Olof Johansson
  2007-01-30  1:44 ` [PATCH] [v2]PA " Olof Johansson
  2007-01-30 10:03 ` [PATCH] " Christoph Hellwig
  3 siblings, 1 reply; 23+ messages in thread
From: Francois Romieu @ 2007-01-29 22:35 UTC (permalink / raw)
  To: Olof Johansson; +Cc: jgarzik, netdev

Olof Johansson <olof@lixom.net> :
> Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)
> 
> Basic enablement, will be complemented with performance enhancements
> over time. PHY support will be added as well.

- The driver does not contain a single SMP locking instruction but
  http://www.pasemi.com claims the platform to be multicore.
  Is the driver really designed to be lockless ?

- Is there really no other choice than constantly accessing the
  registers of the device through pci_write_config_dword() ?
  No PCI BAR remappable area ?

- Is there a volunteer to maintain the driver ? If so the MAINTAINERS
  file should be updated (hint, hint).

- No known public documentation for the hardware ?

Inlined remarks below.

[...]
> Index: merge/drivers/net/pasemi_mac.c
> ===================================================================
> --- /dev/null
> +++ merge/drivers/net/pasemi_mac.c
> @@ -0,0 +1,797 @@
> +/*
> + * Copyright (C) 2006-2007 PA Semi, Inc
> + *
> + * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
> + */
> +
> +#include <linux/init.h>
> +#include <linux/module.h>
> +#include <linux/pci.h>
> +#include <linux/interrupt.h>
> +#include <linux/dmaengine.h>
> +#include <linux/delay.h>
> +#include <linux/netdevice.h>
> +#include <linux/etherdevice.h>
> +#include <asm/dma-mapping.h>
> +#include <linux/in.h>
> +#include <linux/skbuff.h>
> +
> +#include <linux/ip.h>
> +#include <linux/tcp.h>
> +#include <net/checksum.h>
> +
> +#include "pasemi_mac.h"
> +
> +#define INITIAL_RX_RING_SIZE 512
> +#define INITIAL_TX_RING_SIZE 512
> +
> +#define BUF_SIZE 2048

Is there a specific reason for this rather unusual size ?

> +
> +#define PAS_DMA_MAX_IF     40
> +#define PAS_DMA_MAX_RXCH   8
> +#define PAS_DMA_MAX_TXCH   8
> +
> +/* XXXOJN these should come out of the device tree some day */
> +#define PAS_DMA_CAP_BASE   0xe00d0040
> +#define PAS_DMA_CAP_SIZE   0x100
> +#define PAS_DMA_COM_BASE   0xe00d0100
> +#define PAS_DMA_COM_SIZE   0x100
> +
> +static irqreturn_t pasemi_mac_tx_intr(int, void *);
> +static irqreturn_t pasemi_mac_rx_intr(int, void *);
> +static int pasemi_mac_clean_tx(struct pasemi_mac *mac);
> +static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit);

Reorder and remove the forward declarations ?

> +
> +static struct pasdma_status *dma_status;
> +
> +static int pasemi_set_mac_addr(struct pasemi_mac *mac)
> +{
> +	struct pci_dev *pdev = mac->pdev;
> +	struct device_node *dn = pci_device_to_OF_node(pdev);
> +	const u8 *maddr;
> +	u8 addr[6];
> +
> +	if (!dn) {
> +		dev_dbg(&pdev->dev,
> +			  "No device node for mac, not configuring\n");
> +		return -ENOENT;
> +	}
> +
> +	maddr = get_property(dn, "mac-address", NULL);
> +	if (maddr == NULL) {
> +		dev_warn(&pdev->dev,
> +			 "no mac address in device tree, not configuring\n");
> +		return -ENOENT;
> +	}
> +
> +	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
> +		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
> +		dev_warn(&pdev->dev,
> +			 "can't parse mac address, not configuring\n");
> +		return -EINVAL;
> +	}
> +
> +	memcpy(mac->mac_addr, addr, sizeof(addr));

Add a check for is_valid_ether_addr() ?

> +	return 0;
> +}
> +
> +static void pasemi_mac_setup_rx_resources(struct net_device *dev)
> +{
> +	struct pasemi_mac_rxring *ring;
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	int chan_id = mac->dma_rxch;
> +
> +	ring = kzalloc(sizeof(*ring), GFP_KERNEL);

k*alloc can fail. Please check !ring.

> +
> +	ring->count = INITIAL_RX_RING_SIZE;
> +
> +	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer)*ring->count,
> +				  GFP_KERNEL);
> +
> +	/* Allocate descriptors */
> +	ring->desc = (void *)__get_free_pages(GFP_KERNEL,
> +				      get_order(ring->count *
> +				      sizeof(struct pas_dma_xct_descr)));
> +	ring->dma = virt_to_phys(ring->desc);
> +	memset(ring->desc, 0, ring->count * sizeof(struct pas_dma_xct_descr));
> +
> +	ring->buffers = (void *)__get_free_pages(GFP_KERNEL,
> +					 get_order(ring->count * sizeof(u64)));
> +	ring->buf_dma = virt_to_phys(ring->buffers);
> +	memset(ring->buffers, 0, ring->count * sizeof(u64));

Use pci_alloc_consistent() ?
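
Roughly, with the same sizes as above (a sketch, not a drop-in
replacement):

        /* one coherent allocation per ring; ring->dma is the bus address
         * that the channel registers get programmed with
         */
        ring->desc = pci_alloc_consistent(mac->dma_pdev,
                                          ring->count * sizeof(struct pas_dma_xct_descr),
                                          &ring->dma);
        if (!ring->desc)
                return -ENOMEM;
        memset(ring->desc, 0, ring->count * sizeof(struct pas_dma_xct_descr));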

> +
> +	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
> +			       PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));
> +
> +	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
> +			       PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
> +			       PAS_DMA_RXCHAN_BASEU_SIZ(INITIAL_RX_RING_SIZE >> 2));
> +
> +	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
> +			       PAS_DMA_RXCHAN_CFG_HBU(1));
> +
> +	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
> +			       PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));
> +
> +	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
> +			       PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
> +			       PAS_DMA_RXINT_BASEU_SIZ(INITIAL_RX_RING_SIZE >> 3));
> +
> +	ring->next_to_fill = 0; ring->next_to_clean = 0;

Line feed please.

> +	mac->rx = ring;
> +}
> +
> +
> +static void pasemi_mac_setup_tx_resources(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	u32 val;
> +	int chan_id = mac->dma_txch;
> +	struct pasemi_mac_txring *ring;
> +
> +	ring = kzalloc(sizeof(*ring), GFP_KERNEL);

k*alloc can fail.

> +
> +	ring->count = INITIAL_TX_RING_SIZE;
> +
> +	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer)*ring->count,
> +				  GFP_KERNEL);
> +	/* Allocate descriptors */
> +	ring->desc = (void *)__get_free_pages(GFP_KERNEL,
> +				      get_order(ring->count *
> +				      sizeof(struct pas_dma_xct_descr)));
> +	ring->dma = virt_to_phys(ring->desc);
> +
> +	memset(ring->desc, 0, ring->count * sizeof(struct pas_dma_xct_descr));

Use pci_alloc_consistent() ?

> +
> +	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
> +			       PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
> +	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
> +	val |= PAS_DMA_TXCHAN_BASEU_SIZ(INITIAL_TX_RING_SIZE >> 2);
> +
> +	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);
> +
> +	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
> +			       PAS_DMA_TXCHAN_CFG_TY_IFACE |
> +			       PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
> +			       PAS_DMA_TXCHAN_CFG_UP |
> +			       PAS_DMA_TXCHAN_CFG_WT(2));
> +
> +	ring->next_to_use = 0; ring->next_to_clean = 0;

Line feed please.

> +	mac->tx = ring;
> +}
> +
> +static noinline void pasemi_mac_free_resources(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	int i;

unsigned int is supposed to save some cycles on ppc.

> +
> +	for (i = 0; i < mac->tx->count; i++) {
> +		if (INFO(mac->tx, i).dma) {
> +			pr_debug("cleaning tx %d, dma addr %lx\n", i, INFO(mac->tx, i).dma);
> +			if (INFO(mac->tx, i).skb)
> +				dev_kfree_skb_any(INFO(mac->tx, i).skb);
> +			INFO(mac->tx, i).dma = 0;
> +			INFO(mac->tx, i).skb = 0;
> +			DESCR(mac->tx, i).mactx = 0;
> +			DESCR(mac->tx, i).ptr = 0;
> +		}
> +	}
> +
> +	/* Add free of all data structures here */
> +	free_pages((unsigned long)mac->tx->desc, get_order(
> +			mac->tx->count * sizeof(struct pas_dma_xct_descr)));
> +
> +	kfree(mac->tx);
> +	mac->tx = NULL;
> +
> +	for (i = 0; i < mac->rx->count; i++) {
> +		if (INFO(mac->rx, i).dma) {
> +			pr_debug("cleaning rx %d, dma addr %lx\n", i, INFO(mac->rx, i).dma);
> +			if (INFO(mac->rx, i).skb)
> +				dev_kfree_skb_any(INFO(mac->rx, i).skb);
> +			INFO(mac->rx, i).dma = 0;
> +			INFO(mac->rx, i).skb = 0;
> +			DESCR(mac->rx, i).macrx = 0;
> +			DESCR(mac->rx, i).ptr = 0;
> +		}
> +	}
> +
> +	free_pages((unsigned long)mac->rx->desc, get_order(mac->rx->count *
> +		   sizeof(struct pas_dma_xct_descr)));
> +
> +	free_pages((unsigned long)mac->rx->buffers,
> +		   get_order(mac->rx->count * sizeof(u64)));
> +
> +	kfree(mac->rx);
> +	mac->rx = NULL;
> +}
> +
> +static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int i;
> +	dma_addr_t dma;
> +	struct sk_buff *skb;
> +	int start = mac->rx->next_to_fill;
> +	int count;
> +
> +	count = ((mac->rx->next_to_clean & ~7) + mac->rx->count -
> +		 mac->rx->next_to_fill) % mac->rx->count;
> +
> +	if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0)) {
> +		pr_debug("first time fill, clean %d fill %d\n",
> +			 mac->rx->next_to_clean, mac->rx->next_to_fill);
> +		count = mac->rx->count - 8;
> +	}
> +
> +	/* Limit so we don't go into the last cache line */
> +	count -= 8;
> +
> +	if (count <= 0)
> +		return;
> +
> +	for (i = start; i < start+count; i++) {
                                ^^^
> +		skb = dev_alloc_skb(BUF_SIZE);
> +
> +		if (!skb)
> +			return;
> +
> +		skb->dev = dev;
> +
> +		dma = virt_to_phys(skb->data);

Use pci_map_single() and friends ?

It is described in Documentation/DMA-mapping.txt and widely used
in in-kernel drivers.
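
A rough sketch of the streaming mapping for one receive buffer (error
handling omitted, just a sketch):

        dma = pci_map_single(mac->dma_pdev, skb->data, BUF_SIZE,
                             PCI_DMA_FROMDEVICE);
        /* ... hand 'dma' to the hardware, and once the packet is in: */
        pci_unmap_single(mac->dma_pdev, dma, BUF_SIZE, PCI_DMA_FROMDEVICE);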

[...]
> +static int pasemi_mac_open(struct net_device *dev)
> +{
[...]
> +	if (ret)
> +		printk("request_irq of irq %d failed: %d\n",
> +		       mac->dma_pdev->irq + mac->dma_txch, ret);


Missing KERN_XYZ.

> +
> +	ret = request_irq(128 + 20 + mac->dma_rxch, &pasemi_mac_rx_intr,
> +			  IRQF_DISABLED, "pasemi_mac rx", dev);
> +	if (ret)
> +		printk("request_irq of irq %d failed: %d\n",
> +		       mac->dma_pdev->irq + 20 + mac->dma_rxch, ret);

Missing KERN_XYZ.

[...]
> +static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	struct pasemi_mac_txring *txring;
> +	u64 flags;
> +	dma_addr_t map;
> +
> +	if (mac->tx->next_to_clean+mac->tx->count == mac->tx->next_to_use)
> +		pasemi_mac_clean_tx(mac);
> +
> +	mac->stats.tx_packets++;
> +	mac->stats.tx_bytes += skb->len;
> +
> +	txring = mac->tx;
> +
> +	flags = XCT_MACTX_O | XCT_MACTX_ST |
> +		XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
> +
> +	if (skb->ip_summed == CHECKSUM_PARTIAL) {
> +		switch (skb->nh.iph->protocol) {
> +		case IPPROTO_TCP:
> +			flags |= XCT_MACTX_CSUM_TCP;
> +			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
> +			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
> +			break;
> +		case IPPROTO_UDP:
> +			flags |= XCT_MACTX_CSUM_UDP;
> +			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
> +			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
> +			break;
> +		}
> +	}
> +
> +	map = virt_to_phys(skb->data);

Use pci_map_single() and friends ?

> +
> +	DESCR(txring, txring->next_to_use).mactx = flags |
> +						XCT_MACTX_LLEN(skb->len);
> +	DESCR(txring, txring->next_to_use).ptr = XCT_PTR_LEN(skb->len) |
> +						XCT_PTR_ADDR(map);
> +	INFO(txring, txring->next_to_use).dma = map;
> +	INFO(txring, txring->next_to_use).skb = skb;
> +	/* XXXOJN Deal with fragmented packets when larger MTU is supported */
> +
> +	txring->next_to_use++;
> +
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
> +
> +	return NETDEV_TX_OK;

How is the TX process stopped when the ring gets full ?

> +}
> +
> +static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +
> +	return &mac->stats;
> +}
> +
> +static void pasemi_mac_set_rx_mode(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int flags;
> +
> +	return;

Huh ?

> +
> +	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
> +
> +	/* Set promiscuous */
> +	if (dev->flags & IFF_PROMISC)
> +		flags |= PAS_MAC_CFG_PCFG_PR;
> +	else
> +		flags &= ~PAS_MAC_CFG_PCFG_PR;
> +
> +	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
> +}
> +
> +
> +static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
> +{
> +	int i, j;

unsigned int ?

Needlessly wide scope for j...

> +	struct pas_dma_xct_descr descr;
> +	struct pasemi_mac_buffer *info;
> +	struct sk_buff *skb;

... and for descr/info/skb...

> +	unsigned int len;
> +	int start;
> +	int count;
> +	dma_addr_t dma;

... and for dma.

> +
> +	start = mac->rx->next_to_clean;
> +	count = 0;
> +
> +	for (i = start; i < start+mac->rx->count && count < limit; i++) {
                                ^^^
I would not protest against a few parentheses here and there.

> +		rmb();
> +		mb();

rmb() _and_ mb() ?

Btw, a scroll of ancient incantation is available in
Documentation/memory-barriers.txt.

> +		descr = DESCR(mac->rx, i);
> +		if (!(descr.macrx & XCT_MACRX_O))
> +			break;
> +
> +		count++;
> +
> +		info = NULL;
> +
> +		/* We have to scan for our skb since there's no way
> +		 * to back-map them from the descriptor, and if we
> +		 * have several receive channels then they might not
> +		 * show up in the same order as they were put on the
> +		 * interface ring.
> +		 */
> +
> +		dma = (descr.ptr & XCT_PTR_ADDR_M);
> +		for (j = start; j < start+mac->rx->count; j++)
> +			if (INFO(mac->rx, j).dma == dma) {
> +				info = &INFO(mac->rx, j);
> +				break;
> +			}

This is not a single line statement: please add curly-braces.

[...]
> +static int __devinit
> +pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
> +{
[..]
> +	/* The dma status structure is located in the I/O bridge, and
> +	 * is cache coherent.
> +	 */
> +	if (!dma_status)
> +		/* XXXOJN This should come from the device tree */
> +		dma_status = __ioremap(0xfd800000, 0x1000, 0);

Is this address really set in stone or can it be retrieved after some
pci_get_device(...) practice ?

> +
> +	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
> +	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];

Addresses returned from ioremap are not guaranteed to be dereferenceable
like that.

> +
> +	err = register_netdev(dev);
> +
> +	if (err)
> +		printk("register_netdev failed with error %d\n", err);

Missing KERN_XYZ.

> +	else
> +		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
> +		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
> +		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
> +		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
> +		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
> +		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
> +
> +	return err;
> +
> +out:
> +	printk(KERN_ERR "pasemi_mac: init failed\n");
> +
> +	pci_disable_device(pdev);
> +	free_netdev(dev);
> +	return err;
> +}
> +
> +static struct pci_device_id pasemi_mac_pci_tbl[] = {
> +	{ PCI_DEVICE(0x1959, 0xa005) },
> +	{ PCI_DEVICE(0x1959, 0xa006) },

Minor nit: just use a #define for the vendor ID and you will simply
submit a one-line removal the day pci_ids is updated.

> +	{ 0 }
> +};
> +
> +MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
> +
> +static struct pci_driver pasemi_mac_driver = {
> +	.name = "pasemi_mac",
> +	.id_table = pasemi_mac_pci_tbl,
> +	.probe = pasemi_mac_probe,

	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
[...]
> Index: merge/drivers/net/pasemi_mac.h
> ===================================================================
> --- /dev/null
> +++ merge/drivers/net/pasemi_mac.h
[...]
> +/* Number of unused descriptors, considering ring wraparounds */
> +#define PASEMI_MAC_DESC_UNUSED(ring) ((((ring)->next_to_clean >		\
> +					(ring)->next_to_use) ?		\
> +					  0 :				\
> +					  (ring)->count) +		\
> +					  (ring)->next_to_clean -	\
> +					  (ring)->next_to_use - 1)
> +
> +#define DESCR(ring, i) ((ring)->desc[i % ((ring)->count)])
> +#define BUFF(ring, i) ((ring)->buffers[i % ((ring)->count)])
> +#define INFO(ring, i) ((ring)->desc_info[i % ((ring)->count)])

A bit ugly/obfuscating/name clash prone imvho.

Use local variables ?

> +
> +struct pasemi_mac {
> +	struct net_device *netdev;
> +	struct pci_dev *pdev;
> +	struct pci_dev *dma_pdev;
> +	struct pci_dev *iob_pdev;
> +	struct net_device_stats stats;
> +
> +	/* Pointer to the cacheable per-channel status registers */
> +	uint64_t	*rx_status;
> +	uint64_t	*tx_status;

No uintxy_t please. Use plain u64.

-- 
Ueimor


* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-29 18:22 ` Stephen Hemminger
@ 2007-01-30  1:41   ` Olof Johansson
  2007-01-30  2:34     ` Jeff Garzik
  0 siblings, 1 reply; 23+ messages in thread
From: Olof Johansson @ 2007-01-30  1:41 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: jgarzik, netdev

On Mon, Jan 29, 2007 at 10:22:33AM -0800, Stephen Hemminger wrote:
> Basic initialization, setup comments.

Thanks, fixes have been incorporated and will be reposted. Most of them
were obviously just my lack of diligence. See, however, the two comments below.

> > +static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
> > +{
> > +	struct net_device *dev = data;
> > +	struct pasemi_mac *mac = netdev_priv(dev);
> > +	unsigned int reg;
> > +
> > +	pasemi_mac_clean_tx(mac);
> > +
> > +	reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
> > +	if (*mac->tx_status & PAS_STATUS_TIMER)
> > +		reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;
> > +
> > +	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
> > +			       reg);
> > +
> > +	return IRQ_HANDLED;
> > +}
> 
> To do shared IRQs properly you need to check whether this is your
> device's IRQ or not. Maybe by reading a config value?

Right now it's guaranteed that the interrupts will not be shared. They're
fixed for the on-chip devices, and no other driver should be binding to
the same channels (and thus irqs).

If it changes in the future, the driver would need other rework as well.

> > +
> > +static struct pci_driver pasemi_mac_driver = {
> > +   .name = "pasemi_mac",
> > +   .id_table = pasemi_mac_pci_tbl,
> > +   .probe = pasemi_mac_probe,
> 
> Don't you need a remove routine?

No hotplug support at this time, so I didn't see any use in providing one.


-Olof


* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-29 22:35 ` Francois Romieu
@ 2007-01-30  1:41   ` Olof Johansson
  2007-01-30 10:06     ` Christoph Hellwig
  2007-01-30 21:45     ` Francois Romieu
  0 siblings, 2 replies; 23+ messages in thread
From: Olof Johansson @ 2007-01-30  1:41 UTC (permalink / raw)
  To: Francois Romieu; +Cc: jgarzik, netdev

Hi,

Thanks for the comments. In general I have applied them, with some specific
comments below.

I'll repost a new version of the driver based on this and other feedback
later tonight.

On Mon, Jan 29, 2007 at 11:35:06PM +0100, Francois Romieu wrote:
> Olof Johansson <olof@lixom.net> :
> > Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)
> > 
> > Basic enablement, will be complemented with performance enhancements
> > over time. PHY support will be added as well.
> 
> - The driver does not contain a single SMP locking instruction but
>   http://www.pasemi.com claims the platform to be multicore.
>   Is the driver really designed to be lockless ?

Unless I misunderstood something, NAPI drivers that don't set NETIF_F_LLTX
will have all locking taken care of by higher layers, no?
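
(My understanding of the non-LLTX case is that the core does, roughly --
paraphrased, not verbatim from net/core/dev.c:)

        /* simplified view of the dev_queue_xmit() path */
        if (!(dev->features & NETIF_F_LLTX))
                netif_tx_lock(dev);             /* per-device TX lock */
        rc = dev->hard_start_xmit(skb, dev);
        if (!(dev->features & NETIF_F_LLTX))
                netif_tx_unlock(dev);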

> - Is there really no other choice than constantly accessing the
>   registers of the device through pci_write_config_dword() ?
>   No PCI BAR remappable area ?

Maybe a bit of introduction could be useful (also regarding the
pci_map/alloc comments below).

Our devices are on-chip, and while they're not on a PCI(e) bus internally,
they do have config headers and will show up as devices on a pseudo-bus
(the root one, in fact).

Also, while the driver could go through the IOMMU layers, there's no
real use in doing so at this time.

When it comes to register access -- it would probably make sense to
remap them separately and use normal accessors instead of going through
the quite heavyweight PCI config accessors. I used them right now for
convenience.
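
Something along these lines once the register block is remapped -- just a
sketch, reg_base/reg_size are placeholders and the config-space offsets
are reused purely for illustration:

        void __iomem *regs = ioremap(reg_base, reg_size);

        out_le32(regs + PAS_MAC_CFG_PCFG, flags);   /* vs pci_write_config_dword() */
        flags = in_le32(regs + PAS_MAC_CFG_PCFG);   /* vs pci_read_config_dword() */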

As I already mentioned, there's not been a whole lot of performance work
done on this driver yet; I expect to address this when I get started
on that.

> - Is there a volunteer to maintain the driver ? If so the MAINTAINERS
>   file should be updated (hint, hint).

Yep, forgot to include it.

> - No known public documentation for the hardware ?

Not at this time, but the driver will be actively maintained so it
shouldn't be an issue.

> Inlined remarks below.

Comments to some of them below.

> > +#define BUF_SIZE 2048
> 
> Is there a specific reason for this rather unusual size ?

A nice and round and large enough number. But no, no real reason. Fixed.

(And yes, large MTU support is also on the todo list. :-)

> > +	ring->buffers = (void *)__get_free_pages(GFP_KERNEL,
> > +					 get_order(ring->count * sizeof(u64)));
> > +	ring->buf_dma = virt_to_phys(ring->buffers);
> > +	memset(ring->buffers, 0, ring->count * sizeof(u64));
> 
> Use pci_alloc_consistent() ?

Nope, for reasons above.

> > +static noinline void pasemi_mac_free_resources(struct net_device *dev)
> > +{
> > +	struct pasemi_mac *mac = netdev_priv(dev);
> > +	int i;
> 
> unsigned int is supposed to save some cycles on ppc.

Who told you that? That's not true.
Still, there's no need for the iterator to be signed.

> > +	if (ret)
> > +		printk("request_irq of irq %d failed: %d\n",
> > +		       mac->dma_pdev->irq + mac->dma_txch, ret);
> 
> 
> Missing KERN_XYZ.

Changed all the printk's to be dev_*() instead based on Stephen's comments.

> > +static void pasemi_mac_set_rx_mode(struct net_device *dev)
> > +{
> > +	struct pasemi_mac *mac = netdev_priv(dev);
> > +	unsigned int flags;
> > +
> > +	return;
> 
> Huh ?

Yeah, I forgot it there from debugging. I can't even remember why I added
it, and I obviously missed it when going over the code before posting.

> > +
> > +	for (i = start; i < start+mac->rx->count && count < limit; i++) {
>                                 ^^^
> I would not protest against a few parentheses here and there.
> 
> > +		rmb();
> > +		mb();
> 
> rmb() _and_ mb() ?
> 
> Btw a scroll of ancient incantation is available in
> Documentation/memory-barriers.txt btw.

Not sure why they were still left in there. Only rmb is needed. Same
for the barrier at the bottom of the loop.

> > +static int __devinit
> > +pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
> > +{
> [..]
> > +	/* The dma status structure is located in the I/O bridge, and
> > +	 * is cache coherent.
> > +	 */
> > +	if (!dma_status)
> > +		/* XXXOJN This should come from the device tree */
> > +		dma_status = __ioremap(0xfd800000, 0x1000, 0);
> 
> Is this address really set in stone or can it be retrieved after some
> pci_get_device(...) practice ?

As the comment says -- one day it should come out of the device tree. It's
a well-known, fixed address on the current chips, but it might be located
somewhere else on future products.

> > +	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
> > +	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
> 
> Addresses returned from ioremap are not guaranteed to be dereferencable
> like that.

That's why I'm using __ioremap instead, to get a cacheable regular area
to just reference.

Is there another preferred method of doing this? Note that it is a
cache-coherent status area, so regular ioremap() is not the solution.

> > +	{ PCI_DEVICE(0x1959, 0xa005) },
> > +	{ PCI_DEVICE(0x1959, 0xa006) },
> 
> Minor nit: just use a #define for the vendor ID and you will simply
> submit a one-line removal the day pci_ids is updated.

I'll just include the vendor ID in this patch instead.

> > +#define DESCR(ring, i) ((ring)->desc[i % ((ring)->count)])
> > +#define BUFF(ring, i) ((ring)->buffers[i % ((ring)->count)])
> > +#define INFO(ring, i) ((ring)->desc_info[i % ((ring)->count)])
> 
> A bit ugly/obfuscating/name clash prone imvho.
> 
> Use local variables ?

I'm open to suggestions here; I'm not sure how local variables would help,
though.
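
(Presumably something along these lines, with a local pointer instead of
the macro -- just a sketch of my reading of the suggestion:)

        struct pas_dma_xct_descr *dp = &mac->rx->desc[i % mac->rx->count];
        struct pasemi_mac_buffer *info = &mac->rx->desc_info[i % mac->rx->count];

        /* i and len as in pasemi_mac_clean_rx() */
        if (dp->macrx & XCT_MACRX_O)
                len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;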


-Olof


* [PATCH] [v2]PA Semi PWRficient Ethernet driver
  2007-01-29  6:08 [PATCH] PA Semi PWRficient Ethernet driver Olof Johansson
  2007-01-29 18:22 ` Stephen Hemminger
  2007-01-29 22:35 ` Francois Romieu
@ 2007-01-30  1:44 ` Olof Johansson
  2007-01-31  5:44   ` [PATCH] [v3] PA " Olof Johansson
  2007-01-30 10:03 ` [PATCH] " Christoph Hellwig
  3 siblings, 1 reply; 23+ messages in thread
From: Olof Johansson @ 2007-01-30  1:44 UTC (permalink / raw)
  To: jgarzik; +Cc: netdev

Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)

Basic enablement, will be complemented with performance enhancements
over time. PHY support will be added as well.

This patch still uses numerical PCI IDs, they will be replaced when the
pci_ids.h change goes in, together with other currently pending drivers.

Signed-off-by: Olof Johansson <olof@lixom.net>


---

This version contains changes based on comments from Stephen Hemminger
and Francois Romieu.


Index: merge/drivers/net/Kconfig
===================================================================
--- merge.orig/drivers/net/Kconfig
+++ merge/drivers/net/Kconfig
@@ -2348,6 +2348,13 @@ config QLA3XXX
 	  To compile this driver as a module, choose M here: the module
 	  will be called qla3xxx.
 
+config PASEMI_MAC
+	tristate "PA Semi 1/10Gbit MAC"
+	depends on PPC64 && PCI
+	help
+	  This driver supports the on-chip 1/10Gbit Ethernet controller on
+	  PA Semi's PWRficient line of chips.
+
 endmenu
 
 #
Index: merge/drivers/net/Makefile
===================================================================
--- merge.orig/drivers/net/Makefile
+++ merge/drivers/net/Makefile
@@ -196,6 +196,7 @@ obj-$(CONFIG_SMC91X) += smc91x.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
 
 obj-$(CONFIG_MACB) += macb.o
 
Index: merge/drivers/net/pasemi_mac.c
===================================================================
--- /dev/null
+++ merge/drivers/net/pasemi_mac.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/checksum.h>
+
+#include "pasemi_mac.h"
+
+#define INITIAL_RX_RING_SIZE 512
+#define INITIAL_TX_RING_SIZE 512
+
+#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
+
+#define PAS_DMA_MAX_IF     40
+#define PAS_DMA_MAX_RXCH   8
+#define PAS_DMA_MAX_TXCH   8
+
+/* XXXOJN these should come out of the device tree some day */
+#define PAS_DMA_CAP_BASE   0xe00d0040
+#define PAS_DMA_CAP_SIZE   0x100
+#define PAS_DMA_COM_BASE   0xe00d0100
+#define PAS_DMA_COM_SIZE   0x100
+
+static struct pasdma_status *dma_status;
+
+static int pasemi_set_mac_addr(struct pasemi_mac *mac)
+{
+	struct pci_dev *pdev = mac->pdev;
+	struct device_node *dn = pci_device_to_OF_node(pdev);
+	const u8 *maddr;
+	u8 addr[6];
+
+	if (!dn) {
+		dev_dbg(&pdev->dev,
+			  "No device node for mac, not configuring\n");
+		return -ENOENT;
+	}
+
+	maddr = get_property(dn, "mac-address", NULL);
+	if (maddr == NULL) {
+		dev_warn(&pdev->dev,
+			 "no mac address in device tree, not configuring\n");
+		return -ENOENT;
+	}
+
+	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
+		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
+		dev_warn(&pdev->dev,
+			 "can't parse mac address, not configuring\n");
+		return -EINVAL;
+	}
+
+	memcpy(mac->mac_addr, addr, sizeof(addr));
+	return 0;
+}
+
+static int pasemi_mac_setup_rx_resources(struct net_device *dev)
+{
+	struct pasemi_mac_rxring *ring;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	int chan_id = mac->dma_rxch;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+
+	if (!ring)
+		goto out_ring;
+
+	ring->count = INITIAL_RX_RING_SIZE;
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer)*ring->count,
+				  GFP_KERNEL);
+
+	if (!ring->desc_info)
+		goto out_desc_info;
+
+	/* Allocate descriptors */
+	ring->desc = (void *)__get_free_pages(GFP_KERNEL,
+				      get_order(ring->count *
+				      sizeof(struct pas_dma_xct_descr)));
+
+	if (!ring->desc)
+		goto out_desc;
+
+	ring->dma = virt_to_phys(ring->desc);
+	memset(ring->desc, 0, ring->count * sizeof(struct pas_dma_xct_descr));
+
+	ring->buffers = (void *)__get_free_pages(GFP_KERNEL,
+					 get_order(ring->count * sizeof(u64)));
+	if (!ring->buffers)
+		goto out_buffers;
+
+	ring->buf_dma = virt_to_phys(ring->buffers);
+	memset(ring->buffers, 0, ring->count * sizeof(u64));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
+			       PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
+			       PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
+			       PAS_DMA_RXCHAN_BASEU_SIZ(INITIAL_RX_RING_SIZE >> 2));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
+			       PAS_DMA_RXCHAN_CFG_HBU(1));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
+			       PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
+			       PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
+			       PAS_DMA_RXINT_BASEU_SIZ(INITIAL_RX_RING_SIZE >> 3));
+
+	ring->next_to_fill = 0;
+	ring->next_to_clean = 0;
+	mac->rx = ring;
+
+	return 0;
+
+out_buffers:
+	kfree(ring->desc);
+out_desc:
+	kfree(ring->desc_info);
+out_desc_info:
+	kfree(ring);
+out_ring:
+	return -ENOMEM;
+}
+
+
+static int pasemi_mac_setup_tx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	u32 val;
+	int chan_id = mac->dma_txch;
+	struct pasemi_mac_txring *ring;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		goto out_ring;
+
+	ring->count = INITIAL_TX_RING_SIZE;
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer)*ring->count,
+				  GFP_KERNEL);
+	if (!ring->desc_info)
+		goto out_desc_info;
+
+	/* Allocate descriptors */
+	ring->desc = (void *)__get_free_pages(GFP_KERNEL,
+				      get_order(ring->count *
+				      sizeof(struct pas_dma_xct_descr)));
+	if (!ring->desc)
+		goto out_desc;
+	ring->dma = virt_to_phys(ring->desc);
+
+	memset(ring->desc, 0, ring->count * sizeof(struct pas_dma_xct_descr));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
+			       PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
+	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
+	val |= PAS_DMA_TXCHAN_BASEU_SIZ(INITIAL_TX_RING_SIZE >> 2);
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
+			       PAS_DMA_TXCHAN_CFG_TY_IFACE |
+			       PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
+			       PAS_DMA_TXCHAN_CFG_UP |
+			       PAS_DMA_TXCHAN_CFG_WT(2));
+
+	ring->next_to_use = 0; ring->next_to_clean = 0;
+	mac->tx = ring;
+
+	return 0;
+
+out_desc:
+	kfree(ring->desc_info);
+out_desc_info:
+	kfree(ring);
+out_ring:
+	return -ENOMEM;
+}
+
+static noinline void pasemi_mac_free_tx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+
+	for (i = 0; i < mac->tx->count; i++) {
+		if (INFO(mac->tx, i).dma) {
+			if (INFO(mac->tx, i).skb)
+				dev_kfree_skb_any(INFO(mac->tx, i).skb);
+			INFO(mac->tx, i).dma = 0;
+			INFO(mac->tx, i).skb = 0;
+			DESCR(mac->tx, i).mactx = 0;
+			DESCR(mac->tx, i).ptr = 0;
+		}
+	}
+
+	/* Add free of all data structures here */
+	free_pages((unsigned long)mac->tx->desc, get_order(
+			mac->tx->count * sizeof(struct pas_dma_xct_descr)));
+
+	kfree(mac->tx);
+	mac->tx = NULL;
+}
+
+static noinline void pasemi_mac_free_rx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+
+	for (i = 0; i < mac->rx->count; i++) {
+		if (INFO(mac->rx, i).dma) {
+			if (INFO(mac->rx, i).skb)
+				dev_kfree_skb_any(INFO(mac->rx, i).skb);
+			INFO(mac->rx, i).dma = 0;
+			INFO(mac->rx, i).skb = 0;
+			DESCR(mac->rx, i).macrx = 0;
+			DESCR(mac->rx, i).ptr = 0;
+		}
+	}
+
+	free_pages((unsigned long)mac->rx->desc, get_order(mac->rx->count *
+		   sizeof(struct pas_dma_xct_descr)));
+
+	free_pages((unsigned long)mac->rx->buffers,
+		   get_order(mac->rx->count * sizeof(u64)));
+
+	kfree(mac->rx);
+	mac->rx = NULL;
+}
+
+static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	dma_addr_t dma;
+	struct sk_buff *skb;
+	int start = mac->rx->next_to_fill;
+	int count;
+
+	count = ((mac->rx->next_to_clean & ~7) + mac->rx->count -
+		 mac->rx->next_to_fill) % mac->rx->count;
+
+	if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
+		count = mac->rx->count - 8;
+
+	/* Limit so we don't go into the last cache line */
+	count -= 8;
+
+	if (count <= 0)
+		return;
+
+	for (i = start; i < start + count; i++) {
+		skb = dev_alloc_skb(BUF_SIZE);
+
+		if (!skb)
+			return;
+
+		skb->dev = dev;
+
+		dma = virt_to_phys(skb->data);
+		INFO(mac->rx, i).skb = skb;
+		INFO(mac->rx, i).dma = dma;
+		BUFF(mac->rx, i) = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
+	}
+
+	wmb();
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
+			       count);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_INCR(mac->dma_if),
+			       count);
+
+	mac->rx->next_to_fill += count;
+}
+
+static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
+{
+	unsigned int i, j;
+	struct pas_dma_xct_descr descr;
+	struct pasemi_mac_buffer *info;
+	struct sk_buff *skb;
+	unsigned int len;
+	int start;
+	int count;
+	dma_addr_t dma;
+
+	start = mac->rx->next_to_clean;
+	count = 0;
+
+	for (i = start; i < (start + mac->rx->count) && count < limit; i++) {
+		rmb();
+		descr = DESCR(mac->rx, i);
+		if (!(descr.macrx & XCT_MACRX_O))
+			break;
+
+		count++;
+
+		info = NULL;
+
+		/* We have to scan for our skb since there's no way
+		 * to back-map them from the descriptor, and if we
+		 * have several receive channels then they might not
+		 * show up in the same order as they were put on the
+		 * interface ring.
+		 */
+
+		dma = (descr.ptr & XCT_PTR_ADDR_M);
+		for (j = start; j < (start + mac->rx->count); j++) {
+			if (INFO(mac->rx, j).dma == dma) {
+				info = &INFO(mac->rx, j);
+				break;
+			}
+		}
+
+		WARN_ON(!info);
+
+		skb = info->skb;
+
+		len = (descr.macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
+
+		skb_put(skb, len);
+
+		skb->protocol = eth_type_trans(skb, mac->netdev);
+
+		if ((descr.macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
+			skb->ip_summed = CHECKSUM_COMPLETE;
+			skb->csum = (descr.macrx & XCT_MACRX_CSUM_M) >>
+					   XCT_MACRX_CSUM_S;
+		} else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		mac->stats.rx_bytes += len;
+		mac->stats.rx_packets++;
+
+		netif_receive_skb(skb);
+
+		DESCR(mac->rx, i).ptr = 0;
+		DESCR(mac->rx, i).macrx = 0;
+		info->dma = 0;
+		info->skb = 0;
+	}
+
+	mac->rx->next_to_clean += count;
+	pasemi_mac_replenish_rx_ring(mac->netdev);
+
+	return count;
+}
+
+static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
+{
+	int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+	int start;
+	int count;
+
+	start = mac->tx->next_to_clean;
+	count = 0;
+
+	for (i = start; i < mac->tx->next_to_use; i++) {
+		dp = &DESCR(mac->tx, i);
+		if (!dp || (dp->mactx & XCT_MACTX_O))
+			break;
+
+		count++;
+
+		info = &INFO(mac->tx, i);
+
+		dev_kfree_skb_irq(info->skb);
+		info->skb = NULL;
+		info->dma = 0;
+		dp->mactx = 0;
+		dp->ptr = 0;
+	}
+	mac->tx->next_to_clean += count;
+	return count;
+}
+
+
+static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int reg;
+
+	netif_rx_schedule(dev);
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));
+
+	reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
+	      PAS_IOB_DMA_RXCH_RESET_DINTC;
+	if (*mac->rx_status & PAS_STATUS_TIMER)
+		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
+
+	pci_write_config_dword(mac->iob_pdev,
+			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
+
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int reg;
+	int was_full;
+
+	was_full = mac->tx->next_to_clean + mac->tx->count == mac->tx->next_to_use;
+
+	pasemi_mac_clean_tx(mac);
+
+	reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
+	if (*mac->tx_status & PAS_STATUS_TIMER)
+		reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
+			       reg);
+
+	if (was_full)
+		netif_wake_queue(dev);
+
+	return IRQ_HANDLED;
+}
+
+static int pasemi_mac_open(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int flags;
+	int ret;
+
+	/* enable rx section */
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
+			       PAS_DMA_COM_RXCMD_EN);
+
+	/* enable tx section */
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
+			       PAS_DMA_COM_TXCMD_EN);
+
+	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
+		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
+		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);
+
+	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
+		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
+
+	flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
+			       PAS_IOB_DMA_RXCH_CFG_CNTTH(30));
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
+
+	ret = pasemi_mac_setup_rx_resources(dev);
+	if (ret)
+		goto out_rx_resources;
+
+	ret = pasemi_mac_setup_tx_resources(dev);
+	if (ret)
+		goto out_tx_resources;
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
+			       PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
+			       PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));
+
+	/* enable rx if */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+			       PAS_DMA_RXINT_RCMDSTA_EN);
+
+	/* enable rx channel */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+			       PAS_DMA_RXCHAN_CCMDSTA_EN |
+			       PAS_DMA_RXCHAN_CCMDSTA_DU);
+
+	/* enable tx channel */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+			       PAS_DMA_TXCHAN_TCMDSTA_EN);
+
+	pasemi_mac_replenish_rx_ring(dev);
+
+	netif_start_queue(dev);
+	netif_poll_enable(dev);
+
+	ret = request_irq(mac->dma_pdev->irq + mac->dma_txch,
+			  &pasemi_mac_tx_intr, IRQF_DISABLED,
+			  "pasemi_mac tx", dev);
+	if (ret) {
+		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+		       mac->dma_pdev->irq + mac->dma_txch, ret);
+		goto out_tx_int;
+	}
+
+	ret = request_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch,
+			  &pasemi_mac_rx_intr, IRQF_DISABLED,
+			  "pasemi_mac rx", dev);
+	if (ret) {
+		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+		       mac->dma_pdev->irq + 20 + mac->dma_rxch, ret);
+		goto out_rx_int;
+	}
+
+	return 0;
+
+out_rx_int:
+	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
+out_tx_int:
+	netif_poll_disable(dev);
+	netif_stop_queue(dev);
+	pasemi_mac_free_tx_resources(dev);
+out_tx_resources:
+	pasemi_mac_free_rx_resources(dev);
+out_rx_resources:
+
+	return ret;
+}
+
+static int pasemi_mac_close(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int stat;
+
+	netif_stop_queue(dev);
+
+	/* Clean out any pending buffers */
+	pasemi_mac_clean_tx(mac);
+	pasemi_mac_clean_rx(mac, mac->rx->count);
+
+	/* Disable interface */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+			       PAS_DMA_TXCHAN_TCMDSTA_ST);
+	pci_write_config_dword(mac->dma_pdev,
+		      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+		      PAS_DMA_RXINT_RCMDSTA_ST);
+	pci_write_config_dword(mac->dma_pdev,
+		      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+		      PAS_DMA_RXCHAN_CCMDSTA_ST);
+
+	do {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+				      &stat);
+	} while (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT);
+
+	do {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+				      &stat);
+	} while (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT);
+
+	do {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+				      &stat);
+	} while (stat & PAS_DMA_RXINT_RCMDSTA_ACT);
+
+	/* Then, disable the channel. This must be done separately from
+	 * stopping, since you can't disable when active.
+	 */
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+
+	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
+	free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev);
+
+	/* Free resources */
+	pasemi_mac_free_rx_resources(dev);
+	pasemi_mac_free_tx_resources(dev);
+
+	return 0;
+}
+
+static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	struct pasemi_mac_txring *txring;
+	u64 flags;
+	dma_addr_t map;
+
+	if (mac->tx->next_to_clean + mac->tx->count == mac->tx->next_to_use) {
+		pasemi_mac_clean_tx(mac);
+		/* Still no room -- stop the queue and wait for tx intr when there's
+		 * room.
+		 */
+		if (mac->tx->next_to_clean + mac->tx->count == mac->tx->next_to_use) {
+			netif_stop_queue(dev);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	mac->stats.tx_packets++;
+	mac->stats.tx_bytes += skb->len;
+
+	txring = mac->tx;
+
+	flags = XCT_MACTX_O | XCT_MACTX_ST |
+		XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		switch (skb->nh.iph->protocol) {
+		case IPPROTO_TCP:
+			flags |= XCT_MACTX_CSUM_TCP;
+			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
+			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+			break;
+		case IPPROTO_UDP:
+			flags |= XCT_MACTX_CSUM_UDP;
+			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
+			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+			break;
+		}
+	}
+
+	map = virt_to_phys(skb->data);
+
+	DESCR(txring, txring->next_to_use).mactx = flags |
+						XCT_MACTX_LLEN(skb->len);
+	DESCR(txring, txring->next_to_use).ptr = XCT_PTR_LEN(skb->len) |
+						XCT_PTR_ADDR(map);
+	INFO(txring, txring->next_to_use).dma = map;
+	INFO(txring, txring->next_to_use).skb = skb;
+	/* XXXOJN Deal with fragmented packets when larger MTU is supported */
+
+	txring->next_to_use++;
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
+
+	return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+
+	return &mac->stats;
+}
+
+static void pasemi_mac_set_rx_mode(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int flags;
+
+	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
+
+	/* Set promiscuous */
+	if (dev->flags & IFF_PROMISC)
+		flags |= PAS_MAC_CFG_PCFG_PR;
+	else
+		flags &= ~PAS_MAC_CFG_PCFG_PR;
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
+}
+
+
+static int pasemi_mac_poll(struct net_device *dev, int *budget)
+{
+	int pkts, limit = min(*budget, dev->quota);
+	struct pasemi_mac *mac = netdev_priv(dev);
+
+	pkts = pasemi_mac_clean_rx(mac, limit);
+
+	if (pkts < limit) {
+		/* all done, no more packets present */
+		netif_rx_complete(dev);
+
+		/* re-enable receive interrupts */
+		pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+				       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
+		return 0;
+	} else {
+		/* used up our quantum, so reschedule */
+		dev->quota -= pkts;
+		*budget -= pkts;
+		return 1;
+	}
+}
+
+static int __devinit
+pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int index = 0;
+	struct net_device *dev;
+	struct pasemi_mac *mac;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pasemi_mac: Could not enable device.\n");
+		return err;
+	}
+	dev = alloc_etherdev(sizeof(struct pasemi_mac));
+	if (dev == NULL) {
+		dev_err(&pdev->dev,
+			"pasemi_mac: Could not allocate ethernet device.\n");
+		return -ENOMEM;
+	}
+	SET_MODULE_OWNER(dev);
+
+	pci_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	mac = netdev_priv(dev);
+
+	mac->pdev = pdev;
+	mac->netdev = dev;
+	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
+
+	if (!mac->dma_pdev) {
+		dev_err(&pdev->dev, "Can't find DMA Controller\n");
+		free_netdev(dev);
+		return -ENODEV;
+	}
+
+	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
+
+	if (!mac->iob_pdev) {
+		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
+		free_netdev(dev);
+		return -ENODEV;
+	}
+
+	/* These should come out of the device tree eventually */
+	mac->dma_txch = index;
+	mac->dma_rxch = index;
+
+	/* We probe GMAC before XAUI, but the DMA interfaces are
+	 * in XAUI, GMAC order.
+	 */
+	if (index < 4)
+		mac->dma_if = index + 2;
+	else
+		mac->dma_if = index - 4;
+	index++;
+
+	switch (pdev->device) {
+	case 0xa005:
+		mac->type = MAC_TYPE_GMAC;
+		break;
+	case 0xa006:
+		mac->type = MAC_TYPE_XAUI;
+		break;
+	default:
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* get mac addr from device tree */
+	if (pasemi_set_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
+		err = -ENODEV;
+		goto out;
+	}
+	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
+
+	dev->open = pasemi_mac_open;
+	dev->stop = pasemi_mac_close;
+	dev->hard_start_xmit = pasemi_mac_start_tx;
+	dev->get_stats = pasemi_mac_get_stats;
+	dev->set_multicast_list = pasemi_mac_set_rx_mode;
+	dev->weight = 64;
+	dev->poll = pasemi_mac_poll;
+	dev->features = NETIF_F_HW_CSUM;
+
+	/* The dma status structure is located in the I/O bridge, and
+	 * is cache coherent.
+	 */
+	if (!dma_status)
+		/* XXXOJN This should come from the device tree */
+		dma_status = __ioremap(0xfd800000, 0x1000, 0);
+
+	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
+	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
+
+	err = register_netdev(dev);
+
+	if (err) {
+		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
+			err);
+		goto out;
+	} else
+		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
+		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
+		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
+		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+	return err;
+
+out:
+	dev_err(&mac->pdev->dev, "pasemi_mac: init failed\n");
+
+	pci_disable_device(pdev);
+	pci_dev_put(mac->dma_pdev);
+	pci_dev_put(mac->iob_pdev);
+	free_netdev(dev);
+	return err;
+}
+
+static struct pci_device_id pasemi_mac_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
+
+static struct pci_driver pasemi_mac_driver = {
+	.name		= "pasemi_mac",
+	.id_table	= pasemi_mac_pci_tbl,
+	.probe		= pasemi_mac_probe,
+};
+
+static void __exit pasemi_mac_cleanup_module(void)
+{
+	pci_unregister_driver(&pasemi_mac_driver);
+}
+
+int pasemi_mac_init_module(void)
+{
+	return pci_register_driver(&pasemi_mac_driver);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
+MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
+
+module_init(pasemi_mac_init_module);
+module_exit(pasemi_mac_cleanup_module);
Index: merge/drivers/net/pasemi_mac.h
===================================================================
--- /dev/null
+++ merge/drivers/net/pasemi_mac.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright (C) 2006 PA Semi, Inc
+ *
+ * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and
+ * hardware register layouts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef PASEMI_MAC_H
+#define PASEMI_MAC_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+struct pasemi_mac_txring {
+	struct pas_dma_xct_descr	*desc;
+	dma_addr_t	 dma;
+	unsigned int	 size;
+	unsigned int	 count;
+	unsigned int	 next_to_use;
+	unsigned int	 next_to_clean;
+	unsigned short	 last_count;
+	struct pasemi_mac_buffer *desc_info;
+};
+
+struct pasemi_mac_rxring {
+	struct pas_dma_xct_descr	*desc;	/* RX channel descriptor ring */
+	dma_addr_t	 dma;
+	u64		*buffers;	/* RX interface buffer ring */
+	dma_addr_t	 buf_dma;
+	unsigned int	 size;
+	unsigned int	 count;
+	unsigned int	 next_to_fill;
+	unsigned int	 next_to_clean;
+	unsigned short	 last_count;
+	struct pasemi_mac_buffer *desc_info;
+};
+
+/* Number of unused descriptors, considering ring wraparounds */
+#define PASEMI_MAC_DESC_UNUSED(ring) ((((ring)->next_to_clean >		\
+					(ring)->next_to_use) ?		\
+					  0 :				\
+					  (ring)->count) +		\
+					  (ring)->next_to_clean -	\
+					  (ring)->next_to_use - 1)
+
+#define DESCR(ring, i) ((ring)->desc[i % ((ring)->count)])
+#define BUFF(ring, i) ((ring)->buffers[i % ((ring)->count)])
+#define INFO(ring, i) ((ring)->desc_info[i % ((ring)->count)])
+
+struct pasemi_mac {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct pci_dev *dma_pdev;
+	struct pci_dev *iob_pdev;
+	struct net_device_stats stats;
+
+	/* Pointer to the cacheable per-channel status registers */
+	u64	*rx_status;
+	u64	*tx_status;
+
+	u8		type;
+#define MAC_TYPE_GMAC	1
+#define MAC_TYPE_XAUI	2
+	u32	dma_txch;
+	u32	dma_if;
+	u32	dma_rxch;
+
+	u8		mac_addr[6];
+
+	struct timer_list	rxtimer;
+
+	struct pasemi_mac_txring *tx;
+	struct pasemi_mac_rxring *rx;
+};
+
+struct pasemi_mac_buffer {
+	struct sk_buff *skb;
+	dma_addr_t	dma;
+};
+
+
+
+#define PAS_MAC_CFG_PCFG		0x80
+#define    PAS_MAC_CFG_PCFG_PE		0x80000000
+#define    PAS_MAC_CFG_PCFG_CE		0x40000000
+#define    PAS_MAC_CFG_PCFG_BU		0x20000000
+#define    PAS_MAC_CFG_PCFG_TT		0x10000000
+#define    PAS_MAC_CFG_PCFG_TSR_M	0x0c000000
+#define    PAS_MAC_CFG_PCFG_TSR_10M	0x00000000
+#define    PAS_MAC_CFG_PCFG_TSR_100M	0x04000000
+#define    PAS_MAC_CFG_PCFG_TSR_1G	0x08000000
+#define    PAS_MAC_CFG_PCFG_TSR_10G	0x0c000000
+#define    PAS_MAC_CFG_PCFG_T24		0x02000000
+#define    PAS_MAC_CFG_PCFG_PR		0x01000000
+#define    PAS_MAC_CFG_PCFG_CRO_M	0x00ff0000
+#define    PAS_MAC_CFG_PCFG_CRO_S	16
+#define    PAS_MAC_CFG_PCFG_IPO_M	0x0000ff00
+#define    PAS_MAC_CFG_PCFG_IPO_S	8
+#define    PAS_MAC_CFG_PCFG_S1		0x00000080
+#define    PAS_MAC_CFG_PCFG_IO_M	0x00000060
+#define    PAS_MAC_CFG_PCFG_IO_MAC	0x00000000
+#define    PAS_MAC_CFG_PCFG_IO_OFF	0x00000020
+#define    PAS_MAC_CFG_PCFG_IO_IND_ETH	0x00000040
+#define    PAS_MAC_CFG_PCFG_IO_IND_IP	0x00000060
+#define    PAS_MAC_CFG_PCFG_LP		0x00000010
+#define    PAS_MAC_CFG_PCFG_TS		0x00000008
+#define    PAS_MAC_CFG_PCFG_HD		0x00000004
+#define    PAS_MAC_CFG_PCFG_SPD_M	0x00000003
+#define    PAS_MAC_CFG_PCFG_SPD_10M	0x00000000
+#define    PAS_MAC_CFG_PCFG_SPD_100M	0x00000001
+#define    PAS_MAC_CFG_PCFG_SPD_1G	0x00000002
+#define    PAS_MAC_CFG_PCFG_SPD_10G	0x00000003
+#define PAS_MAC_CFG_TXP			0x98
+#define    PAS_MAC_CFG_TXP_FCF		0x01000000
+#define    PAS_MAC_CFG_TXP_FCE		0x00800000
+#define    PAS_MAC_CFG_TXP_FC		0x00400000
+#define    PAS_MAC_CFG_TXP_FPC_M	0x00300000
+#define    PAS_MAC_CFG_TXP_FPC_S	20
+#define    PAS_MAC_CFG_TXP_FPC(x)	(((x) << PAS_MAC_CFG_TXP_FPC_S) & PAS_MAC_CFG_TXP_FPC_M)
+#define    PAS_MAC_CFG_TXP_RT		0x00080000
+#define    PAS_MAC_CFG_TXP_BL		0x00040000
+#define    PAS_MAC_CFG_TXP_SL_M		0x00030000
+#define    PAS_MAC_CFG_TXP_SL_S		16
+#define    PAS_MAC_CFG_TXP_SL(x)	(((x) << PAS_MAC_CFG_TXP_SL_S) & PAS_MAC_CFG_TXP_SL_M)
+#define    PAS_MAC_CFG_TXP_COB_M	0x0000f000
+#define    PAS_MAC_CFG_TXP_COB_S	12
+#define    PAS_MAC_CFG_TXP_COB(x)	(((x) << PAS_MAC_CFG_TXP_COB_S) & PAS_MAC_CFG_TXP_COB_M)
+#define    PAS_MAC_CFG_TXP_TIFT_M	0x00000f00
+#define    PAS_MAC_CFG_TXP_TIFT_S	8
+#define    PAS_MAC_CFG_TXP_TIFT(x)	(((x) << PAS_MAC_CFG_TXP_TIFT_S) & PAS_MAC_CFG_TXP_TIFT_M)
+#define    PAS_MAC_CFG_TXP_TIFG_M	0x000000ff
+#define    PAS_MAC_CFG_TXP_TIFG_S	0
+#define    PAS_MAC_CFG_TXP_TIFG(x)	(((x) << PAS_MAC_CFG_TXP_TIFG_S) & PAS_MAC_CFG_TXP_TIFG_M)
+
+#define PAS_MAC_IPC_CHNL		0x208
+#define    PAS_MAC_IPC_CHNL_DCHNO_M	0x003f0000
+#define    PAS_MAC_IPC_CHNL_DCHNO_S	16
+#define    PAS_MAC_IPC_CHNL_DCHNO(x)	(((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \
+					 PAS_MAC_IPC_CHNL_DCHNO_M)
+#define    PAS_MAC_IPC_CHNL_BCH_M	0x0000003f
+#define    PAS_MAC_IPC_CHNL_BCH_S	0
+#define    PAS_MAC_IPC_CHNL_BCH(x)	(((x) << PAS_MAC_IPC_CHNL_BCH_S) & \
+					 PAS_MAC_IPC_CHNL_BCH_M)
+
+/* All these registers live in the PCI configuration space for the DMA PCI
+ * device. Use the normal PCI config access functions for them.
+ */
+
+#define PAS_DMA_COM_TXCMD	0x100	/* Transmit Command Register  */
+#define    PAS_DMA_COM_TXCMD_EN		0x00000001 /* enable */
+#define PAS_DMA_COM_TXSTA	0x104	/* Transmit Status Register   */
+#define    PAS_DMA_COM_TXSTA_ACT	0x00000001 /* active */
+#define PAS_DMA_COM_RXCMD	0x108	/* Receive Command Register   */
+#define    PAS_DMA_COM_RXCMD_EN		0x00000001 /* enable */
+#define PAS_DMA_COM_RXSTA	0x10c	/* Receive Status Register    */
+#define    PAS_DMA_COM_RXSTA_ACT	0x00000001 /* active */
+
+
+#define _PAS_DMA_RXINT_STRIDE		0x20
+#define PAS_DMA_RXINT_RCMDSTA(i)	(0x200+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_RCMDSTA_EN	0x00000001
+#define    PAS_DMA_RXINT_RCMDSTA_ST	0x00000002
+#define    PAS_DMA_RXINT_RCMDSTA_OO	0x00000100
+#define    PAS_DMA_RXINT_RCMDSTA_BP	0x00000200
+#define    PAS_DMA_RXINT_RCMDSTA_DR	0x00000400
+#define    PAS_DMA_RXINT_RCMDSTA_BT	0x00000800
+#define    PAS_DMA_RXINT_RCMDSTA_TB	0x00001000
+#define    PAS_DMA_RXINT_RCMDSTA_ACT	0x00010000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_M	0xfffe0000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_S	17
+#define PAS_DMA_RXINT_INCR(i)		(0x210+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_INCR_INCR_M	0x0000ffff
+#define    PAS_DMA_RXINT_INCR_INCR_S	0
+#define    PAS_DMA_RXINT_INCR_INCR(x)	((x) & 0x0000ffff)
+#define PAS_DMA_RXINT_BASEL(i)		(0x218+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEL_BRBL(x)	((x) & ~0x3f)
+#define PAS_DMA_RXINT_BASEU(i)		(0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEU_BRBH(x)	((x) & 0xfff)
+#define    PAS_DMA_RXINT_BASEU_SIZ_M	0x3fff0000	/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXINT_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXINT_BASEU_SIZ(x)	(((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
+					 PAS_DMA_RXINT_BASEU_SIZ_M)
+
+
+#define _PAS_DMA_TXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_TXCHAN_TCMDSTA	0x300	/* Command / Status		*/
+#define _PAS_DMA_TXCHAN_CFG	0x304	/* Configuration		*/
+#define _PAS_DMA_TXCHAN_DSCRBU	0x308	/* Descriptor BU Allocation	*/
+#define _PAS_DMA_TXCHAN_INCR	0x310	/* Descriptor increment		*/
+#define _PAS_DMA_TXCHAN_CNT	0x314	/* Descriptor count/offset	*/
+#define _PAS_DMA_TXCHAN_BASEL	0x318	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_TXCHAN_BASEU	0x31c	/*			(high)	*/
+#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_TCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ACT	0x00010000	/* Active */
+#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_CFG_TY_IFACE	0x00000000	/* Type = interface */
+#define    PAS_DMA_TXCHAN_CFG_TATTR_M	0x0000003c
+#define    PAS_DMA_TXCHAN_CFG_TATTR_S	2
+#define    PAS_DMA_TXCHAN_CFG_TATTR(x)	(((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
+					 PAS_DMA_TXCHAN_CFG_TATTR_M)
+#define    PAS_DMA_TXCHAN_CFG_WT_M	0x000001c0
+#define    PAS_DMA_TXCHAN_CFG_WT_S	6
+#define    PAS_DMA_TXCHAN_CFG_WT(x)	(((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
+					 PAS_DMA_TXCHAN_CFG_WT_M)
+#define    PAS_DMA_TXCHAN_CFG_CF	0x00001000	/* Clean first line */
+#define    PAS_DMA_TXCHAN_CFG_CL	0x00002000	/* Clean last line */
+#define    PAS_DMA_TXCHAN_CFG_UP	0x00004000	/* update tx descr when sent */
+#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_TXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_TXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_TXCHAN_BASEU_SIZ_M)
+
+#define _PAS_DMA_RXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_RXCHAN_CCMDSTA	0x800	/* Command / Status		*/
+#define _PAS_DMA_RXCHAN_CFG	0x804	/* Configuration		*/
+#define _PAS_DMA_RXCHAN_INCR	0x810	/* Descriptor increment		*/
+#define _PAS_DMA_RXCHAN_CNT	0x814	/* Descriptor count/offset	*/
+#define _PAS_DMA_RXCHAN_BASEL	0x818	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_RXCHAN_BASEU	0x81c	/*			(high)	*/
+#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ACT	0x00010000	/* Active */
+#define    PAS_DMA_RXCHAN_CCMDSTA_DU	0x00020000
+#define PAS_DMA_RXCHAN_CFG(c)     (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CFG_HBU_M	0x00000380
+#define    PAS_DMA_RXCHAN_CFG_HBU_S	7
+#define    PAS_DMA_RXCHAN_CFG_HBU(x)	(((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
+					 PAS_DMA_RXCHAN_CFG_HBU_M)
+#define PAS_DMA_RXCHAN_INCR(c)    (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEL(c)   (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_RXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_RXCHAN_BASEU(c)   (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_RXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_RXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_RXCHAN_BASEU_SIZ_M)
+
+/* status register layout in IOB region, at 0xfb800000 */
+struct pasdma_status {
+	u64 rx_sta[64];
+	u64 tx_sta[20];
+};
+
+#define    PAS_STATUS_PCNT_M		0x000000000000ffff
+#define    PAS_STATUS_PCNT_S		0
+#define    PAS_STATUS_DCNT_M		0x00000000ffff0000
+#define    PAS_STATUS_DCNT_S		16
+#define    PAS_STATUS_BPCNT_M		0x0000ffff00000000
+#define    PAS_STATUS_BPCNT_S		32
+#define    PAS_STATUS_TIMER		0x1000000000000000
+#define    PAS_STATUS_ERROR		0x2000000000000000
+#define    PAS_STATUS_SOFT		0x4000000000000000
+#define    PAS_STATUS_INT		0x8000000000000000
+
+#define PAS_IOB_DMA_RXCH_CFG(i)		(0x1100 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_TXCH_CFG(i)		(0x1200 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_RXCH_STAT(i)	(0x1300 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_TXCH_STAT(i)	(0x1400 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_RXCH_RESET(i)	(0x1500 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S	0
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_RXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_RXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_RXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_RXCH_RESET_PINTC		0x00000001
+#define PAS_IOB_DMA_TXCH_RESET(i)	(0x1600 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S	0
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_TXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_TXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_TXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_TXCH_RESET_PINTC		0x00000001
+
+#define PAS_IOB_DMA_COM_TIMEOUTCFG		0x1700
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M	0x00ffffff
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S	0
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)	(((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
+						 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
+
+struct pas_dma_xct_descr {
+	union {
+ 		u64	mactx;
+#define	XCT_MACTX_T		0x8000000000000000
+#define	XCT_MACTX_ST		0x4000000000000000
+#define XCT_MACTX_NORES		0x0000000000000000
+#define XCT_MACTX_8BRES		0x1000000000000000
+#define XCT_MACTX_24BRES	0x2000000000000000
+#define XCT_MACTX_40BRES	0x3000000000000000
+#define XCT_MACTX_I		0x0800000000000000
+#define XCT_MACTX_O		0x0400000000000000
+#define XCT_MACTX_E		0x0200000000000000
+#define XCT_MACTX_VLAN_M	0x0180000000000000
+#define XCT_MACTX_VLAN_NOP	0x0000000000000000
+#define XCT_MACTX_VLAN_REMOVE	0x0080000000000000
+#define XCT_MACTX_VLAN_INSERT   0x0100000000000000
+#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000
+#define XCT_MACTX_CRC_M		0x0060000000000000
+#define XCT_MACTX_CRC_NOP	0x0000000000000000
+#define XCT_MACTX_CRC_INSERT	0x0020000000000000
+#define XCT_MACTX_CRC_PAD	0x0040000000000000
+#define XCT_MACTX_CRC_REPLACE	0x0060000000000000
+#define XCT_MACTX_SS		0x0010000000000000
+#define XCT_MACTX_LLEN_M	0x00007fff00000000
+#define XCT_MACTX_LLEN_S	32ull
+#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & XCT_MACTX_LLEN_M)
+#define XCT_MACTX_IPH_M		0x00000000f8000000
+#define XCT_MACTX_IPH_S		27ull
+#define XCT_MACTX_IPH(x)	((((long)(x)) << XCT_MACTX_IPH_S) & XCT_MACTX_IPH_M)
+#define XCT_MACTX_IPO_M		0x0000000007c00000
+#define XCT_MACTX_IPO_S		22ull
+#define XCT_MACTX_IPO(x)	((((long)(x)) << XCT_MACTX_IPO_S) & XCT_MACTX_IPO_M)
+#define XCT_MACTX_CSUM_M	0x0000000000000060
+#define XCT_MACTX_CSUM_NOP	0x0000000000000000
+#define XCT_MACTX_CSUM_TCP	0x0000000000000040
+#define XCT_MACTX_CSUM_UDP	0x0000000000000060
+#define XCT_MACTX_V6		0x0000000000000010
+#define XCT_MACTX_C		0x0000000000000004
+#define XCT_MACTX_AL2		0x0000000000000002
+		u64	macrx;
+#define	XCT_MACRX_T		0x8000000000000000
+#define	XCT_MACRX_ST		0x4000000000000000
+#define XCT_MACRX_NORES		0x0000000000000000
+#define XCT_MACRX_8BRES		0x1000000000000000
+#define XCT_MACRX_24BRES	0x2000000000000000
+#define XCT_MACRX_40BRES	0x3000000000000000
+#define XCT_MACRX_O		0x0400000000000000
+#define XCT_MACRX_E		0x0200000000000000
+#define XCT_MACRX_FF		0x0100000000000000
+#define XCT_MACRX_PF		0x0080000000000000
+#define XCT_MACRX_OB		0x0040000000000000
+#define XCT_MACRX_OD		0x0020000000000000
+#define XCT_MACRX_FS		0x0010000000000000
+#define XCT_MACRX_NB_M		0x000fc00000000000
+#define XCT_MACRX_NB_S		46ULL
+#define XCT_MACRX_NB(x)		((((long)(x)) << XCT_MACRX_NB_S) & XCT_MACRX_NB_M)
+#define XCT_MACRX_LLEN_M	0x00003fff00000000
+#define XCT_MACRX_LLEN_S	32ULL
+#define XCT_MACRX_LLEN(x)	((((long)(x)) << XCT_MACRX_LLEN_S) & XCT_MACRX_LLEN_M)
+#define XCT_MACRX_CRC		0x0000000080000000
+#define XCT_MACRX_LEN_M		0x0000000060000000
+#define XCT_MACRX_LEN_TOOSHORT	0x0000000020000000
+#define XCT_MACRX_LEN_BELOWMIN	0x0000000040000000
+#define XCT_MACRX_LEN_TRUNC	0x0000000060000000
+#define XCT_MACRX_CAST_M	0x0000000018000000
+#define XCT_MACRX_CAST_UNI	0x0000000000000000
+#define XCT_MACRX_CAST_MULTI	0x0000000008000000
+#define XCT_MACRX_CAST_BROAD	0x0000000010000000
+#define XCT_MACRX_CAST_PAUSE	0x0000000018000000
+#define XCT_MACRX_VLC_M		0x0000000006000000
+#define XCT_MACRX_FM		0x0000000001000000
+#define XCT_MACRX_HTY_M		0x0000000000c00000
+#define XCT_MACRX_HTY_IPV4_OK	0x0000000000000000
+#define XCT_MACRX_HTY_IPV6 	0x0000000000400000
+#define XCT_MACRX_HTY_IPV4_BAD	0x0000000000800000
+#define XCT_MACRX_HTY_NONIP	0x0000000000c00000
+#define XCT_MACRX_IPP_M		0x00000000003f0000
+#define XCT_MACRX_IPP_S		16
+#define XCT_MACRX_CSUM_M	0x000000000000ffff
+#define XCT_MACRX_CSUM_S	0
+	};
+	union {
+		u64	ptr;
+#define XCT_PTR_T		0x8000000000000000
+#define XCT_PTR_LEN_M		0x7ffff00000000000
+#define XCT_PTR_LEN_S		44
+#define XCT_PTR_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & XCT_PTR_LEN_M)
+#define XCT_PTR_ADDR_M		0x00000fffffffffff
+#define XCT_PTR_ADDR_S		0
+#define XCT_PTR_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & XCT_PTR_ADDR_M)
+		u64	rxb;
+#define XCT_RXB_LEN_M		0x0ffff00000000000
+#define XCT_RXB_LEN_S		44
+#define XCT_RXB_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & XCT_PTR_LEN_M)
+#define XCT_RXB_ADDR_M		0x00000fffffffffff
+#define XCT_RXB_ADDR_S		0
+#define XCT_RXB_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & XCT_PTR_ADDR_M)
+	};
+};
+
+#endif /* PASEMI_MAC_H */
Index: merge/MAINTAINERS
===================================================================
--- merge.orig/MAINTAINERS
+++ merge/MAINTAINERS
@@ -2484,6 +2484,12 @@ L:	orinoco-devel@lists.sourceforge.net
 W:	http://www.nongnu.org/orinoco/
 S:	Maintained
 
+PA SEMI ETHERNET DRIVER
+P:	Olof Johansson
+M:	olof@lixom.net
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 PARALLEL PORT SUPPORT
 P:	Phil Blundell
 M:	philb@gnu.org
Index: merge/include/linux/pci_ids.h
===================================================================
--- merge.orig/include/linux/pci_ids.h
+++ merge/include/linux/pci_ids.h
@@ -2064,6 +2064,8 @@
 #define PCI_VENDOR_ID_TDI               0x192E
 #define PCI_DEVICE_ID_TDI_EHCI          0x0101
 
+#define PCI_VENDOR_ID_PASEMI		0x1959
+
 #define PCI_VENDOR_ID_JMICRON		0x197B
 #define PCI_DEVICE_ID_JMICRON_JMB360	0x2360
 #define PCI_DEVICE_ID_JMICRON_JMB361	0x2361

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-30  1:41   ` Olof Johansson
@ 2007-01-30  2:34     ` Jeff Garzik
  2007-01-30 20:53       ` Olof Johansson
  0 siblings, 1 reply; 23+ messages in thread
From: Jeff Garzik @ 2007-01-30  2:34 UTC (permalink / raw)
  To: Olof Johansson; +Cc: Stephen Hemminger, netdev

Olof Johansson wrote:
> On Mon, Jan 29, 2007 at 10:22:33AM -0800, Stephen Hemminger wrote:
>> Basic initialization, setup comments.
> 
> Thanks, fixes have been incorporated and will be reposted. Most of them
> were obviously just my lack of diligence. See, however, the two below.
> 
>>> +static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
>>> +{
>>> +	struct net_device *dev = data;
>>> +	struct pasemi_mac *mac = netdev_priv(dev);
>>> +	unsigned int reg;
>>> +
>>> +	pasemi_mac_clean_tx(mac);
>>> +
>>> +	reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
>>> +	if (*mac->tx_status & PAS_STATUS_TIMER)
>>> +		reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;
>>> +
>>> +	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
>>> +			       reg);
>>> +
>>> +	return IRQ_HANDLED;
>>> +}
>> To do shared IRQ's properly you need to check to see if
>> this is your device IRQ or not. Maybe reading config value?
> 
> Right now it's guaranteed that the interrupts will not be shared. They're
> fixed for the on-chip devices, and no other driver should be binding to
> the same channels (and thus irqs).
> 
> If it changes in the future, the driver would need other rework as well.

Nonetheless, it is far more sane to check for work, and return if no 
work.  Who knows if the hardware will signal an interrupt early or late.
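
E.g. something like this (only a sketch, assuming the per-channel status
word the driver already reads is the right thing to test):

	static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
	{
		struct net_device *dev = data;
		struct pasemi_mac *mac = netdev_priv(dev);

		/* No work pending on this channel: not our interrupt */
		if (!(*mac->tx_status & PAS_STATUS_INT))
			return IRQ_NONE;

		/* ... clean the ring and ack the interrupt as before ... */
		return IRQ_HANDLED;
	}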


>>> +static struct pci_driver pasemi_mac_driver = {
>>> +   .name = "pasemi_mac",
>>> +   .id_table = pasemi_mac_pci_tbl,
>>> +   .probe = pasemi_mac_probe,
>> Don't you need a remove routine?
> 
> No hotplug support at this time, so I didn't see any use in providing one.

module remove.

	Jeff




^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-29  6:08 [PATCH] PA Semi PWRficient Ethernet driver Olof Johansson
                   ` (2 preceding siblings ...)
  2007-01-30  1:44 ` [PATCH] [v2]PA " Olof Johansson
@ 2007-01-30 10:03 ` Christoph Hellwig
  2007-01-30 15:36   ` Olof Johansson
  3 siblings, 1 reply; 23+ messages in thread
From: Christoph Hellwig @ 2007-01-30 10:03 UTC (permalink / raw)
  To: Olof Johansson; +Cc: jgarzik, netdev

On Mon, Jan 29, 2007 at 12:08:52AM -0600, Olof Johansson wrote:
> Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)
> 
> Basic enablement, will be complemented with performance enhancements
> over time. PHY support will be added as well.
> 
> This patch still uses the numerical PCI vendor id, it will be replaced
> when the pci_ids.h change goes in (same as the other currently pending
> drivers).

From a quick glance over the code you don't seem to handle memory allocation
errors at all; that needs some fixing.
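
E.g. the usual goto-unwind pattern in the ring setup paths (only a sketch,
with names matching the driver's allocations):

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out_ring;

	ring->desc_info = kzalloc(sizeof(*ring->desc_info) * ring->count,
				  GFP_KERNEL);
	if (!ring->desc_info)
		goto out_desc_info;
	/* ... further allocations, each with its own unwind label ... */

out_desc_info:
	kfree(ring);
out_ring:
	return -ENOMEM;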

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-30  1:41   ` Olof Johansson
@ 2007-01-30 10:06     ` Christoph Hellwig
  2007-01-30 15:34       ` Olof Johansson
  2007-01-30 21:45     ` Francois Romieu
  1 sibling, 1 reply; 23+ messages in thread
From: Christoph Hellwig @ 2007-01-30 10:06 UTC (permalink / raw)
  To: Olof Johansson; +Cc: Francois Romieu, jgarzik, netdev

On Mon, Jan 29, 2007 at 07:41:16PM -0600, Olof Johansson wrote:
> Maybe a bit of introduction could be useful (also regarding the
> pci_map/alloc comments below).
> 
> Our devices are on-chip, and while they're not on a PCI(e) bus internally,
> they do have config headers and will show up as devices on a pseudo-bus
> (the root one, in fact).
> 
> Also, while the driver could go through the IOMMU layers, there's no
> real use in doing so at this time.

sorry, but bypassing this is not what we want in drivers at all.
If you have an iommu and don't want it on the root bus, choose
the noop dma implementation for this bus, which is easily possible on
powerpc.


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-30 10:06     ` Christoph Hellwig
@ 2007-01-30 15:34       ` Olof Johansson
  0 siblings, 0 replies; 23+ messages in thread
From: Olof Johansson @ 2007-01-30 15:34 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Francois Romieu, jgarzik, netdev

On Tue, Jan 30, 2007 at 10:06:49AM +0000, Christoph Hellwig wrote:
> On Mon, Jan 29, 2007 at 07:41:16PM -0600, Olof Johansson wrote:
> > Maybe a bit of introduction could be useful (also regarding the
> > pci_map/alloc comments below).
> > 
> > Our devices are on-chip, and while they're not on a PCI(e) bus internally,
> > they do have config headers and will show up as devices on a pseudo-bus
> > (the root one, in fact).
> > 
> > Also, while the driver could go through the IOMMU layers, there's no
> > real use in doing so at this time.
> 
> sorry, but bypassing this is not what we want in drivers at all.
> If you have an iommu and don't want it on the root bus, choose
> the noop dma implementation for this bus, which is easily possible on
> powerpc.

Good point, I'll do that instead.


-Olof

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-30 10:03 ` [PATCH] " Christoph Hellwig
@ 2007-01-30 15:36   ` Olof Johansson
  0 siblings, 0 replies; 23+ messages in thread
From: Olof Johansson @ 2007-01-30 15:36 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: jgarzik, netdev

On Tue, Jan 30, 2007 at 10:03:58AM +0000, Christoph Hellwig wrote:
> On Mon, Jan 29, 2007 at 12:08:52AM -0600, Olof Johansson wrote:
> > Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)
> > 
> > Basic enablement, will be complemented with performance enhancements
> > over time. PHY support will be added as well.
> > 
> > This patch still uses the numerical PCI vendor id, it will be replaced
> > when the pci_ids.h change goes in (same as the other currently pending
> > drivers).
> 
> From a quick glance over the code you don't seem to handle memory allocation
> errors at all; that needs some fixing.

v2 that I posted as a comment to the first one should have taken care
of most cases. I'll go through once more and see if I missed any before
posting a revised version later today.


Thanks,

-Olof

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-30  2:34     ` Jeff Garzik
@ 2007-01-30 20:53       ` Olof Johansson
  0 siblings, 0 replies; 23+ messages in thread
From: Olof Johansson @ 2007-01-30 20:53 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: Stephen Hemminger, netdev

On Mon, Jan 29, 2007 at 09:34:06PM -0500, Jeff Garzik wrote:
> Olof Johansson wrote:
> >Right now it's guaranteed that the interrupts will not be shared. They're
> >fixed for the on-chip devices, and no other driver should be binding to
> >the same channels (and thus irqs).
> >
> >If it changes in the future, the driver would need other rework as well.
> 
> Nonetheless, it is far more sane to check for work, and return if no 
> work.  Who knows if the hardware will signal an interrupt early or late.

Easy enough. Added in the next version.

> >>>+static struct pci_driver pasemi_mac_driver = {
> >>>+   .name = "pasemi_mac",
> >>>+   .id_table = pasemi_mac_pci_tbl,
> >>>+   .probe = pasemi_mac_probe,
> >>Don't you need a remove routine?
> >
> >No hotplug support at this time, so I didn't see any use in providing one.
> 
> module remove.

Oh, good point. Added in the next version to be posted.
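
Roughly this shape (a condensed sketch of what the next posting adds):

	static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
	{
		struct net_device *netdev = pci_get_drvdata(pdev);
		struct pasemi_mac *mac = netdev_priv(netdev);

		unregister_netdev(netdev);
		pci_disable_device(pdev);
		pci_dev_put(mac->dma_pdev);
		pci_dev_put(mac->iob_pdev);
		free_netdev(netdev);
	}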


Thanks,

-Olof

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-30  1:41   ` Olof Johansson
  2007-01-30 10:06     ` Christoph Hellwig
@ 2007-01-30 21:45     ` Francois Romieu
  2007-01-31  4:52       ` Olof Johansson
  1 sibling, 1 reply; 23+ messages in thread
From: Francois Romieu @ 2007-01-30 21:45 UTC (permalink / raw)
  To: Olof Johansson; +Cc: jgarzik, netdev

Olof Johansson <olof@lixom.net> :
> On Mon, Jan 29, 2007 at 11:35:06PM +0100, Francois Romieu wrote:
[...]
> > - The driver does not contain a single SMP locking instruction but
> >   http://www.pasemi.com claims the platform to be multicore.
> >   Is the driver really designed to be lockless ?
> 
> Unless I misunderstood something, NAPI drivers that don't set NETIF_F_LLTX
> will have all locking taken care of by higher layers, no?

It is not necessarily _that_ simple (it would be cool though :o) ).

For instance, what prevents pasemi_mac_clean_tx() from being issued
from IRQ context (pasemi_mac_tx_intr) and from the xmit handler
(pasemi_mac_start_tx) at the same time?

[...]
> > unsigned int is supposed to save some cycles on ppc.
> 
> Who told you that? That's not true.

Jon D Mason <jonmason@us.ibm.com> on 25/08/2004 about ppc64 (not ppc, sorry).

[...]
> > > +#define DESCR(ring, i) ((ring)->desc[i % ((ring)->count)])
> > > +#define BUFF(ring, i) ((ring)->buffers[i % ((ring)->count)])
> > > +#define INFO(ring, i) ((ring)->desc_info[i % ((ring)->count)])
> > 
> > A bit ugly/obfuscating/name clash prone imvho.
> > 
> > Use local variables ?
> 
> I'm open for suggestions here, not sure how local variables will help though?

	struct pas_dma_xct_descr *desc = &ring->desc[i % ring->count];
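
which in the rx/tx cleanup loops would read, for instance:

	dp = &mac->rx->desc[i % mac->rx->count];
	info = &mac->rx->desc_info[i % mac->rx->count];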

-- 
Ueimor

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] PA Semi PWRficient Ethernet driver
  2007-01-30 21:45     ` Francois Romieu
@ 2007-01-31  4:52       ` Olof Johansson
  0 siblings, 0 replies; 23+ messages in thread
From: Olof Johansson @ 2007-01-31  4:52 UTC (permalink / raw)
  To: Francois Romieu; +Cc: jgarzik, netdev

On Tue, Jan 30, 2007 at 10:45:18PM +0100, Francois Romieu wrote:
> Olof Johansson <olof@lixom.net> :
> > On Mon, Jan 29, 2007 at 11:35:06PM +0100, Francois Romieu wrote:
> [...]
> > > - The driver does not contain a single SMP locking instruction but
> > >   http://www.pasemi.com claims the platform to be multicore.
> > >   Is the driver really designed to be lockless ?
> > 
> > Unless I misunderstood something, NAPI drivers that don't set NETIF_F_LLTX
> > will have all locking taken care of by higher layers, no?
> 
> It is not necessarily _that_ simple (it would be cool though :o) ).
> 
> For instance, what prevents pasemi_mac_clean_tx() from being issued
> from IRQ context (pasemi_mac_tx_intr) and from the xmit handler
> (pasemi_mac_start_tx) at the same time?

You're right. Bummer. I'll add locking on the rings.
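
A minimal sketch of what that ends up looking like (per-ring spinlock,
irqsave since the cleanup can also run from the interrupt handler):

	unsigned long flags;

	spin_lock_irqsave(&mac->tx->lock, flags);
	/* walk next_to_clean .. next_to_use, unmap and free completed skbs */
	spin_unlock_irqrestore(&mac->tx->lock, flags);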

> [...]
> > > unsigned int is supposed to save some cycles on ppc.
> > 
> > Who told you that? That's not true.
> 
> Jon D Mason <jonmason@us.ibm.com> on 25/08/2004 about ppc64 (not ppc, sorry).

Interesting, I hadn't thought about that before.

There's nothing architectural in PPC that makes signed math slower than
unsigned, but in the case of modulo operations (which we do a lot on the
rings), unsigned is by definition more complex to do the operations on.

It's pretty much within the noise on the current implementation, but
still an interesting tidbit. Thanks.

> [...]
> > > > +#define DESCR(ring, i) ((ring)->desc[i % ((ring)->count)])
> > > > +#define BUFF(ring, i) ((ring)->buffers[i % ((ring)->count)])
> > > > +#define INFO(ring, i) ((ring)->desc_info[i % ((ring)->count)])
> > > 
> > > A bit ugly/obfuscating/name clash prone imvho.
> > > 
> > > Use local variables ?
> > 
> > I'm open for suggestions here, not sure how local variables will help though?
> 
> 	struct pas_dma_xct_descr *desc = ring->desc[i % ring->count];

That makes sense. Done.


-Olof

^ permalink raw reply	[flat|nested] 23+ messages in thread

* [PATCH] [v3] PA Semi PWRficient Ethernet driver
  2007-01-30  1:44 ` [PATCH] [v2]PA " Olof Johansson
@ 2007-01-31  5:44   ` Olof Johansson
  2007-01-31 10:34     ` Jeff Garzik
                       ` (3 more replies)
  0 siblings, 4 replies; 23+ messages in thread
From: Olof Johansson @ 2007-01-31  5:44 UTC (permalink / raw)
  To: jgarzik; +Cc: netdev, Stephen Hemminger, Francois Romieu, Christoph Hellwig

Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)

Basic enablement, will be complemented with performance enhancements
over time. PHY support will be added as well.

Signed-off-by: Olof Johansson <olof@lixom.net>

---

Further improvements based on comments:

* Added remove function
* Checking interrupt status in handler
* Misc cleanups w.r.t. ring handling (INFO/DESCR/BUFF are gone)
* Added locking of the rings
* Using PCI DMA for all buffers

Misc other changes while I was touching the code:

* Setting the interrupt descriptor field to include interface number
* Added PCI vendor ID; it's been submitted to sf.net as well.
* Moved Kconfig entry to the 10GbE section


Index: merge/drivers/net/Kconfig
===================================================================
--- merge.orig/drivers/net/Kconfig
+++ merge/drivers/net/Kconfig
@@ -2488,6 +2488,13 @@ config NETXEN_NIC
 	help
 	  This enables the support for NetXen's Gigabit Ethernet card.
 
+config PASEMI_MAC
+	tristate "PA Semi 1/10Gbit MAC"
+	depends on PPC64 && PCI
+	help
+	  This driver supports the on-chip 1/10Gbit Ethernet controller on
+	  PA Semi's PWRficient line of chips.
+
 endmenu
 
 source "drivers/net/tokenring/Kconfig"
Index: merge/drivers/net/Makefile
===================================================================
--- merge.orig/drivers/net/Makefile
+++ merge/drivers/net/Makefile
@@ -196,6 +196,7 @@ obj-$(CONFIG_SMC91X) += smc91x.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
 
 obj-$(CONFIG_MACB) += macb.o
 
Index: merge/drivers/net/pasemi_mac.c
===================================================================
--- /dev/null
+++ merge/drivers/net/pasemi_mac.c
@@ -0,0 +1,963 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/checksum.h>
+
+#include "pasemi_mac.h"
+
+#define INITIAL_RX_RING_SIZE 512
+#define INITIAL_TX_RING_SIZE 512
+
+#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
+
+#define PAS_DMA_MAX_IF     40
+#define PAS_DMA_MAX_RXCH   8
+#define PAS_DMA_MAX_TXCH   8
+
+/* XXXOJN these should come out of the device tree some day */
+#define PAS_DMA_CAP_BASE   0xe00d0040
+#define PAS_DMA_CAP_SIZE   0x100
+#define PAS_DMA_COM_BASE   0xe00d0100
+#define PAS_DMA_COM_SIZE   0x100
+
+static struct pasdma_status *dma_status;
+
+static int pasemi_set_mac_addr(struct pasemi_mac *mac)
+{
+	struct pci_dev *pdev = mac->pdev;
+	struct device_node *dn = pci_device_to_OF_node(pdev);
+	const u8 *maddr;
+	u8 addr[6];
+
+	if (!dn) {
+		dev_dbg(&pdev->dev,
+			  "No device node for mac, not configuring\n");
+		return -ENOENT;
+	}
+
+	maddr = get_property(dn, "mac-address", NULL);
+	if (maddr == NULL) {
+		dev_warn(&pdev->dev,
+			 "no mac address in device tree, not configuring\n");
+		return -ENOENT;
+	}
+
+	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
+		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
+		dev_warn(&pdev->dev,
+			 "can't parse mac address, not configuring\n");
+		return -EINVAL;
+	}
+
+	memcpy(mac->mac_addr, addr, sizeof(addr));
+	return 0;
+}
+
+static int pasemi_mac_setup_rx_resources(struct net_device *dev)
+{
+	struct pasemi_mac_rxring *ring;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	int chan_id = mac->dma_rxch;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+
+	if (!ring)
+		goto out_ring;
+
+	spin_lock_init(&ring->lock);
+	ring->count = INITIAL_RX_RING_SIZE;
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer)*ring->count,
+				  GFP_KERNEL);
+
+	if (!ring->desc_info)
+		goto out_desc_info;
+
+	/* Allocate descriptors */
+	ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
+					ring->count *
+					sizeof(struct pas_dma_xct_descr),
+					&ring->dma, GFP_KERNEL);
+
+	if (!ring->desc)
+		goto out_desc;
+
+	memset(ring->desc, 0, ring->count * sizeof(struct pas_dma_xct_descr));
+
+	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
+					   ring->count * sizeof(u64),
+					   &ring->buf_dma, GFP_KERNEL);
+	if (!ring->buffers)
+		goto out_buffers;
+
+	memset(ring->buffers, 0, ring->count * sizeof(u64));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
+			       PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
+			       PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
+			       PAS_DMA_RXCHAN_BASEU_SIZ(INITIAL_RX_RING_SIZE >> 2));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
+			       PAS_DMA_RXCHAN_CFG_HBU(1));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
+			       PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
+			       PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
+			       PAS_DMA_RXINT_BASEU_SIZ(INITIAL_RX_RING_SIZE >> 3));
+
+	ring->next_to_fill = 0;
+	ring->next_to_clean = 0;
+
+	snprintf(ring->irq_name, sizeof(ring->irq_name),
+		 "%s rx", dev->name);
+	mac->rx = ring;
+
+	return 0;
+
+out_buffers:
+	dma_free_coherent(&mac->dma_pdev->dev,
+			  mac->rx->count * sizeof(struct pas_dma_xct_descr),
+			  mac->rx->desc, mac->rx->dma);
+out_desc:
+	kfree(ring->desc_info);
+out_desc_info:
+	kfree(ring);
+out_ring:
+	return -ENOMEM;
+}
+
+
+static int pasemi_mac_setup_tx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	u32 val;
+	int chan_id = mac->dma_txch;
+	struct pasemi_mac_txring *ring;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		goto out_ring;
+
+	spin_lock_init(&ring->lock);
+	ring->count = INITIAL_TX_RING_SIZE;
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer)*ring->count,
+				  GFP_KERNEL);
+	if (!ring->desc_info)
+		goto out_desc_info;
+
+	/* Allocate descriptors */
+	ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
+					ring->count *
+					sizeof(struct pas_dma_xct_descr),
+					&ring->dma, GFP_KERNEL);
+	if (!ring->desc)
+		goto out_desc;
+
+	memset(ring->desc, 0, ring->count * sizeof(struct pas_dma_xct_descr));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
+			       PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
+	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
+	val |= PAS_DMA_TXCHAN_BASEU_SIZ(INITIAL_TX_RING_SIZE >> 2);
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
+			       PAS_DMA_TXCHAN_CFG_TY_IFACE |
+			       PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
+			       PAS_DMA_TXCHAN_CFG_UP |
+			       PAS_DMA_TXCHAN_CFG_WT(2));
+
+	ring->next_to_use = 0;
+	ring->next_to_clean = 0;
+
+	snprintf(ring->irq_name, sizeof(ring->irq_name),
+		 "%s tx", dev->name);
+	mac->tx = ring;
+
+	return 0;
+
+out_desc:
+	kfree(ring->desc_info);
+out_desc_info:
+	kfree(ring);
+out_ring:
+	return -ENOMEM;
+}
+
+static noinline void pasemi_mac_free_tx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+
+	for (i = 0; i < mac->tx->count; i++) {
+		info = &mac->tx->desc_info[i % mac->tx->count];
+		dp = &mac->tx->desc[i % mac->tx->count];
+		if (info->dma) {
+			if (info->skb) {
+				pci_unmap_single(mac->dma_pdev,
+						 info->dma,
+						 info->skb->len,
+						 PCI_DMA_TODEVICE);
+				dev_kfree_skb_any(info->skb);
+			}
+			info->dma = 0;
+			info->skb = 0;
+			dp->mactx = 0;
+			dp->ptr = 0;
+		}
+	}
+
+	dma_free_coherent(&mac->dma_pdev->dev,
+			  mac->tx->count * sizeof(struct pas_dma_xct_descr),
+			  mac->tx->desc, mac->tx->dma);
+
+	kfree(mac->tx->desc_info);
+	kfree(mac->tx);
+	mac->tx = NULL;
+}
+
+static noinline void pasemi_mac_free_rx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+
+	for (i = 0; i < mac->rx->count; i++) {
+		info = &mac->rx->desc_info[i % mac->rx->count];
+		dp = &mac->rx->desc[i % mac->rx->count];
+		if (info->dma) {
+			if (info->skb) {
+				pci_unmap_single(mac->dma_pdev,
+						 info->dma,
+						 info->skb->len,
+						 PCI_DMA_FROMDEVICE);
+				dev_kfree_skb_any(info->skb);
+			}
+			info->dma = 0;
+			info->skb = 0;
+			dp->macrx = 0;
+			dp->ptr = 0;
+		}
+	}
+
+	dma_free_coherent(&mac->dma_pdev->dev,
+			  mac->rx->count * sizeof(struct pas_dma_xct_descr),
+			  mac->rx->desc, mac->rx->dma);
+
+	dma_free_coherent(&mac->dma_pdev->dev, mac->rx->count * sizeof(u64),
+			  mac->rx->buffers, mac->rx->buf_dma);
+
+	kfree(mac->rx->desc_info);
+	kfree(mac->rx);
+	mac->rx = NULL;
+}
+
+static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	int start = mac->rx->next_to_fill;
+	int count;
+
+	count = ((mac->rx->next_to_clean & ~7) + mac->rx->count -
+		 mac->rx->next_to_fill) % mac->rx->count;
+
+	if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
+		count = mac->rx->count - 8;
+
+	/* Limit so we don't go into the last cache line */
+	count -= 8;
+
+	if (count <= 0)
+		return;
+
+	for (i = start; i < start + count; i++) {
+		struct pasemi_mac_buffer *info = &mac->rx->desc_info[i % mac->rx->count];
+		u64 *buff = &mac->rx->buffers[i % mac->rx->count];
+		struct sk_buff *skb;
+		dma_addr_t dma;
+
+		skb = dev_alloc_skb(BUF_SIZE);
+
+		if (!skb)
+			return;
+
+		skb->dev = dev;
+
+		dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
+				     PCI_DMA_FROMDEVICE);
+		info->skb = skb;
+		info->dma = dma;
+		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
+	}
+
+	wmb();
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
+			       count);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_INCR(mac->dma_if),
+			       count);
+
+	mac->rx->next_to_fill += count;
+}
+
+static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
+{
+	unsigned int i;
+	int start, count;
+
+	spin_lock(&mac->rx->lock);
+
+	start = mac->rx->next_to_clean;
+	count = 0;
+
+	for (i = start; i < (start + mac->rx->count) && count < limit; i++) {
+		struct pas_dma_xct_descr *dp;
+		struct pasemi_mac_buffer *info;
+		struct sk_buff *skb;
+		unsigned int j, len;
+		dma_addr_t dma;
+
+		rmb();
+
+		dp = &mac->rx->desc[i % mac->rx->count];
+
+		if (!(dp->macrx & XCT_MACRX_O))
+			break;
+
+		count++;
+
+		info = NULL;
+
+		/* We have to scan for our skb since there's no way
+		 * to back-map them from the descriptor, and if we
+		 * have several receive channels then they might not
+		 * show up in the same order as they were put on the
+		 * interface ring.
+		 */
+
+		dma = (dp->ptr & XCT_PTR_ADDR_M);
+		for (j = start; j < (start + mac->rx->count); j++) {
+			info = &mac->rx->desc_info[j % mac->rx->count];
+			if (info->dma == dma)
+				break;
+		}
+
+		BUG_ON(!info);
+		BUG_ON(info->dma != dma);
+
+		pci_unmap_single(mac->dma_pdev, info->dma, info->skb->len,
+				 PCI_DMA_FROMDEVICE);
+
+		skb = info->skb;
+
+		len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
+
+		skb_put(skb, len);
+
+		skb->protocol = eth_type_trans(skb, mac->netdev);
+
+		if ((dp->macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
+			skb->ip_summed = CHECKSUM_COMPLETE;
+			skb->csum = (dp->macrx & XCT_MACRX_CSUM_M) >>
+					   XCT_MACRX_CSUM_S;
+		} else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		mac->stats.rx_bytes += len;
+		mac->stats.rx_packets++;
+
+		netif_receive_skb(skb);
+
+		dp->ptr = 0;
+		dp->macrx = 0;
+		info->dma = 0;
+		info->skb = 0;
+	}
+
+	mac->rx->next_to_clean += count;
+	pasemi_mac_replenish_rx_ring(mac->netdev);
+
+	spin_unlock(&mac->rx->lock);
+
+	return count;
+}
+
+static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
+{
+	int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+	int start, count;
+	unsigned long flags;
+
+	if (!spin_trylock_irqsave(&mac->tx->lock, flags))
+		return 0;
+
+	start = mac->tx->next_to_clean;
+	count = 0;
+
+	for (i = start; i < mac->tx->next_to_use; i++) {
+		dp = &mac->tx->desc[i % mac->tx->count];
+		if (!dp || (dp->mactx & XCT_MACTX_O))
+			break;
+
+		count++;
+
+		info = &mac->tx->desc_info[i % mac->tx->count];
+
+		pci_unmap_single(mac->dma_pdev, info->dma,
+				 info->skb->len, PCI_DMA_TODEVICE);
+		dev_kfree_skb_irq(info->skb);
+		info->skb = NULL;
+		info->dma = 0;
+		dp->mactx = 0;
+		dp->ptr = 0;
+	}
+	mac->tx->next_to_clean += count;
+	spin_unlock_irqrestore(&mac->tx->lock, flags);
+
+	return count;
+}
+
+
+static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int reg;
+
+	if (!(*mac->rx_status & PAS_STATUS_INT))
+		return IRQ_NONE;
+
+	netif_rx_schedule(dev);
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));
+
+	reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
+	      PAS_IOB_DMA_RXCH_RESET_DINTC;
+	if (*mac->rx_status & PAS_STATUS_TIMER)
+		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
+
+	pci_write_config_dword(mac->iob_pdev,
+			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
+
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int reg;
+	int was_full;
+
+	was_full = mac->tx->next_to_clean + mac->tx->count == mac->tx->next_to_use;
+
+	if (!(*mac->tx_status & PAS_STATUS_INT))
+		return IRQ_NONE;
+
+	pasemi_mac_clean_tx(mac);
+
+	reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
+	if (*mac->tx_status & PAS_STATUS_TIMER)
+		reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
+			       reg);
+
+	if (was_full)
+		netif_wake_queue(dev);
+
+	return IRQ_HANDLED;
+}
+
+static int pasemi_mac_open(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int flags;
+	int ret;
+
+	/* enable rx section */
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
+			       PAS_DMA_COM_RXCMD_EN);
+
+	/* enable tx section */
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
+			       PAS_DMA_COM_TXCMD_EN);
+
+	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
+		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
+		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);
+
+	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
+		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
+
+	flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
+			       PAS_IOB_DMA_RXCH_CFG_CNTTH(30));
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
+
+	ret = pasemi_mac_setup_rx_resources(dev);
+	if (ret)
+		goto out_rx_resources;
+
+	ret = pasemi_mac_setup_tx_resources(dev);
+	if (ret)
+		goto out_tx_resources;
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
+			       PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
+			       PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));
+
+	/* enable rx if */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+			       PAS_DMA_RXINT_RCMDSTA_EN);
+
+	/* enable rx channel */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+			       PAS_DMA_RXCHAN_CCMDSTA_EN |
+			       PAS_DMA_RXCHAN_CCMDSTA_DU);
+
+	/* enable tx channel */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+			       PAS_DMA_TXCHAN_TCMDSTA_EN);
+
+	pasemi_mac_replenish_rx_ring(dev);
+
+	netif_start_queue(dev);
+	netif_poll_enable(dev);
+
+	ret = request_irq(mac->dma_pdev->irq + mac->dma_txch,
+			  &pasemi_mac_tx_intr, IRQF_DISABLED,
+			  mac->tx->irq_name, dev);
+	if (ret) {
+		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+		       mac->dma_pdev->irq + mac->dma_txch, ret);
+		goto out_tx_int;
+	}
+
+	ret = request_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch,
+			  &pasemi_mac_rx_intr, IRQF_DISABLED,
+			  mac->rx->irq_name, dev);
+	if (ret) {
+		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+		       mac->dma_pdev->irq + 20 + mac->dma_rxch, ret);
+		goto out_rx_int;
+	}
+
+	return 0;
+
+out_rx_int:
+	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
+out_tx_int:
+	netif_poll_disable(dev);
+	netif_stop_queue(dev);
+	pasemi_mac_free_tx_resources(dev);
+out_tx_resources:
+	pasemi_mac_free_rx_resources(dev);
+out_rx_resources:
+
+	return ret;
+}
+
+static int pasemi_mac_close(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int stat;
+
+	netif_stop_queue(dev);
+
+	/* Clean out any pending buffers */
+	pasemi_mac_clean_tx(mac);
+	pasemi_mac_clean_rx(mac, mac->rx->count);
+
+	/* Disable interface */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+			       PAS_DMA_TXCHAN_TCMDSTA_ST);
+	pci_write_config_dword(mac->dma_pdev,
+		      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+		      PAS_DMA_RXINT_RCMDSTA_ST);
+	pci_write_config_dword(mac->dma_pdev,
+		      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+		      PAS_DMA_RXCHAN_CCMDSTA_ST);
+
+	do {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+				      &stat);
+	} while (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT);
+
+	do {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+				      &stat);
+	} while (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT);
+
+	do {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+				      &stat);
+	} while (stat & PAS_DMA_RXINT_RCMDSTA_ACT);
+
+	/* Then, disable the channel. This must be done separately from
+	 * stopping, since you can't disable when active.
+	 */
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+
+	synchronize_irq(mac->dma_pdev->irq + mac->dma_txch);
+	synchronize_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch);
+
+	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
+	free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev);
+
+	/* Free resources */
+	pasemi_mac_free_rx_resources(dev);
+	pasemi_mac_free_tx_resources(dev);
+
+	return 0;
+}
+
+static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	struct pasemi_mac_txring *txring;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+	u64 flags;
+	dma_addr_t map;
+
+	txring = mac->tx;
+
+	if (txring->next_to_clean + txring->count == txring->next_to_use) {
+		pasemi_mac_clean_tx(mac);
+
+		if (txring->next_to_clean + txring->count == txring->next_to_use) {
+			/* Still no room -- stop the queue and wait for tx
+			 * intr when there's room.
+			 */
+			netif_stop_queue(dev);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	mac->stats.tx_packets++;
+	mac->stats.tx_bytes += skb->len;
+
+	flags = XCT_MACTX_O | XCT_MACTX_ST |
+		XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		switch (skb->nh.iph->protocol) {
+		case IPPROTO_TCP:
+			flags |= XCT_MACTX_CSUM_TCP;
+			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
+			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+			break;
+		case IPPROTO_UDP:
+			flags |= XCT_MACTX_CSUM_UDP;
+			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
+			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+			break;
+		}
+	}
+
+	map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+
+	dp = &txring->desc[txring->next_to_use % txring->count];
+	info = &txring->desc_info[txring->next_to_use % txring->count];
+
+	dp->mactx = flags | XCT_MACTX_LLEN(skb->len);
+	dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
+	info->dma = map;
+	info->skb = skb;
+	/* XXXOJN Deal with fragmented packets when larger MTU is supported */
+
+	txring->next_to_use++;
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
+
+	return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+
+	return &mac->stats;
+}
+
+static void pasemi_mac_set_rx_mode(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int flags;
+
+	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
+
+	/* Set promiscuous */
+	if (dev->flags & IFF_PROMISC)
+		flags |= PAS_MAC_CFG_PCFG_PR;
+	else
+		flags &= ~PAS_MAC_CFG_PCFG_PR;
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
+}
+
+
+static int pasemi_mac_poll(struct net_device *dev, int *budget)
+{
+	int pkts, limit = min(*budget, dev->quota);
+	struct pasemi_mac *mac = netdev_priv(dev);
+
+	pkts = pasemi_mac_clean_rx(mac, limit);
+
+	if (pkts < limit) {
+		/* all done, no more packets present */
+		netif_rx_complete(dev);
+
+		/* re-enable receive interrupts */
+		pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+				       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
+		return 0;
+	} else {
+		/* used up our quantum, so reschedule */
+		dev->quota -= pkts;
+		*budget -= pkts;
+		return 1;
+	}
+}
+
+static int __devinit
+pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int index = 0;
+	struct net_device *dev;
+	struct pasemi_mac *mac;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pasemi_mac: Could not enable device.\n");
+		return err;
+	}
+	dev = alloc_etherdev(sizeof(struct pasemi_mac));
+	if (dev == NULL) {
+		dev_err(&pdev->dev,
+			"pasemi_mac: Could not allocate ethernet device.\n");
+		return -ENOMEM;
+	}
+
+	SET_MODULE_OWNER(dev);
+	pci_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	mac = netdev_priv(dev);
+
+	mac->pdev = pdev;
+	mac->netdev = dev;
+	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
+
+	if (!mac->dma_pdev) {
+		dev_err(&pdev->dev, "Can't find DMA Controller\n");
+		free_netdev(dev);
+		return -ENODEV;
+	}
+
+	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
+
+	if (!mac->iob_pdev) {
+		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
+		free_netdev(dev);
+		return -ENODEV;
+	}
+
+	/* These should come out of the device tree eventually */
+	mac->dma_txch = index;
+	mac->dma_rxch = index;
+
+	/* We probe GMAC before XAUI, but the DMA interfaces are
+	 * in XAUI, GMAC order.
+	 */
+	if (index < 4)
+		mac->dma_if = index + 2;
+	else
+		mac->dma_if = index - 4;
+	index++;
+
+	switch (pdev->device) {
+	case 0xa005:
+		mac->type = MAC_TYPE_GMAC;
+		break;
+	case 0xa006:
+		mac->type = MAC_TYPE_XAUI;
+		break;
+	default:
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* get mac addr from device tree */
+	if (pasemi_set_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
+		err = -ENODEV;
+		goto out;
+	}
+	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
+
+	dev->open = pasemi_mac_open;
+	dev->stop = pasemi_mac_close;
+	dev->hard_start_xmit = pasemi_mac_start_tx;
+	dev->get_stats = pasemi_mac_get_stats;
+	dev->set_multicast_list = pasemi_mac_set_rx_mode;
+	dev->weight = 64;
+	dev->poll = pasemi_mac_poll;
+	dev->features = NETIF_F_HW_CSUM;
+
+	/* The dma status structure is located in the I/O bridge, and
+	 * is cache coherent.
+	 */
+	if (!dma_status)
+		/* XXXOJN This should come from the device tree */
+		dma_status = __ioremap(0xfd800000, 0x1000, 0);
+
+	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
+	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
+
+	err = register_netdev(dev);
+
+	if (err) {
+		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
+			err);
+		goto out;
+	} else
+		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
+		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
+		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
+		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+	return err;
+
+out:
+	dev_err(&mac->pdev->dev, "pasemi_mac: init failed\n");
+
+	pci_disable_device(pdev);
+	pci_dev_put(mac->dma_pdev);
+	pci_dev_put(mac->iob_pdev);
+	free_netdev(dev);
+	return err;
+}
+
+static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct pasemi_mac *mac;
+
+	if (!netdev)
+		return;
+
+	mac = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	pci_disable_device(pdev);
+	pci_dev_put(mac->dma_pdev);
+	pci_dev_put(mac->iob_pdev);
+
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+}
+
+static struct pci_device_id pasemi_mac_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
+
+static struct pci_driver pasemi_mac_driver = {
+	.name		= "pasemi_mac",
+	.id_table	= pasemi_mac_pci_tbl,
+	.probe		= pasemi_mac_probe,
+	.remove		= __devexit_p(pasemi_mac_remove),
+};
+
+static void __exit pasemi_mac_cleanup_module(void)
+{
+	pci_unregister_driver(&pasemi_mac_driver);
+	__iounmap(dma_status);
+	dma_status = NULL;
+}
+
+int pasemi_mac_init_module(void)
+{
+	return pci_register_driver(&pasemi_mac_driver);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
+MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
+
+module_init(pasemi_mac_init_module);
+module_exit(pasemi_mac_cleanup_module);
Index: merge/drivers/net/pasemi_mac.h
===================================================================
--- /dev/null
+++ merge/drivers/net/pasemi_mac.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2006 PA Semi, Inc
+ *
+ * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and
+ * hardware register layouts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef PASEMI_MAC_H
+#define PASEMI_MAC_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+struct pasemi_mac_txring {
+	spinlock_t	 lock;
+	struct pas_dma_xct_descr	*desc;
+	dma_addr_t	 dma;
+	unsigned int	 size;
+	unsigned int	 count;
+	unsigned int	 next_to_use;
+	unsigned int	 next_to_clean;
+	unsigned short	 last_count;
+	struct pasemi_mac_buffer *desc_info;
+	char		 irq_name[10];  /* "eth%d tx" */
+};
+
+struct pasemi_mac_rxring {
+	spinlock_t	 lock;
+	struct pas_dma_xct_descr	*desc;	/* RX channel descriptor ring */
+	dma_addr_t	 dma;
+	u64		*buffers;	/* RX interface buffer ring */
+	dma_addr_t	 buf_dma;
+	unsigned int	 size;
+	unsigned int	 count;
+	unsigned int	 next_to_fill;
+	unsigned int	 next_to_clean;
+	unsigned short	 last_count;
+	struct pasemi_mac_buffer *desc_info;
+	char		 irq_name[10];  /* "eth%d rx" */
+};
+
+/* Number of unused descriptors, considering ring wraparounds */
+#define PASEMI_MAC_DESC_UNUSED(ring) ((((ring)->next_to_clean >		\
+					(ring)->next_to_use) ?		\
+					  0 :				\
+					  (ring)->count) +		\
+					  (ring)->next_to_clean -	\
+					  (ring)->next_to_use - 1)
+
+struct pasemi_mac {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct pci_dev *dma_pdev;
+	struct pci_dev *iob_pdev;
+	struct net_device_stats stats;
+
+	/* Pointer to the cacheable per-channel status registers */
+	u64	*rx_status;
+	u64	*tx_status;
+
+	u8		type;
+#define MAC_TYPE_GMAC	1
+#define MAC_TYPE_XAUI	2
+	u32	dma_txch;
+	u32	dma_if;
+	u32	dma_rxch;
+
+	u8		mac_addr[6];
+
+	struct timer_list	rxtimer;
+
+	struct pasemi_mac_txring *tx;
+	struct pasemi_mac_rxring *rx;
+};
+
+struct pasemi_mac_buffer {
+	struct sk_buff *skb;
+	dma_addr_t	dma;
+};
+
+
+
+#define PAS_MAC_CFG_PCFG		0x80
+#define    PAS_MAC_CFG_PCFG_PE		0x80000000
+#define    PAS_MAC_CFG_PCFG_CE		0x40000000
+#define    PAS_MAC_CFG_PCFG_BU		0x20000000
+#define    PAS_MAC_CFG_PCFG_TT		0x10000000
+#define    PAS_MAC_CFG_PCFG_TSR_M	0x0c000000
+#define    PAS_MAC_CFG_PCFG_TSR_10M	0x00000000
+#define    PAS_MAC_CFG_PCFG_TSR_100M	0x04000000
+#define    PAS_MAC_CFG_PCFG_TSR_1G	0x08000000
+#define    PAS_MAC_CFG_PCFG_TSR_10G	0x0c000000
+#define    PAS_MAC_CFG_PCFG_T24		0x02000000
+#define    PAS_MAC_CFG_PCFG_PR		0x01000000
+#define    PAS_MAC_CFG_PCFG_CRO_M	0x00ff0000
+#define    PAS_MAC_CFG_PCFG_CRO_S	16
+#define    PAS_MAC_CFG_PCFG_IPO_M	0x0000ff00
+#define    PAS_MAC_CFG_PCFG_IPO_S	8
+#define    PAS_MAC_CFG_PCFG_S1		0x00000080
+#define    PAS_MAC_CFG_PCFG_IO_M	0x00000060
+#define    PAS_MAC_CFG_PCFG_IO_MAC	0x00000000
+#define    PAS_MAC_CFG_PCFG_IO_OFF	0x00000020
+#define    PAS_MAC_CFG_PCFG_IO_IND_ETH	0x00000040
+#define    PAS_MAC_CFG_PCFG_IO_IND_IP	0x00000060
+#define    PAS_MAC_CFG_PCFG_LP		0x00000010
+#define    PAS_MAC_CFG_PCFG_TS		0x00000008
+#define    PAS_MAC_CFG_PCFG_HD		0x00000004
+#define    PAS_MAC_CFG_PCFG_SPD_M	0x00000003
+#define    PAS_MAC_CFG_PCFG_SPD_10M	0x00000000
+#define    PAS_MAC_CFG_PCFG_SPD_100M	0x00000001
+#define    PAS_MAC_CFG_PCFG_SPD_1G	0x00000002
+#define    PAS_MAC_CFG_PCFG_SPD_10G	0x00000003
+#define PAS_MAC_CFG_TXP			0x98
+#define    PAS_MAC_CFG_TXP_FCF		0x01000000
+#define    PAS_MAC_CFG_TXP_FCE		0x00800000
+#define    PAS_MAC_CFG_TXP_FC		0x00400000
+#define    PAS_MAC_CFG_TXP_FPC_M	0x00300000
+#define    PAS_MAC_CFG_TXP_FPC_S	20
+#define    PAS_MAC_CFG_TXP_FPC(x)	(((x) << PAS_MAC_CFG_TXP_FPC_S) & PAS_MAC_CFG_TXP_FPC_M)
+#define    PAS_MAC_CFG_TXP_RT		0x00080000
+#define    PAS_MAC_CFG_TXP_BL		0x00040000
+#define    PAS_MAC_CFG_TXP_SL_M		0x00030000
+#define    PAS_MAC_CFG_TXP_SL_S		16
+#define    PAS_MAC_CFG_TXP_SL(x)	(((x) << PAS_MAC_CFG_TXP_SL_S) & PAS_MAC_CFG_TXP_SL_M)
+#define    PAS_MAC_CFG_TXP_COB_M	0x0000f000
+#define    PAS_MAC_CFG_TXP_COB_S	12
+#define    PAS_MAC_CFG_TXP_COB(x)	(((x) << PAS_MAC_CFG_TXP_COB_S) & PAS_MAC_CFG_TXP_COB_M)
+#define    PAS_MAC_CFG_TXP_TIFT_M	0x00000f00
+#define    PAS_MAC_CFG_TXP_TIFT_S	8
+#define    PAS_MAC_CFG_TXP_TIFT(x)	(((x) << PAS_MAC_CFG_TXP_TIFT_S) & PAS_MAC_CFG_TXP_TIFT_M)
+#define    PAS_MAC_CFG_TXP_TIFG_M	0x000000ff
+#define    PAS_MAC_CFG_TXP_TIFG_S	0
+#define    PAS_MAC_CFG_TXP_TIFG(x)	(((x) << PAS_MAC_CFG_TXP_TIFG_S) & PAS_MAC_CFG_TXP_TIFG_M)
+
+#define PAS_MAC_IPC_CHNL		0x208
+#define    PAS_MAC_IPC_CHNL_DCHNO_M	0x003f0000
+#define    PAS_MAC_IPC_CHNL_DCHNO_S	16
+#define    PAS_MAC_IPC_CHNL_DCHNO(x)	(((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \
+					 PAS_MAC_IPC_CHNL_DCHNO_M)
+#define    PAS_MAC_IPC_CHNL_BCH_M	0x0000003f
+#define    PAS_MAC_IPC_CHNL_BCH_S	0
+#define    PAS_MAC_IPC_CHNL_BCH(x)	(((x) << PAS_MAC_IPC_CHNL_BCH_S) & \
+					 PAS_MAC_IPC_CHNL_BCH_M)
+
+/* All these registers live in the PCI configuration space for the DMA PCI
+ * device. Use the normal PCI config access functions for them.
+ */
+
+#define PAS_DMA_COM_TXCMD	0x100	/* Transmit Command Register  */
+#define    PAS_DMA_COM_TXCMD_EN		0x00000001 /* enable */
+#define PAS_DMA_COM_TXSTA	0x104	/* Transmit Status Register   */
+#define    PAS_DMA_COM_TXSTA_ACT	0x00000001 /* active */
+#define PAS_DMA_COM_RXCMD	0x108	/* Receive Command Register   */
+#define    PAS_DMA_COM_RXCMD_EN		0x00000001 /* enable */
+#define PAS_DMA_COM_RXSTA	0x10c	/* Receive Status Register    */
+#define    PAS_DMA_COM_RXSTA_ACT	0x00000001 /* active */
+
+
+#define _PAS_DMA_RXINT_STRIDE		0x20
+#define PAS_DMA_RXINT_RCMDSTA(i)	(0x200+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_RCMDSTA_EN	0x00000001
+#define    PAS_DMA_RXINT_RCMDSTA_ST	0x00000002
+#define    PAS_DMA_RXINT_RCMDSTA_OO	0x00000100
+#define    PAS_DMA_RXINT_RCMDSTA_BP	0x00000200
+#define    PAS_DMA_RXINT_RCMDSTA_DR	0x00000400
+#define    PAS_DMA_RXINT_RCMDSTA_BT	0x00000800
+#define    PAS_DMA_RXINT_RCMDSTA_TB	0x00001000
+#define    PAS_DMA_RXINT_RCMDSTA_ACT	0x00010000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_M	0xfffe0000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_S	17
+#define PAS_DMA_RXINT_INCR(i)		(0x210+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_INCR_INCR_M	0x0000ffff
+#define    PAS_DMA_RXINT_INCR_INCR_S	0
+#define    PAS_DMA_RXINT_INCR_INCR(x)	((x) & 0x0000ffff)
+#define PAS_DMA_RXINT_BASEL(i)		(0x218+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEL_BRBL(x)	((x) & ~0x3f)
+#define PAS_DMA_RXINT_BASEU(i)		(0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEU_BRBH(x)	((x) & 0xfff)
+#define    PAS_DMA_RXINT_BASEU_SIZ_M	0x3fff0000	/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXINT_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXINT_BASEU_SIZ(x)	(((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
+					 PAS_DMA_RXINT_BASEU_SIZ_M)
+
+
+#define _PAS_DMA_TXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_TXCHAN_TCMDSTA	0x300	/* Command / Status		*/
+#define _PAS_DMA_TXCHAN_CFG	0x304	/* Configuration		*/
+#define _PAS_DMA_TXCHAN_DSCRBU	0x308	/* Descriptor BU Allocation	*/
+#define _PAS_DMA_TXCHAN_INCR	0x310	/* Descriptor increment		*/
+#define _PAS_DMA_TXCHAN_CNT	0x314	/* Descriptor count/offset	*/
+#define _PAS_DMA_TXCHAN_BASEL	0x318	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_TXCHAN_BASEU	0x31c	/*			(high)	*/
+#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_TCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ACT	0x00010000	/* Active */
+#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_CFG_TY_IFACE	0x00000000	/* Type = interface */
+#define    PAS_DMA_TXCHAN_CFG_TATTR_M	0x0000003c
+#define    PAS_DMA_TXCHAN_CFG_TATTR_S	2
+#define    PAS_DMA_TXCHAN_CFG_TATTR(x)	(((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
+					 PAS_DMA_TXCHAN_CFG_TATTR_M)
+#define    PAS_DMA_TXCHAN_CFG_WT_M	0x000001c0
+#define    PAS_DMA_TXCHAN_CFG_WT_S	6
+#define    PAS_DMA_TXCHAN_CFG_WT(x)	(((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
+					 PAS_DMA_TXCHAN_CFG_WT_M)
+#define    PAS_DMA_TXCHAN_CFG_CF	0x00001000	/* Clean first line */
+#define    PAS_DMA_TXCHAN_CFG_CL	0x00002000	/* Clean last line */
+#define    PAS_DMA_TXCHAN_CFG_UP	0x00004000	/* update tx descr when sent */
+#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_TXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_TXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_TXCHAN_BASEU_SIZ_M)
+
+#define _PAS_DMA_RXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_RXCHAN_CCMDSTA	0x800	/* Command / Status		*/
+#define _PAS_DMA_RXCHAN_CFG	0x804	/* Configuration		*/
+#define _PAS_DMA_RXCHAN_INCR	0x810	/* Descriptor increment		*/
+#define _PAS_DMA_RXCHAN_CNT	0x814	/* Descriptor count/offset	*/
+#define _PAS_DMA_RXCHAN_BASEL	0x818	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_RXCHAN_BASEU	0x81c	/*			(high)	*/
+#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ACT	0x00010000	/* Active */
+#define    PAS_DMA_RXCHAN_CCMDSTA_DU	0x00020000
+#define PAS_DMA_RXCHAN_CFG(c)     (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CFG_HBU_M	0x00000380
+#define    PAS_DMA_RXCHAN_CFG_HBU_S	7
+#define    PAS_DMA_RXCHAN_CFG_HBU(x)	(((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
+					 PAS_DMA_RXCHAN_CFG_HBU_M)
+#define PAS_DMA_RXCHAN_INCR(c)    (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEL(c)   (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_RXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_RXCHAN_BASEU(c)   (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_RXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_RXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_RXCHAN_BASEU_SIZ_M)
+
+/* status register layout in IOB region, at 0xfb800000 */
+struct pasdma_status {
+	u64 rx_sta[64];
+	u64 tx_sta[20];
+};
+
+#define    PAS_STATUS_PCNT_M		0x000000000000ffff
+#define    PAS_STATUS_PCNT_S		0
+#define    PAS_STATUS_DCNT_M		0x00000000ffff0000
+#define    PAS_STATUS_DCNT_S		16
+#define    PAS_STATUS_BPCNT_M		0x0000ffff00000000
+#define    PAS_STATUS_BPCNT_S		32
+#define    PAS_STATUS_TIMER		0x1000000000000000
+#define    PAS_STATUS_ERROR		0x2000000000000000
+#define    PAS_STATUS_SOFT		0x4000000000000000
+#define    PAS_STATUS_INT		0x8000000000000000
+
+#define PAS_IOB_DMA_RXCH_CFG(i)		(0x1100 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_TXCH_CFG(i)		(0x1200 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_RXCH_STAT(i)	(0x1300 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_TXCH_STAT(i)	(0x1400 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_RXCH_RESET(i)	(0x1500 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S	0
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_RXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_RXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_RXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_RXCH_RESET_PINTC		0x00000001
+#define PAS_IOB_DMA_TXCH_RESET(i)	(0x1600 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S	0
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_TXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_TXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_TXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_TXCH_RESET_PINTC		0x00000001
+
+#define PAS_IOB_DMA_COM_TIMEOUTCFG		0x1700
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M	0x00ffffff
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S	0
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)	(((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
+						 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
+
+struct pas_dma_xct_descr {
+	union {
+ 		u64	mactx;
+#define	XCT_MACTX_T		0x8000000000000000
+#define	XCT_MACTX_ST		0x4000000000000000
+#define XCT_MACTX_NORES		0x0000000000000000
+#define XCT_MACTX_8BRES		0x1000000000000000
+#define XCT_MACTX_24BRES	0x2000000000000000
+#define XCT_MACTX_40BRES	0x3000000000000000
+#define XCT_MACTX_I		0x0800000000000000
+#define XCT_MACTX_O		0x0400000000000000
+#define XCT_MACTX_E		0x0200000000000000
+#define XCT_MACTX_VLAN_M	0x0180000000000000
+#define XCT_MACTX_VLAN_NOP	0x0000000000000000
+#define XCT_MACTX_VLAN_REMOVE	0x0080000000000000
+#define XCT_MACTX_VLAN_INSERT   0x0100000000000000
+#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000
+#define XCT_MACTX_CRC_M		0x0060000000000000
+#define XCT_MACTX_CRC_NOP	0x0000000000000000
+#define XCT_MACTX_CRC_INSERT	0x0020000000000000
+#define XCT_MACTX_CRC_PAD	0x0040000000000000
+#define XCT_MACTX_CRC_REPLACE	0x0060000000000000
+#define XCT_MACTX_SS		0x0010000000000000
+#define XCT_MACTX_LLEN_M	0x00007fff00000000
+#define XCT_MACTX_LLEN_S	32ull
+#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & XCT_MACTX_LLEN_M)
+#define XCT_MACTX_IPH_M		0x00000000f8000000
+#define XCT_MACTX_IPH_S		27ull
+#define XCT_MACTX_IPH(x)	((((long)(x)) << XCT_MACTX_IPH_S) & XCT_MACTX_IPH_M)
+#define XCT_MACTX_IPO_M		0x0000000007c00000
+#define XCT_MACTX_IPO_S		22ull
+#define XCT_MACTX_IPO(x)	((((long)(x)) << XCT_MACTX_IPO_S) & XCT_MACTX_IPO_M)
+#define XCT_MACTX_CSUM_M	0x0000000000000060
+#define XCT_MACTX_CSUM_NOP	0x0000000000000000
+#define XCT_MACTX_CSUM_TCP	0x0000000000000040
+#define XCT_MACTX_CSUM_UDP	0x0000000000000060
+#define XCT_MACTX_V6		0x0000000000000010
+#define XCT_MACTX_C		0x0000000000000004
+#define XCT_MACTX_AL2		0x0000000000000002
+		u64	macrx;
+#define	XCT_MACRX_T		0x8000000000000000
+#define	XCT_MACRX_ST		0x4000000000000000
+#define XCT_MACRX_NORES		0x0000000000000000
+#define XCT_MACRX_8BRES		0x1000000000000000
+#define XCT_MACRX_24BRES	0x2000000000000000
+#define XCT_MACRX_40BRES	0x3000000000000000
+#define XCT_MACRX_O		0x0400000000000000
+#define XCT_MACRX_E		0x0200000000000000
+#define XCT_MACRX_FF		0x0100000000000000
+#define XCT_MACRX_PF		0x0080000000000000
+#define XCT_MACRX_OB		0x0040000000000000
+#define XCT_MACRX_OD		0x0020000000000000
+#define XCT_MACRX_FS		0x0010000000000000
+#define XCT_MACRX_NB_M		0x000fc00000000000
+#define XCT_MACRX_NB_S		46ULL
+#define XCT_MACRX_NB(x)		((((long)(x)) << XCT_MACRX_NB_S) & XCT_MACRX_NB_M)
+#define XCT_MACRX_LLEN_M	0x00003fff00000000
+#define XCT_MACRX_LLEN_S	32ULL
+#define XCT_MACRX_LLEN(x)	((((long)(x)) << XCT_MACRX_LLEN_S) & XCT_MACRX_LLEN_M)
+#define XCT_MACRX_CRC		0x0000000080000000
+#define XCT_MACRX_LEN_M		0x0000000060000000
+#define XCT_MACRX_LEN_TOOSHORT	0x0000000020000000
+#define XCT_MACRX_LEN_BELOWMIN	0x0000000040000000
+#define XCT_MACRX_LEN_TRUNC	0x0000000060000000
+#define XCT_MACRX_CAST_M	0x0000000018000000
+#define XCT_MACRX_CAST_UNI	0x0000000000000000
+#define XCT_MACRX_CAST_MULTI	0x0000000008000000
+#define XCT_MACRX_CAST_BROAD	0x0000000010000000
+#define XCT_MACRX_CAST_PAUSE	0x0000000018000000
+#define XCT_MACRX_VLC_M		0x0000000006000000
+#define XCT_MACRX_FM		0x0000000001000000
+#define XCT_MACRX_HTY_M		0x0000000000c00000
+#define XCT_MACRX_HTY_IPV4_OK	0x0000000000000000
+#define XCT_MACRX_HTY_IPV6 	0x0000000000400000
+#define XCT_MACRX_HTY_IPV4_BAD	0x0000000000800000
+#define XCT_MACRX_HTY_NONIP	0x0000000000c00000
+#define XCT_MACRX_IPP_M		0x00000000003f0000
+#define XCT_MACRX_IPP_S		16
+#define XCT_MACRX_CSUM_M	0x000000000000ffff
+#define XCT_MACRX_CSUM_S	0
+	};
+	union {
+		u64	ptr;
+#define XCT_PTR_T		0x8000000000000000
+#define XCT_PTR_LEN_M		0x7ffff00000000000
+#define XCT_PTR_LEN_S		44
+#define XCT_PTR_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & XCT_PTR_LEN_M)
+#define XCT_PTR_ADDR_M		0x00000fffffffffff
+#define XCT_PTR_ADDR_S		0
+#define XCT_PTR_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & XCT_PTR_ADDR_M)
+		u64	rxb;
+#define XCT_RXB_LEN_M		0x0ffff00000000000
+#define XCT_RXB_LEN_S		44
+#define XCT_RXB_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & XCT_PTR_LEN_M)
+#define XCT_RXB_ADDR_M		0x00000fffffffffff
+#define XCT_RXB_ADDR_S		0
+#define XCT_RXB_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & XCT_PTR_ADDR_M)
+	};
+};
+
+#endif /* PASEMI_MAC_H */
Index: merge/MAINTAINERS
===================================================================
--- merge.orig/MAINTAINERS
+++ merge/MAINTAINERS
@@ -2484,6 +2484,12 @@ L:	orinoco-devel@lists.sourceforge.net
 W:	http://www.nongnu.org/orinoco/
 S:	Maintained
 
+PA SEMI ETHERNET DRIVER
+P:	Olof Johansson
+M:	olof@lixom.net
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 PARALLEL PORT SUPPORT
 P:	Phil Blundell
 M:	philb@gnu.org
Index: merge/include/linux/pci_ids.h
===================================================================
--- merge.orig/include/linux/pci_ids.h
+++ merge/include/linux/pci_ids.h
@@ -2064,6 +2064,8 @@
 #define PCI_VENDOR_ID_TDI               0x192E
 #define PCI_DEVICE_ID_TDI_EHCI          0x0101
 
+#define PCI_VENDOR_ID_PASEMI		0x1959
+
 #define PCI_VENDOR_ID_JMICRON		0x197B
 #define PCI_DEVICE_ID_JMICRON_JMB360	0x2360
 #define PCI_DEVICE_ID_JMICRON_JMB361	0x2361

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] [v3] PA Semi PWRficient Ethernet driver
  2007-01-31  5:44   ` [PATCH] [v3] PA " Olof Johansson
@ 2007-01-31 10:34     ` Jeff Garzik
  2007-02-01  3:40       ` Olof Johansson
  2007-01-31 12:44     ` Ingo Oeser
                       ` (2 subsequent siblings)
  3 siblings, 1 reply; 23+ messages in thread
From: Jeff Garzik @ 2007-01-31 10:34 UTC (permalink / raw)
  To: Olof Johansson
  Cc: netdev, Stephen Hemminger, Francois Romieu, Christoph Hellwig

Olof Johansson wrote:
> Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)
> 
> Basic enablement, will be complemented with performance enhancements
> over time. PHY support will be added as well.
> 
> Signed-off-by: Olof Johansson <olof@lixom.net>

Looks generally pretty clean, well done.

Comments included inline...

> +#include "pasemi_mac.h"
> +
> +#define INITIAL_RX_RING_SIZE 512
> +#define INITIAL_TX_RING_SIZE 512
> +
> +#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
> +
> +#define PAS_DMA_MAX_IF     40
> +#define PAS_DMA_MAX_RXCH   8
> +#define PAS_DMA_MAX_TXCH   8
> +
> +/* XXXOJN these should come out of the device tree some day */
> +#define PAS_DMA_CAP_BASE   0xe00d0040
> +#define PAS_DMA_CAP_SIZE   0x100
> +#define PAS_DMA_COM_BASE   0xe00d0100
> +#define PAS_DMA_COM_SIZE   0x100

consider enums rather than #define's for constants.  they generate 
symbols at the C level rather than cpp level, making the code more 
readable, providing more type information to the C compiler, and making 
symbols visible at the debugger level.

example:

enum {
	PAS_DMA_MAX_IF		= 40,
	PAS_DMA_MAX_RXCH	= 8,
	PAS_DMA_MAX_TXCH	= 8,
};

> +static int pasemi_set_mac_addr(struct pasemi_mac *mac)

poor name.  from the context of the code reader and driver, this should 
be "pasemi_GET_mac_addr", rather than ...set...

> +	struct pci_dev *pdev = mac->pdev;
> +	struct device_node *dn = pci_device_to_OF_node(pdev);
> +	const u8 *maddr;
> +	u8 addr[6];
> +
> +	if (!dn) {
> +		dev_dbg(&pdev->dev,
> +			  "No device node for mac, not configuring\n");
> +		return -ENOENT;
> +	}
> +
> +	maddr = get_property(dn, "mac-address", NULL);
> +	if (maddr == NULL) {
> +		dev_warn(&pdev->dev,
> +			 "no mac address in device tree, not configuring\n");
> +		return -ENOENT;
> +	}
> +
> +	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
> +		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
> +		dev_warn(&pdev->dev,
> +			 "can't parse mac address, not configuring\n");
> +		return -EINVAL;
> +	}
> +
> +	memcpy(mac->mac_addr, addr, sizeof(addr));
> +	return 0;
> +}
> +
> +
> +static noinline void pasemi_mac_free_tx_resources(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int i;
> +	struct pasemi_mac_buffer *info;
> +	struct pas_dma_xct_descr *dp;
> +
> +	for (i = 0; i < mac->tx->count; i++) {
> +		info = &mac->tx->desc_info[i % mac->tx->count];
> +		dp = &mac->tx->desc[i % mac->tx->count];
> +		if (info->dma) {
> +			if (info->skb) {
> +				pci_unmap_single(mac->dma_pdev,
> +						 info->dma,
> +						 info->skb->len,
> +						 PCI_DMA_TODEVICE);
> +				dev_kfree_skb_any(info->skb);
> +			}
> +			info->dma = 0;
> +			info->skb = 0;
> +			dp->mactx = 0;
> +			dp->ptr = 0;

"0" is not the same as "NULL".  Use NULL where appropriate.

Then make sure your driver passes sparse checks (read 
Documentation/sparse.txt)
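
For the pointer members that just means e.g. (sketch, using the field
names from your patch):

	info->dma = 0;		/* dma_addr_t is an integer type, 0 is fine */
	info->skb = NULL;	/* pointer, so NULL */
	dp->mactx = 0;
	dp->ptr = 0;

and a "make C=1 drivers/net/pasemi_mac.o" run will have sparse flag any
remaining 0-used-as-pointer spots.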


> +	dma_free_coherent(&mac->dma_pdev->dev,
> +			  mac->tx->count * sizeof(struct pas_dma_xct_descr),
> +			  mac->tx->desc, mac->tx->dma);
> +
> +	kfree(mac->tx->desc_info);
> +	kfree(mac->tx);
> +	mac->tx = NULL;
> +}
> +
> +static noinline void pasemi_mac_free_rx_resources(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int i;
> +	struct pasemi_mac_buffer *info;
> +	struct pas_dma_xct_descr *dp;
> +
> +	for (i = 0; i < mac->rx->count; i++) {
> +		info = &mac->rx->desc_info[i % mac->rx->count];
> +		dp = &mac->rx->desc[i % mac->rx->count];
> +		if (info->dma) {
> +			if (info->skb) {
> +				pci_unmap_single(mac->dma_pdev,
> +						 info->dma,
> +						 info->skb->len,
> +						 PCI_DMA_FROMDEVICE);
> +				dev_kfree_skb_any(info->skb);
> +			}
> +			info->dma = 0;
> +			info->skb = 0;
> +			dp->macrx = 0;
> +			dp->ptr = 0;

0 != NULL


> +	dma_free_coherent(&mac->dma_pdev->dev,
> +			  mac->rx->count * sizeof(struct pas_dma_xct_descr),
> +			  mac->rx->desc, mac->rx->dma);
> +
> +	dma_free_coherent(&mac->dma_pdev->dev, mac->rx->count * sizeof(u64),
> +			  mac->rx->buffers, mac->rx->buf_dma);
> +
> +	kfree(mac->rx->desc_info);
> +	kfree(mac->rx);
> +	mac->rx = NULL;
> +}
> +
> +static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int i;
> +	int start = mac->rx->next_to_fill;
> +	int count;
> +
> +	count = ((mac->rx->next_to_clean & ~7) + mac->rx->count -
> +		 mac->rx->next_to_fill) % mac->rx->count;

if feasible, logical operations are often more optimal than '%'

maybe you need something like tg3.c's NEXT_TX() ?
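
i.e. with the ring sizes kept a power of two, something along these
lines (sketch only, names illustrative):

	#define TX_RING_SIZE	512	/* must stay a power of two */
	#define NEXT_TX(i)	(((i) + 1) & (TX_RING_SIZE - 1))
	#define TX_DESC(mac, i)	((mac)->tx->desc[(i) & (TX_RING_SIZE - 1)])

so the '%' in the hot paths turns into a mask.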


> +	if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
> +		count = mac->rx->count - 8;

why is this needed?

> +	/* Limit so we don't go into the last cache line */
> +	count -= 8;
> +
> +	if (count <= 0)
> +		return;
> +
> +	for (i = start; i < start + count; i++) {
> +		struct pasemi_mac_buffer *info = &mac->rx->desc_info[i % mac->rx->count];
> +		u64 *buff = &mac->rx->buffers[i % mac->rx->count];
> +		struct sk_buff *skb;
> +		dma_addr_t dma;
> +
> +		skb = dev_alloc_skb(BUF_SIZE);
> +
> +		if (!skb)
> +			return;
> +
> +		skb->dev = dev;
> +
> +		dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
> +				     PCI_DMA_FROMDEVICE);

check for DMA mapping error
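
i.e. something like (sketch; how to recover is up to you):

		dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
				     PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(dma))) {
			dev_kfree_skb_any(skb);
			break;	/* stop refilling, try again later */
		}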


> +		info->skb = skb;
> +		info->dma = dma;
> +		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
> +	}
> +
> +	wmb();
> +
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
> +			       count);
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_RXINT_INCR(mac->dma_if),
> +			       count);
> +
> +	mac->rx->next_to_fill += count;
> +}
> +
> +static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
> +{
> +	unsigned int i;
> +	int start, count;
> +
> +	spin_lock(&mac->rx->lock);
> +
> +	start = mac->rx->next_to_clean;
> +	count = 0;
> +
> +	for (i = start; i < (start + mac->rx->count) && count < limit; i++) {
> +		struct pas_dma_xct_descr *dp;
> +		struct pasemi_mac_buffer *info;
> +		struct sk_buff *skb;
> +		unsigned int j, len;
> +		dma_addr_t dma;
> +
> +		rmb();
> +
> +		dp = &mac->rx->desc[i % mac->rx->count];
> +
> +		if (!(dp->macrx & XCT_MACRX_O))
> +			break;
> +
> +		count++;
> +
> +		info = NULL;
> +
> +		/* We have to scan for our skb since there's no way
> +		 * to back-map them from the descriptor, and if we
> +		 * have several receive channels then they might not
> +		 * show up in the same order as they were put on the
> +		 * interface ring.
> +		 */
> +
> +		dma = (dp->ptr & XCT_PTR_ADDR_M);
> +		for (j = start; j < (start + mac->rx->count); j++) {
> +			info = &mac->rx->desc_info[j % mac->rx->count];
> +			if (info->dma == dma)
> +				break;
> +		}
> +
> +		BUG_ON(!info);
> +		BUG_ON(info->dma != dma);
> +
> +		pci_unmap_single(mac->dma_pdev, info->dma, info->skb->len,
> +				 PCI_DMA_FROMDEVICE);
> +
> +		skb = info->skb;
> +
> +		len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
> +
> +		skb_put(skb, len);
> +
> +		skb->protocol = eth_type_trans(skb, mac->netdev);
> +
> +		if ((dp->macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
> +			skb->ip_summed = CHECKSUM_COMPLETE;
> +			skb->csum = (dp->macrx & XCT_MACRX_CSUM_M) >>
> +					   XCT_MACRX_CSUM_S;
> +		} else
> +			skb->ip_summed = CHECKSUM_NONE;
> +
> +		mac->stats.rx_bytes += len;
> +		mac->stats.rx_packets++;
> +
> +		netif_receive_skb(skb);
> +
> +		dp->ptr = 0;
> +		dp->macrx = 0;
> +		info->dma = 0;
> +		info->skb = 0;

0 != NULL


> +	mac->rx->next_to_clean += count;
> +	pasemi_mac_replenish_rx_ring(mac->netdev);
> +
> +	spin_unlock(&mac->rx->lock);
> +
> +	return count;
> +}
> +
> +static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
> +{
> +	int i;
> +	struct pasemi_mac_buffer *info;
> +	struct pas_dma_xct_descr *dp;
> +	int start, count;
> +	int flags;
> +
> +	if (!spin_trylock_irqsave(&mac->tx->lock, flags))
> +		return 0;

what prevents starvation?
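
(if the trylock isn't there for a hard reason, just taking the lock --
sketch below -- sidesteps the starvation question:)

	spin_lock_irqsave(&mac->tx->lock, flags);
	/* ... clean the ring as before ... */
	spin_unlock_irqrestore(&mac->tx->lock, flags);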


> +	start = mac->tx->next_to_clean;
> +	count = 0;
> +
> +	for (i = start; i < mac->tx->next_to_use; i++) {
> +		dp = &mac->tx->desc[i % mac->tx->count];
> +		if (!dp || (dp->mactx & XCT_MACTX_O))
> +			break;
> +
> +		count++;
> +
> +		info = &mac->tx->desc_info[i % mac->tx->count];
> +
> +		pci_unmap_single(mac->dma_pdev, info->dma,
> +				 info->skb->len, PCI_DMA_TODEVICE);
> +		dev_kfree_skb_irq(info->skb);
> +		info->skb = NULL;
> +		info->dma = 0;
> +		dp->mactx = 0;
> +		dp->ptr = 0;
> +	}
> +	mac->tx->next_to_clean += count;
> +	spin_unlock_irqrestore(&mac->tx->lock, flags);
> +
> +	return count;
> +}
> +
> +
> +static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
> +{
> +	struct net_device *dev = data;
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int reg;
> +
> +	if (!(*mac->rx_status & PAS_STATUS_INT))
> +		return IRQ_NONE;
> +
> +	netif_rx_schedule(dev);
> +	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
> +			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));
> +
> +	reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
> +	      PAS_IOB_DMA_RXCH_RESET_DINTC;
> +	if (*mac->rx_status & PAS_STATUS_TIMER)
> +		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
> +
> +	pci_write_config_dword(mac->iob_pdev,
> +			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);

is there any faster method of register reading/writing than through PCI 
config registers?

pci_{read,write}_config_foo acquires and releases a spinlock for each 
operation, making it rather expensive in fast path code
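
The usual direction -- rough sketch only; reg_base/reg_size are
placeholders for wherever the register block ends up being described --
is to map the registers once and use MMIO accessors in the fast path:

	static void __iomem *dma_regs;	/* mapped once at probe time */

	dma_regs = ioremap(reg_base, reg_size);
	out_le32(dma_regs + PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count);

which avoids taking the config-space spinlock on every access.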


> +static int pasemi_mac_close(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int stat;
> +
> +	netif_stop_queue(dev);
> +
> +	/* Clean out any pending buffers */
> +	pasemi_mac_clean_tx(mac);
> +	pasemi_mac_clean_rx(mac, mac->rx->count);
> +
> +	/* Disable interface */
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
> +			       PAS_DMA_TXCHAN_TCMDSTA_ST);
> +	pci_write_config_dword(mac->dma_pdev,
> +		      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
> +		      PAS_DMA_RXINT_RCMDSTA_ST);
> +	pci_write_config_dword(mac->dma_pdev,
> +		      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
> +		      PAS_DMA_RXCHAN_CCMDSTA_ST);
> +
> +	do {
> +		pci_read_config_dword(mac->dma_pdev,
> +				      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
> +				      &stat);
> +	} while (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT);
> +
> +	do {
> +		pci_read_config_dword(mac->dma_pdev,
> +				      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
> +				      &stat);
> +	} while (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT);
> +
> +	do {
> +		pci_read_config_dword(mac->dma_pdev,
> +				      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
> +				      &stat);
> +	} while (stat & PAS_DMA_RXINT_RCMDSTA_ACT);
> +
> +	/* Then, disable the channel. This must be done separately from
> +	 * stopping, since you can't disable when active.
> +	 */
> +
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
> +
> +	synchronize_irq(mac->dma_pdev->irq + mac->dma_txch);
> +	synchronize_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch);
> +
> +	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
> +	free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev);

shouldn't need synchronize_irq() right before free_irq()


> +	/* Free resources */
> +	pasemi_mac_free_rx_resources(dev);
> +	pasemi_mac_free_tx_resources(dev);
> +
> +	return 0;
> +}
> +
> +static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	struct pasemi_mac_txring *txring;
> +	struct pasemi_mac_buffer *info;
> +	struct pas_dma_xct_descr *dp;
> +	u64 flags;
> +	dma_addr_t map;

needs locking, as mentioned elsewhere


> +	txring = mac->tx;
> +
> +	if (txring->next_to_clean + txring->count == txring->next_to_use) {
> +		pasemi_mac_clean_tx(mac);
> +
> +		if (txring->next_to_clean + txring->count == txring->next_to_use) {
> +			/* Still no room -- stop the queue and wait for tx
> +			 * intr when there's room.
> +			 */
> +			netif_stop_queue(dev);
> +			return NETDEV_TX_BUSY;
> +		}
> +	}
> +
> +	mac->stats.tx_packets++;
> +	mac->stats.tx_bytes += skb->len;
> +
> +	flags = XCT_MACTX_O | XCT_MACTX_ST |
> +		XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
> +
> +	if (skb->ip_summed == CHECKSUM_PARTIAL) {
> +		switch (skb->nh.iph->protocol) {
> +		case IPPROTO_TCP:
> +			flags |= XCT_MACTX_CSUM_TCP;
> +			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
> +			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
> +			break;
> +		case IPPROTO_UDP:
> +			flags |= XCT_MACTX_CSUM_UDP;
> +			flags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
> +			flags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
> +			break;
> +		}
> +	}
> +
> +	map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

check for mapping error


> +	dp = &txring->desc[txring->next_to_use % txring->count];
> +	info = &txring->desc_info[txring->next_to_use % txring->count];
> +
> +	dp->mactx = flags | XCT_MACTX_LLEN(skb->len);
> +	dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
> +	info->dma = map;
> +	info->skb = skb;
> +	/* XXXOJN Deal with fragmented packets when larger MTU is supported */

does this comment imply that larger MTUs make the driver go splat, or 
does driver code elsewhere prevent the user from using an invalid MTU?


> +	txring->next_to_use++;
> +
> +	pci_write_config_dword(mac->dma_pdev,
> +			       PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
> +
> +	return NETDEV_TX_OK;
> +}
> +
> +static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +
> +	return &mac->stats;
> +}
> +
> +static void pasemi_mac_set_rx_mode(struct net_device *dev)
> +{
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +	unsigned int flags;
> +
> +	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
> +
> +	/* Set promiscuous */
> +	if (dev->flags & IFF_PROMISC)
> +		flags |= PAS_MAC_CFG_PCFG_PR;
> +	else
> +		flags &= ~PAS_MAC_CFG_PCFG_PR;
> +
> +	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);

you have no multicast capability?


> +static int pasemi_mac_poll(struct net_device *dev, int *budget)
> +{
> +	int pkts, limit = min(*budget, dev->quota);
> +	struct pasemi_mac *mac = netdev_priv(dev);
> +
> +	pkts = pasemi_mac_clean_rx(mac, limit);
> +
> +	if (pkts < limit) {
> +		/* all done, no more packets present */
> +		netif_rx_complete(dev);
> +
> +		/* re-enable receive interrupts */
> +		pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
> +				       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
> +		return 0;
> +	} else {
> +		/* used up our quantum, so reschedule */
> +		dev->quota -= pkts;
> +		*budget -= pkts;
> +		return 1;
> +	}
> +}
> +
> +static int __devinit
> +pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
> +{
> +	static int index = 0;
> +	struct net_device *dev;
> +	struct pasemi_mac *mac;
> +	int err;
> +
> +	err = pci_enable_device(pdev);
> +	if (err) {
> +		dev_err(&pdev->dev, "pasemi_mac: Could not enable device.\n");
> +		return err;

PCI layer already prints out an error msg for this case


> +	dev = alloc_etherdev(sizeof(struct pasemi_mac));
> +	if (dev == NULL) {
> +		dev_err(&pdev->dev,
> +			"pasemi_mac: Could not allocate ethernet device.\n");
> +		return -ENOMEM;

call pci_disable_device() on error
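
i.e. the usual goto-unwind shape (sketch, labels made up):

	dev = alloc_etherdev(sizeof(struct pasemi_mac));
	if (!dev) {
		dev_err(&pdev->dev,
			"pasemi_mac: Could not allocate ethernet device.\n");
		err = -ENOMEM;
		goto out_disable_device;
	}
	...
out_disable_device:
	pci_disable_device(pdev);
	return err;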


> +	SET_MODULE_OWNER(dev);
> +	pci_set_drvdata(pdev, dev);
> +	SET_NETDEV_DEV(dev, &pdev->dev);
> +
> +	mac = netdev_priv(dev);
> +
> +	mac->pdev = pdev;
> +	mac->netdev = dev;
> +	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
> +
> +	if (!mac->dma_pdev) {
> +		dev_err(&pdev->dev, "Can't find DMA Controller\n");
> +		free_netdev(dev);

undo pci_enable_device()
undo pci_set_drvdata()

> +		return -ENODEV;
> +	}
> +
> +	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
> +
> +	if (!mac->iob_pdev) {
> +		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
> +		free_netdev(dev);

ditto


> +		return -ENODEV;
> +	}
> +
> +	/* These should come out of the device tree eventually */
> +	mac->dma_txch = index;
> +	mac->dma_rxch = index;
> +
> +	/* We probe GMAC before XAUI, but the DMA interfaces are
> +	 * in XAUI, GMAC order.
> +	 */
> +	if (index < 4)
> +		mac->dma_if = index + 2;
> +	else
> +		mac->dma_if = index - 4;
> +	index++;
> +
> +	switch (pdev->device) {
> +	case 0xa005:
> +		mac->type = MAC_TYPE_GMAC;
> +		break;
> +	case 0xa006:
> +		mac->type = MAC_TYPE_XAUI;
> +		break;
> +	default:
> +		err = -ENODEV;
> +		goto out;
> +	}
> +
> +	/* get mac addr from device tree */
> +	if (pasemi_set_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
> +		err = -ENODEV;
> +		goto out;
> +	}
> +	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
> +
> +	dev->open = pasemi_mac_open;
> +	dev->stop = pasemi_mac_close;
> +	dev->hard_start_xmit = pasemi_mac_start_tx;
> +	dev->get_stats = pasemi_mac_get_stats;
> +	dev->set_multicast_list = pasemi_mac_set_rx_mode;
> +	dev->weight = 64;
> +	dev->poll = pasemi_mac_poll;
> +	dev->features = NETIF_F_HW_CSUM;
> +
> +	/* The dma status structure is located in the I/O bridge, and
> +	 * is cache coherent.
> +	 */
> +	if (!dma_status)
> +		/* XXXOJN This should come from the device tree */
> +		dma_status = __ioremap(0xfd800000, 0x1000, 0);

why __ioremap ?


> +	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
> +	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
> +
> +	err = register_netdev(dev);
> +
> +	if (err) {
> +		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
> +			err);
> +		goto out;
> +	} else
> +		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
> +		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
> +		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
> +		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
> +		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
> +		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
> +
> +	return err;
> +
> +out:
> +	dev_err(&mac->pdev->dev, "pasemi_mac: init failed\n");
> +
> +	pci_disable_device(pdev);
> +	pci_dev_put(mac->dma_pdev);
> +	pci_dev_put(mac->iob_pdev);
> +	free_netdev(dev);
> +	return err;
> +}
> +
> +static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
> +{
> +	struct net_device *netdev = pci_get_drvdata(pdev);
> +	struct pasemi_mac *mac;
> +
> +	if (!netdev)
> +		return;
> +
> +	mac = netdev_priv(netdev);
> +
> +	unregister_netdev(netdev);
> +
> +	pci_disable_device(pdev);
> +	pci_dev_put(mac->dma_pdev);
> +	pci_dev_put(mac->iob_pdev);
> +
> +	pci_set_drvdata(pdev, NULL);
> +	free_netdev(netdev);
> +}
> +
> +static struct pci_device_id pasemi_mac_pci_tbl[] = {
> +	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
> +	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
> +	{ 0 }

remove the '0'


> +};
> +
> +MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
> +
> +static struct pci_driver pasemi_mac_driver = {
> +	.name		= "pasemi_mac",
> +	.id_table	= pasemi_mac_pci_tbl,
> +	.probe		= pasemi_mac_probe,
> +	.remove		= __devexit_p(pasemi_mac_remove),
> +};
> +
> +static void __exit pasemi_mac_cleanup_module(void)
> +{
> +	pci_unregister_driver(&pasemi_mac_driver);
> +	__iounmap(dma_status);
> +	dma_status = NULL;
> +}
> +
> +int pasemi_mac_init_module(void)
> +{
> +	return pci_register_driver(&pasemi_mac_driver);
> +}
> +
> +MODULE_LICENSE("GPL");
> +MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
> +MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
> +
> +module_init(pasemi_mac_init_module);


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] [v3] PA Semi PWRficient Ethernet driver
  2007-01-31  5:44   ` [PATCH] [v3] PA " Olof Johansson
  2007-01-31 10:34     ` Jeff Garzik
@ 2007-01-31 12:44     ` Ingo Oeser
  2007-01-31 15:16       ` Olof Johansson
  2007-01-31 18:38     ` Stephen Hemminger
  2007-02-01  3:43     ` [PATCH] [v4] " Olof Johansson
  3 siblings, 1 reply; 23+ messages in thread
From: Ingo Oeser @ 2007-01-31 12:44 UTC (permalink / raw)
  To: Olof Johansson
  Cc: jgarzik, netdev, Stephen Hemminger, Francois Romieu, Christoph Hellwig

Hi,

Olof Johansson schrieb:
[...]
> +static int pasemi_mac_close(struct net_device *dev)
> +{
[..]
> +	do {
> +		pci_read_config_dword(mac->dma_pdev,
> +				      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
> +				      &stat);
> +	} while (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT);
> +
> +	do {
> +		pci_read_config_dword(mac->dma_pdev,
> +				      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
> +				      &stat);
> +	} while (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT);
> +
> +	do {
> +		pci_read_config_dword(mac->dma_pdev,
> +				      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
> +				      &stat);
> +	} while (stat & PAS_DMA_RXINT_RCMDSTA_ACT);

You might want to write these loops like this:

#define MAX_READ_TRIES 10000
unsigned int tries;
unsigned int stat;
for (tries = 0; tries < MAX_READ_TRIES; tries++)
{
	read_stat(&stat);
	if ((stat & STATE_FLAG) == 0)
		break;
	cond_resched();
}
if (stat & STATE_FLAG) {
	dev_err(&mac->pdev->dev, "Failed to stop device, possible hardware error?\n");
        /* Panic, disable device, mark unusable, whatever is better than hanging here */
}

That way you:
- give other processes a chance to run while you wait for the hardware
  state to change (if your hardware can tolerate these latencies).
- can somehow handle hardware failure and at least give the user a clue
  about what is happening.

Regards

Ingo Oeser

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] [v3] PA Semi PWRficient Ethernet driver
  2007-01-31 12:44     ` Ingo Oeser
@ 2007-01-31 15:16       ` Olof Johansson
  0 siblings, 0 replies; 23+ messages in thread
From: Olof Johansson @ 2007-01-31 15:16 UTC (permalink / raw)
  To: Ingo Oeser
  Cc: jgarzik, netdev, Stephen Hemminger, Francois Romieu, Christoph Hellwig

Hi,

On Wed, Jan 31, 2007 at 01:44:31PM +0100, Ingo Oeser wrote:

> You might want to write these loops like that:
[...]

> That way you:
> - Let you give other processes a chance to run, 
>   while you wait for your hardware state to change 
>   (if your hardware can tolerate these latencies).
> - can somehow handle hardware failure and at least give the user a clue
>   what is happening.

Yep, makes sense. I'll implement something like it.


Thanks,

-Olof

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] [v3] PA Semi PWRficient Ethernet driver
  2007-01-31  5:44   ` [PATCH] [v3] PA " Olof Johansson
  2007-01-31 10:34     ` Jeff Garzik
  2007-01-31 12:44     ` Ingo Oeser
@ 2007-01-31 18:38     ` Stephen Hemminger
  2007-02-01  3:20       ` Olof Johansson
  2007-02-01  3:43     ` [PATCH] [v4] " Olof Johansson
  3 siblings, 1 reply; 23+ messages in thread
From: Stephen Hemminger @ 2007-01-31 18:38 UTC (permalink / raw)
  To: Olof Johansson; +Cc: jgarzik, netdev, Francois Romieu, Christoph Hellwig

> +static noinline void pasemi_mac_free_tx_resources(struct net_device *dev)

Why tell the compiler how to do its job by marking it noinline?

> Index: merge/drivers/net/pasemi_mac.h
> ===================================================================
> --- /dev/null
> +++ merge/drivers/net/pasemi_mac.h
> +
> +struct pas_dma_xct_descr {
> +	union {
> + 		u64	mactx;
> +#define	XCT_MACTX_T		0x8000000000000000
> +#define	XCT_MACTX_ST		0x4000000000000000
> +#define XCT_MACTX_NORES		0x0000000000000000
> +#define XCT_MACTX_8BRES		0x1000000000000000
> +#define XCT_MACTX_24BRES	0x2000000000000000
> +#define XCT_MACTX_40BRES	0x3000000000000000
> +#define XCT_MACTX_I		0x0800000000000000
> +#define XCT_MACTX_O		0x0400000000000000
> +#define XCT_MACTX_E		0x0200000000000000
> +#define XCT_MACTX_VLAN_M	0x0180000000000000
> +#define XCT_MACTX_VLAN_NOP	0x0000000000000000
> +#define XCT_MACTX_VLAN_REMOVE	0x0080000000000000
> +#define XCT_MACTX_VLAN_INSERT   0x0100000000000000
> +#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000
> +#define XCT_MACTX_CRC_M		0x0060000000000000
> +#define XCT_MACTX_CRC_NOP	0x0000000000000000
> +#define XCT_MACTX_CRC_INSERT	0x0020000000000000
> +#define XCT_MACTX_CRC_PAD	0x0040000000000000
> +#define XCT_MACTX_CRC_REPLACE	0x0060000000000000
> +#define XCT_MACTX_SS		0x0010000000000000
> +#define XCT_MACTX_LLEN_M	0x00007fff00000000
> +#define XCT_MACTX_LLEN_S	32ull
> +#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & XCT_MACTX_LLEN_M)
> +#define XCT_MACTX_IPH_M		0x00000000f8000000
> +#define XCT_MACTX_IPH_S		27ull
> +#define XCT_MACTX_IPH(x)	((((long)(x)) << XCT_MACTX_IPH_S) & XCT_MACTX_IPH_M)
> +#define XCT_MACTX_IPO_M		0x0000000007c00000
> +#define XCT_MACTX_IPO_S		22ull
> +#define XCT_MACTX_IPO(x)	((((long)(x)) << XCT_MACTX_IPO_S) & XCT_MACTX_IPO_M)
> +#define XCT_MACTX_CSUM_M	0x0000000000000060
> +#define XCT_MACTX_CSUM_NOP	0x0000000000000000
> +#define XCT_MACTX_CSUM_TCP	0x0000000000000040
> +#define XCT_MACTX_CSUM_UDP	0x0000000000000060
> +#define XCT_MACTX_V6		0x0000000000000010
> +#define XCT_MACTX_C		0x0000000000000004
> +#define XCT_MACTX_AL2		0x0000000000000002
> +		u64	macrx;

Mixing definitions and structure values gets confusing.
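
e.g. (sketch) keep the bit definitions in a block of their own and
leave the structure bare:

/* tx/rx descriptor word bits */
#define XCT_MACTX_T	0x8000000000000000ULL
#define XCT_MACTX_ST	0x4000000000000000ULL
/* ... */

struct pas_dma_xct_descr {
	union {
		u64	mactx;
		u64	macrx;
	};
	union {
		u64	ptr;
		u64	rxb;
	};
};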

-- 
Stephen Hemminger <shemminger@linux-foundation.org>

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] [v3] PA Semi PWRficient Ethernet driver
  2007-01-31 18:38     ` Stephen Hemminger
@ 2007-02-01  3:20       ` Olof Johansson
  0 siblings, 0 replies; 23+ messages in thread
From: Olof Johansson @ 2007-02-01  3:20 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: jgarzik, netdev, Francois Romieu, Christoph Hellwig

On Wed, Jan 31, 2007 at 10:38:06AM -0800, Stephen Hemminger wrote:
> > +static noinline void pasemi_mac_free_tx_resources(struct net_device *dev)
> 
> Why tell the compiler how to do its job by marking it noinline?

Leftover from debugging. Gone in the next version.

> Mixing definitions and structure values gets confusing.

I find that if the definitions are few, keeping them near the structure
definition makes things easier to read. This is way past the limit for
that, though. I'll move them.


Thanks,

-Olof

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] [v3] PA Semi PWRficient Ethernet driver
  2007-01-31 10:34     ` Jeff Garzik
@ 2007-02-01  3:40       ` Olof Johansson
  0 siblings, 0 replies; 23+ messages in thread
From: Olof Johansson @ 2007-02-01  3:40 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: netdev, Stephen Hemminger, Francois Romieu, Christoph Hellwig

On Wed, Jan 31, 2007 at 05:34:06AM -0500, Jeff Garzik wrote:
> Olof Johansson wrote:
> >Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)
> >
> >Basic enablement, will be complemented with performance enhancements
> >over time. PHY support will be added as well.
> >
> >Signed-off-by: Olof Johansson <olof@lixom.net>
> 
> Looks generally pretty clean, well done.

Getting better. I really should have found a lot of this on my own.
Thanks to everyone for their patience.

I added a TODO list in the driver for the things that I'll add
incrementally over time.

(I also took out some unused definitions while I was at it)

> Comments included inline...

Replies where appropriate, with most of the code trimmed. New patch
posted separately.

> consider enums rather than #define's for constants.  they generate 
> symbols at the C level rather than cpp level, making the code more 
> readable, providing more type information to the C compiler, and making 
> symbols visible at the debugger level.
> 
> example:
> 
> enum {
> 	PAS_DMA_MAX_IF		= 40,
> 	PAS_DMA_MAX_RXCH	= 8,
> 	PAS_DMA_MAX_TXCH	= 8,
> };

That works quite OK for things like register numbers. For bitfields
I'm not so sure the compiler/debugger will have much benefit from it
though, right? It's also nice to keep the mask/shift and macro together.

I've broken out the simpler register numbers into enums. If you feel
really strongly that I should do the rest the same way, I'll give it
a shot.

Also, enums are ints, right? The 64-bit fields will be hard to describe
that way.

> 
> >+static int pasemi_set_mac_addr(struct pasemi_mac *mac)
> 
> poor name.  from the context of the code reader and driver, this should 
> be "pasemi_GET_mac_addr", rather than ...set...

Set it in the structure, get it from the hardware. Yes, I was thinking
of it the other way around there.  Fixed.

> "0" is not the same as "NULL".  Use NULL where appropriate.
> 
> Then make sure your driver passes sparse checks (read 
> Documentation/sparse.txt)

Fixed, and other places where sparse complained (0x...ull on large
constants).

Only exception is the ioremap, see below.

> if feasible, logical operations are often more optimal than '%'
> 
> maybe you need something like tg3.c's NEXT_TX() ?

Nice. Yes, much better. Only drawback is that the ring size will be
fixed at compile time (not that I have the ethtool hookups to set it
now anyway).

> >+	if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 
> >0))
> >+		count = mac->rx->count - 8;
> 
> why this is needed?

Added a comment before the statement explaining why the check is
needed -- both will be 0 on the very first fill. The -8 can come out
now; it was an old workaround that's no longer needed.

> >+		dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
> >+				     PCI_DMA_FROMDEVICE);
> 
> check for DMA mapping error

Done. However, I noticed that lots of other network drivers don't do
it. :-)

> >+	if (!spin_trylock_irqsave(&mac->tx->lock, flags))
> >+		return 0;
> 
> what prevents starvation?

Uhm, nothing. Thanks.

> >+	pci_write_config_dword(mac->iob_pdev,
> >+			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
> 
> is there any faster method of register reading/writing than through PCI 
> config registers?
> 
> pci_{read,write}_config_foo acquires and releases a spinlock for each 
> operation, making it rather expensive in fast path code

Yeah, I noticed lately that while it fits nicely with our register
structure, it's really heavy on overhead.

It's at the top of my list for what to work on next; if it's OK with
you I'd prefer to do it incrementally after the merge, though.

> >+static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device
> >*dev)
> >+{
> >+	struct pasemi_mac *mac = netdev_priv(dev);
> >+	struct pasemi_mac_txring *txring;
> >+	struct pasemi_mac_buffer *info;
> >+	struct pas_dma_xct_descr *dp;
> >+	u64 flags;
> >+	dma_addr_t map;
> 
> needs locking, as mentioned elsewhere

Doh, need to protect the whole ring not just in clean. Added.

> >+	/* XXXOJN Deal with fragmented packets when larger MTU is supported 
> >*/
> 
> does this comment imply that larger MTUs make the driver go splat, or 
> does driver code elsewhere prevent the user from using an invalid MTU?

Larger MTU support is just not wired up yet (no change_mtu function). It's
on the list.

> >+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
> 
> you have no multicast capability?

The device has it, just not hooked up in the driver. On the list.

> >+	/* The dma status structure is located in the I/O bridge, and
> >+	 * is cache coherent.
> >+	 */
> >+	if (!dma_status)
> >+		/* XXXOJN This should come from the device tree */
> >+		dma_status = __ioremap(0xfd800000, 0x1000, 0);
> 
> why __ioremap ?

As the comment says, the registers live in the I/O bridge, and they
are cache coherent. So to map them in, I use the variant where (on
powerpc at least) you can specify the page flags -- so I'm not setting
the _PAGE_NO_CACHE|_PAGE_GUARDED that the standard ioremap uses.

Still, this makes sparse unhappy since it's still flagged as another
address space. I'm open to better ideas.


-Olof

^ permalink raw reply	[flat|nested] 23+ messages in thread

* [PATCH] [v4] PA Semi PWRficient Ethernet driver
  2007-01-31  5:44   ` [PATCH] [v3] PA " Olof Johansson
                       ` (2 preceding siblings ...)
  2007-01-31 18:38     ` Stephen Hemminger
@ 2007-02-01  3:43     ` Olof Johansson
  2007-02-02 13:26       ` Jeff Garzik
  3 siblings, 1 reply; 23+ messages in thread
From: Olof Johansson @ 2007-02-01  3:43 UTC (permalink / raw)
  To: jgarzik
  Cc: netdev, Stephen Hemminger, Francois Romieu, Christoph Hellwig,
	Ingo Oeser

Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)

Basic enablement, will be complemented with performance enhancements
over time. PHY support will be added as well.

Signed-off-by: Olof Johansson <olof@lixom.net>

---

v4 changes:

* Check for dma mapping error
* Sparse cleanups
* Moved some register offsets to enums
* Don't spin infinitely on interface shutdown
* Remove some more debug cruft
* Better ring wrapping
* More locking fixes
* Misc other cleanups

v3 changes:

* Added remove function
* Checking interrupt status in handler
* Misc cleanups w.r.t. ring handing (INFO/DESCR/BUFF are gone)
* Added locking of the rings
* Using PCI DMA for all buffers
* Setting the interrupt descriptor field to include interface number
* Added PCI vendor ID, it's been submitted to sf.net as well.
* Moved Kconfig entry to the 10GbE section


Index: merge/drivers/net/Kconfig
===================================================================
--- merge.orig/drivers/net/Kconfig
+++ merge/drivers/net/Kconfig
@@ -2488,6 +2488,13 @@ config NETXEN_NIC
 	help
 	  This enables the support for NetXen's Gigabit Ethernet card.
 
+config PASEMI_MAC
+	tristate "PA Semi 1/10Gbit MAC"
+	depends on PPC64 && PCI
+	help
+	  This driver supports the on-chip 1/10Gbit Ethernet controller on
+	  PA Semi's PWRficient line of chips.
+
 endmenu
 
 source "drivers/net/tokenring/Kconfig"
Index: merge/drivers/net/Makefile
===================================================================
--- merge.orig/drivers/net/Makefile
+++ merge/drivers/net/Makefile
@@ -196,6 +196,7 @@ obj-$(CONFIG_SMC91X) += smc91x.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
 
 obj-$(CONFIG_MACB) += macb.o
 
Index: merge/drivers/net/pasemi_mac.c
===================================================================
--- /dev/null
+++ merge/drivers/net/pasemi_mac.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/checksum.h>
+
+#include "pasemi_mac.h"
+
+
+/* TODO list
+ *
+ * - Get rid of pci_{read,write}_config(), map registers with ioremap
+ *   for performance
+ * - PHY support
+ * - Multicast support
+ * - Large MTU support
+ * - Other performance improvements
+ */
+
+
+/* Must be a power of two */
+#define RX_RING_SIZE 512
+#define TX_RING_SIZE 512
+
+#define TX_DESC(mac, num)	((mac)->tx->desc[(num) & (TX_RING_SIZE-1)])
+#define TX_DESC_INFO(mac, num)	((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)])
+#define RX_DESC(mac, num)	((mac)->rx->desc[(num) & (RX_RING_SIZE-1)])
+#define RX_DESC_INFO(mac, num)	((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)])
+#define RX_BUFF(mac, num)	((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])
+
+#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
+
+/* XXXOJN these should come out of the device tree some day */
+#define PAS_DMA_CAP_BASE   0xe00d0040
+#define PAS_DMA_CAP_SIZE   0x100
+#define PAS_DMA_COM_BASE   0xe00d0100
+#define PAS_DMA_COM_SIZE   0x100
+
+static struct pasdma_status *dma_status;
+
+static int pasemi_get_mac_addr(struct pasemi_mac *mac)
+{
+	struct pci_dev *pdev = mac->pdev;
+	struct device_node *dn = pci_device_to_OF_node(pdev);
+	const u8 *maddr;
+	u8 addr[6];
+
+	if (!dn) {
+		dev_dbg(&pdev->dev,
+			  "No device node for mac, not configuring\n");
+		return -ENOENT;
+	}
+
+	maddr = get_property(dn, "mac-address", NULL);
+	if (maddr == NULL) {
+		dev_warn(&pdev->dev,
+			 "no mac address in device tree, not configuring\n");
+		return -ENOENT;
+	}
+
+	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
+		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
+		dev_warn(&pdev->dev,
+			 "can't parse mac address, not configuring\n");
+		return -EINVAL;
+	}
+
+	memcpy(mac->mac_addr, addr, sizeof(addr));
+	return 0;
+}
+
+static int pasemi_mac_setup_rx_resources(struct net_device *dev)
+{
+	struct pasemi_mac_rxring *ring;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	int chan_id = mac->dma_rxch;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+
+	if (!ring)
+		goto out_ring;
+
+	spin_lock_init(&ring->lock);
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
+				  RX_RING_SIZE, GFP_KERNEL);
+
+	if (!ring->desc_info)
+		goto out_desc_info;
+
+	/* Allocate descriptors */
+	ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
+					RX_RING_SIZE *
+					sizeof(struct pas_dma_xct_descr),
+					&ring->dma, GFP_KERNEL);
+
+	if (!ring->desc)
+		goto out_desc;
+
+	memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
+
+	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
+					   RX_RING_SIZE * sizeof(u64),
+					   &ring->buf_dma, GFP_KERNEL);
+	if (!ring->buffers)
+		goto out_buffers;
+
+	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
+			       PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
+			       PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
+			       PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
+			       PAS_DMA_RXCHAN_CFG_HBU(1));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
+			       PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
+			       PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
+			       PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));
+
+	ring->next_to_fill = 0;
+	ring->next_to_clean = 0;
+
+	snprintf(ring->irq_name, sizeof(ring->irq_name),
+		 "%s rx", dev->name);
+	mac->rx = ring;
+
+	return 0;
+
+out_buffers:
+	dma_free_coherent(&mac->dma_pdev->dev,
+			  RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
+			  ring->desc, ring->dma);
+out_desc:
+	kfree(ring->desc_info);
+out_desc_info:
+	kfree(ring);
+out_ring:
+	return -ENOMEM;
+}
+
+
+static int pasemi_mac_setup_tx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	u32 val;
+	int chan_id = mac->dma_txch;
+	struct pasemi_mac_txring *ring;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		goto out_ring;
+
+	spin_lock_init(&ring->lock);
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
+				  TX_RING_SIZE, GFP_KERNEL);
+	if (!ring->desc_info)
+		goto out_desc_info;
+
+	/* Allocate descriptors */
+	ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
+					TX_RING_SIZE *
+					sizeof(struct pas_dma_xct_descr),
+					&ring->dma, GFP_KERNEL);
+	if (!ring->desc)
+		goto out_desc;
+
+	memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
+			       PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
+	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
+	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
+			       PAS_DMA_TXCHAN_CFG_TY_IFACE |
+			       PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
+			       PAS_DMA_TXCHAN_CFG_UP |
+			       PAS_DMA_TXCHAN_CFG_WT(2));
+
+	ring->next_to_use = 0;
+	ring->next_to_clean = 0;
+
+	snprintf(ring->irq_name, sizeof(ring->irq_name),
+		 "%s tx", dev->name);
+	mac->tx = ring;
+
+	return 0;
+
+out_desc:
+	kfree(ring->desc_info);
+out_desc_info:
+	kfree(ring);
+out_ring:
+	return -ENOMEM;
+}
+
+static void pasemi_mac_free_tx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		info = &TX_DESC_INFO(mac, i);
+		dp = &TX_DESC(mac, i);
+		if (info->dma) {
+			if (info->skb) {
+				pci_unmap_single(mac->dma_pdev,
+						 info->dma,
+						 info->skb->len,
+						 PCI_DMA_TODEVICE);
+				dev_kfree_skb_any(info->skb);
+			}
+			info->dma = 0;
+			info->skb = NULL;
+			dp->mactx = 0;
+			dp->ptr = 0;
+		}
+	}
+
+	dma_free_coherent(&mac->dma_pdev->dev,
+			  TX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
+			  mac->tx->desc, mac->tx->dma);
+
+	kfree(mac->tx->desc_info);
+	kfree(mac->tx);
+	mac->tx = NULL;
+}
+
+static void pasemi_mac_free_rx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		info = &RX_DESC_INFO(mac, i);
+		dp = &RX_DESC(mac, i);
+		if (info->dma) {
+			if (info->skb) {
+				pci_unmap_single(mac->dma_pdev,
+						 info->dma,
+						 info->skb->len,
+						 PCI_DMA_FROMDEVICE);
+				dev_kfree_skb_any(info->skb);
+			}
+			info->dma = 0;
+			info->skb = NULL;
+			dp->macrx = 0;
+			dp->ptr = 0;
+		}
+	}
+
+	dma_free_coherent(&mac->dma_pdev->dev,
+			  RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
+			  mac->rx->desc, mac->rx->dma);
+
+	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
+			  mac->rx->buffers, mac->rx->buf_dma);
+
+	kfree(mac->rx->desc_info);
+	kfree(mac->rx);
+	mac->rx = NULL;
+}
+
+static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	int start = mac->rx->next_to_fill;
+	unsigned int count;
+
+	count = (mac->rx->next_to_clean + RX_RING_SIZE -
+		 mac->rx->next_to_fill) & (RX_RING_SIZE - 1);
+
+	/* Check to see if we're doing first-time setup */
+	if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
+		count = RX_RING_SIZE;
+
+	if (count <= 0)
+		return;
+
+	for (i = start; i < start + count; i++) {
+		struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i);
+		u64 *buff = &RX_BUFF(mac, i);
+		struct sk_buff *skb;
+		dma_addr_t dma;
+
+		skb = dev_alloc_skb(BUF_SIZE);
+
+		if (!skb) {
+			count = i - start;
+			break;
+		}
+
+		skb->dev = dev;
+
+		dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
+				     PCI_DMA_FROMDEVICE);
+
+		if (dma_mapping_error(dma)) {
+			dev_kfree_skb_irq(skb);
+			count = i - start;
+			break;
+		}
+
+		info->skb = skb;
+		info->dma = dma;
+		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
+	}
+
+	wmb();
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
+			       count);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_INCR(mac->dma_if),
+			       count);
+
+	mac->rx->next_to_fill += count;
+}
+
+static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
+{
+	unsigned int i;
+	int start, count;
+
+	spin_lock(&mac->rx->lock);
+
+	start = mac->rx->next_to_clean;
+	count = 0;
+
+	for (i = start; i < (start + RX_RING_SIZE) && count < limit; i++) {
+		struct pas_dma_xct_descr *dp;
+		struct pasemi_mac_buffer *info;
+		struct sk_buff *skb;
+		unsigned int j, len;
+		dma_addr_t dma;
+
+		rmb();
+
+		dp = &RX_DESC(mac, i);
+
+		if (!(dp->macrx & XCT_MACRX_O))
+			break;
+
+		count++;
+
+		info = NULL;
+
+		/* We have to scan for our skb since there's no way
+		 * to back-map them from the descriptor, and if we
+		 * have several receive channels then they might not
+		 * show up in the same order as they were put on the
+		 * interface ring.
+		 */
+
+		dma = (dp->ptr & XCT_PTR_ADDR_M);
+		for (j = start; j < (start + RX_RING_SIZE); j++) {
+			info = &RX_DESC_INFO(mac, j);
+			if (info->dma == dma)
+				break;
+		}
+
+		BUG_ON(!info);
+		BUG_ON(info->dma != dma);
+
+		pci_unmap_single(mac->dma_pdev, info->dma, info->skb->len,
+				 PCI_DMA_FROMDEVICE);
+
+		skb = info->skb;
+
+		len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
+
+		skb_put(skb, len);
+
+		skb->protocol = eth_type_trans(skb, mac->netdev);
+
+		if ((dp->macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
+			skb->ip_summed = CHECKSUM_COMPLETE;
+			skb->csum = (dp->macrx & XCT_MACRX_CSUM_M) >>
+					   XCT_MACRX_CSUM_S;
+		} else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		mac->stats.rx_bytes += len;
+		mac->stats.rx_packets++;
+
+		netif_receive_skb(skb);
+
+		info->dma = 0;
+		info->skb = NULL;
+		dp->ptr = 0;
+		dp->macrx = 0;
+	}
+
+	mac->rx->next_to_clean += count;
+	pasemi_mac_replenish_rx_ring(mac->netdev);
+
+	spin_unlock(&mac->rx->lock);
+
+	return count;
+}
+
+static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
+{
+	int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+	int start, count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mac->tx->lock, flags);
+
+	start = mac->tx->next_to_clean;
+	count = 0;
+
+	for (i = start; i < mac->tx->next_to_use; i++) {
+		dp = &TX_DESC(mac, i);
+		if (!dp || (dp->mactx & XCT_MACTX_O))
+			break;
+
+		count++;
+
+		info = &TX_DESC_INFO(mac, i);
+
+		pci_unmap_single(mac->dma_pdev, info->dma,
+				 info->skb->len, PCI_DMA_TODEVICE);
+		dev_kfree_skb_irq(info->skb);
+
+		info->skb = NULL;
+		info->dma = 0;
+		dp->mactx = 0;
+		dp->ptr = 0;
+	}
+	mac->tx->next_to_clean += count;
+	spin_unlock_irqrestore(&mac->tx->lock, flags);
+
+	return count;
+}
+
+
+static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int reg;
+
+	if (!(*mac->rx_status & PAS_STATUS_INT))
+		return IRQ_NONE;
+
+	netif_rx_schedule(dev);
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));
+
+	reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
+	      PAS_IOB_DMA_RXCH_RESET_DINTC;
+	if (*mac->rx_status & PAS_STATUS_TIMER)
+		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
+
+	pci_write_config_dword(mac->iob_pdev,
+			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
+
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int reg;
+	int was_full;
+
+	was_full = mac->tx->next_to_use - mac->tx->next_to_clean == TX_RING_SIZE;
+
+	if (!(*mac->tx_status & PAS_STATUS_INT))
+		return IRQ_NONE;
+
+	pasemi_mac_clean_tx(mac);
+
+	reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
+	if (*mac->tx_status & PAS_STATUS_TIMER)
+		reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
+			       reg);
+
+	if (was_full)
+		netif_wake_queue(dev);
+
+	return IRQ_HANDLED;
+}
+
+static int pasemi_mac_open(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int flags;
+	int ret;
+
+	/* enable rx section */
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
+			       PAS_DMA_COM_RXCMD_EN);
+
+	/* enable tx section */
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
+			       PAS_DMA_COM_TXCMD_EN);
+
+	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
+		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
+		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);
+
+	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
+		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
+
+	flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
+			       PAS_IOB_DMA_RXCH_CFG_CNTTH(30));
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
+
+	ret = pasemi_mac_setup_rx_resources(dev);
+	if (ret)
+		goto out_rx_resources;
+
+	ret = pasemi_mac_setup_tx_resources(dev);
+	if (ret)
+		goto out_tx_resources;
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
+			       PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
+			       PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));
+
+	/* enable rx if */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+			       PAS_DMA_RXINT_RCMDSTA_EN);
+
+	/* enable rx channel */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+			       PAS_DMA_RXCHAN_CCMDSTA_EN |
+			       PAS_DMA_RXCHAN_CCMDSTA_DU);
+
+	/* enable tx channel */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+			       PAS_DMA_TXCHAN_TCMDSTA_EN);
+
+	pasemi_mac_replenish_rx_ring(dev);
+
+	netif_start_queue(dev);
+	netif_poll_enable(dev);
+
+	ret = request_irq(mac->dma_pdev->irq + mac->dma_txch,
+			  &pasemi_mac_tx_intr, IRQF_DISABLED,
+			  mac->tx->irq_name, dev);
+	if (ret) {
+		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+		       mac->dma_pdev->irq + mac->dma_txch, ret);
+		goto out_tx_int;
+	}
+
+	ret = request_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch,
+			  &pasemi_mac_rx_intr, IRQF_DISABLED,
+			  mac->rx->irq_name, dev);
+	if (ret) {
+		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+		       mac->dma_pdev->irq + 20 + mac->dma_rxch, ret);
+		goto out_rx_int;
+	}
+
+	return 0;
+
+out_rx_int:
+	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
+out_tx_int:
+	netif_poll_disable(dev);
+	netif_stop_queue(dev);
+	pasemi_mac_free_tx_resources(dev);
+out_tx_resources:
+	pasemi_mac_free_rx_resources(dev);
+out_rx_resources:
+
+	return ret;
+}
+
+#define MAX_RETRIES 5000
+
+static int pasemi_mac_close(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int stat;
+	int retries;
+
+	netif_stop_queue(dev);
+
+	/* Clean out any pending buffers */
+	pasemi_mac_clean_tx(mac);
+	pasemi_mac_clean_rx(mac, RX_RING_SIZE);
+
+	/* Disable interface */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+			       PAS_DMA_TXCHAN_TCMDSTA_ST);
+	pci_write_config_dword(mac->dma_pdev,
+		      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+		      PAS_DMA_RXINT_RCMDSTA_ST);
+	pci_write_config_dword(mac->dma_pdev,
+		      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+		      PAS_DMA_RXCHAN_CCMDSTA_ST);
+
+	for (retries = 0; retries < MAX_RETRIES; retries++) {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+				      &stat);
+		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
+			break;
+		cond_resched();
+	}
+
+	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT) {
+		dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");
+	}
+
+	for (retries = 0; retries < MAX_RETRIES; retries++) {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+				      &stat);
+		if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT))
+			break;
+		cond_resched();
+	}
+
+	if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT) {
+		dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");
+	}
+
+	for (retries = 0; retries < MAX_RETRIES; retries++) {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+				      &stat);
+		if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT))
+			break;
+		cond_resched();
+	}
+
+	if (stat & PAS_DMA_RXINT_RCMDSTA_ACT) {
+		dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
+	}
+
+	/* Then, disable the channel. This must be done separately from
+	 * stopping, since you can't disable when active.
+	 */
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+
+	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
+	free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev);
+
+	/* Free resources */
+	pasemi_mac_free_rx_resources(dev);
+	pasemi_mac_free_tx_resources(dev);
+
+	return 0;
+}
+
+static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	struct pasemi_mac_txring *txring;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+	u64 dflags;
+	dma_addr_t map;
+	unsigned long flags;
+
+	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		switch (skb->nh.iph->protocol) {
+		case IPPROTO_TCP:
+			dflags |= XCT_MACTX_CSUM_TCP;
+			dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
+			dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+			break;
+		case IPPROTO_UDP:
+			dflags |= XCT_MACTX_CSUM_UDP;
+			dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
+			dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+			break;
+		}
+	}
+
+	map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+
+	if (dma_mapping_error(map))
+		return NETDEV_TX_BUSY;
+
+	txring = mac->tx;
+
+	spin_lock_irqsave(&txring->lock, flags);
+
+	if (txring->next_to_use - txring->next_to_clean == TX_RING_SIZE) {
+		spin_unlock_irqrestore(&txring->lock, flags);
+		pasemi_mac_clean_tx(mac);
+		spin_lock_irqsave(&txring->lock, flags);
+
+		if (txring->next_to_use - txring->next_to_clean ==
+		    TX_RING_SIZE) {
+			/* Still no room -- stop the queue and wait for tx
+			 * intr when there's room.
+			 */
+			netif_stop_queue(dev);
+			goto out_err;
+		}
+	}
+
+
+	dp = &TX_DESC(mac, txring->next_to_use);
+	info = &TX_DESC_INFO(mac, txring->next_to_use);
+
+	dp->mactx = dflags | XCT_MACTX_LLEN(skb->len);
+	dp->ptr   = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
+	info->dma = map;
+	info->skb = skb;
+
+	txring->next_to_use++;
+	mac->stats.tx_packets++;
+	mac->stats.tx_bytes += skb->len;
+
+	spin_unlock_irqrestore(&txring->lock, flags);
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
+
+	return NETDEV_TX_OK;
+
+out_err:
+	spin_unlock_irqrestore(&txring->lock, flags);
+	pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE);
+	return NETDEV_TX_BUSY;
+}
+
+static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+
+	return &mac->stats;
+}
+
+static void pasemi_mac_set_rx_mode(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int flags;
+
+	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
+
+	/* Set promiscuous */
+	if (dev->flags & IFF_PROMISC)
+		flags |= PAS_MAC_CFG_PCFG_PR;
+	else
+		flags &= ~PAS_MAC_CFG_PCFG_PR;
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
+}
+
+
+static int pasemi_mac_poll(struct net_device *dev, int *budget)
+{
+	int pkts, limit = min(*budget, dev->quota);
+	struct pasemi_mac *mac = netdev_priv(dev);
+
+	pkts = pasemi_mac_clean_rx(mac, limit);
+
+	if (pkts < limit) {
+		/* all done, no more packets present */
+		netif_rx_complete(dev);
+
+		/* re-enable receive interrupts */
+		pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+				       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
+		return 0;
+	} else {
+		/* used up our quantum, so reschedule */
+		dev->quota -= pkts;
+		*budget -= pkts;
+		return 1;
+	}
+}
+
+static int __devinit
+pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int index = 0;
+	struct net_device *dev;
+	struct pasemi_mac *mac;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	dev = alloc_etherdev(sizeof(struct pasemi_mac));
+	if (dev == NULL) {
+		dev_err(&pdev->dev,
+			"pasemi_mac: Could not allocate ethernet device.\n");
+		err = -ENOMEM;
+		goto out_disable_device;
+	}
+
+	SET_MODULE_OWNER(dev);
+	pci_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	mac = netdev_priv(dev);
+
+	mac->pdev = pdev;
+	mac->netdev = dev;
+	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
+
+	if (!mac->dma_pdev) {
+		dev_err(&pdev->dev, "Can't find DMA Controller\n");
+		err = -ENODEV;
+		goto out_free_netdev;
+	}
+
+	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
+
+	if (!mac->iob_pdev) {
+		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
+		err = -ENODEV;
+		goto out_put_dma_pdev;
+	}
+
+	/* These should come out of the device tree eventually */
+	mac->dma_txch = index;
+	mac->dma_rxch = index;
+
+	/* We probe GMAC before XAUI, but the DMA interfaces are
+	 * in XAUI, GMAC order.
+	 */
+	if (index < 4)
+		mac->dma_if = index + 2;
+	else
+		mac->dma_if = index - 4;
+	index++;
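+	/* Worked example of the mapping above, assuming four GMAC and two
+	 * XAUI ports: probe order 0-3 (GMAC) lands on interfaces 2-5, and
+	 * probe order 4-5 (XAUI) lands on interfaces 0-1.
+	 */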
+
+	switch (pdev->device) {
+	case 0xa005:
+		mac->type = MAC_TYPE_GMAC;
+		break;
+	case 0xa006:
+		mac->type = MAC_TYPE_XAUI;
+		break;
+	default:
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* get mac addr from device tree */
+	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
+		err = -ENODEV;
+		goto out;
+	}
+	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
+
+	dev->open = pasemi_mac_open;
+	dev->stop = pasemi_mac_close;
+	dev->hard_start_xmit = pasemi_mac_start_tx;
+	dev->get_stats = pasemi_mac_get_stats;
+	dev->set_multicast_list = pasemi_mac_set_rx_mode;
+	dev->weight = 64;
+	dev->poll = pasemi_mac_poll;
+	dev->features = NETIF_F_HW_CSUM;
+
+	/* The dma status structure is located in the I/O bridge, and
+	 * is cache coherent.
+	 */
+	if (!dma_status)
+		/* XXXOJN This should come from the device tree */
+		dma_status = __ioremap(0xfd800000, 0x1000, 0);
+
+	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
+	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
+
+	err = register_netdev(dev);
+
+	if (err) {
+		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
+			err);
+		goto out;
+	} else
+		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
+		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
+		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
+		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+	return err;
+
+out:
+	pci_dev_put(mac->iob_pdev);
+out_put_dma_pdev:
+	pci_dev_put(mac->dma_pdev);
+out_free_netdev:
+	free_netdev(dev);
+out_disable_device:
+	pci_disable_device(pdev);
+	return err;
+
+}
+
+static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct pasemi_mac *mac;
+
+	if (!netdev)
+		return;
+
+	mac = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	pci_disable_device(pdev);
+	pci_dev_put(mac->dma_pdev);
+	pci_dev_put(mac->iob_pdev);
+
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+}
+
+static struct pci_device_id pasemi_mac_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
+
+static struct pci_driver pasemi_mac_driver = {
+	.name		= "pasemi_mac",
+	.id_table	= pasemi_mac_pci_tbl,
+	.probe		= pasemi_mac_probe,
+	.remove		= __devexit_p(pasemi_mac_remove),
+};
+
+static void __exit pasemi_mac_cleanup_module(void)
+{
+	pci_unregister_driver(&pasemi_mac_driver);
+	__iounmap(dma_status);
+	dma_status = NULL;
+}
+
+static int __init pasemi_mac_init_module(void)
+{
+	return pci_register_driver(&pasemi_mac_driver);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
+MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
+
+module_init(pasemi_mac_init_module);
+module_exit(pasemi_mac_cleanup_module);
Index: merge/drivers/net/pasemi_mac.h
===================================================================
--- /dev/null
+++ merge/drivers/net/pasemi_mac.h
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2006 PA Semi, Inc
+ *
+ * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and
+ * hardware register layouts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef PASEMI_MAC_H
+#define PASEMI_MAC_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+struct pasemi_mac_txring {
+	spinlock_t	 lock;
+	struct pas_dma_xct_descr	*desc;
+	dma_addr_t	 dma;
+	unsigned int	 size;
+	unsigned int	 next_to_use;
+	unsigned int	 next_to_clean;
+	struct pasemi_mac_buffer *desc_info;
+	char		 irq_name[10];  /* "eth%d tx" */
+};
+
+struct pasemi_mac_rxring {
+	spinlock_t	 lock;
+	struct pas_dma_xct_descr	*desc;	/* RX channel descriptor ring */
+	dma_addr_t	 dma;
+	u64		*buffers;	/* RX interface buffer ring */
+	dma_addr_t	 buf_dma;
+	unsigned int	 size;
+	unsigned int	 next_to_fill;
+	unsigned int	 next_to_clean;
+	struct pasemi_mac_buffer *desc_info;
+	char		 irq_name[10];  /* "eth%d rx" */
+};
+
+struct pasemi_mac {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct pci_dev *dma_pdev;
+	struct pci_dev *iob_pdev;
+	struct net_device_stats stats;
+
+	/* Pointer to the cacheable per-channel status registers */
+	u64	*rx_status;
+	u64	*tx_status;
+
+	u8		type;
+#define MAC_TYPE_GMAC	1
+#define MAC_TYPE_XAUI	2
+	u32	dma_txch;
+	u32	dma_if;
+	u32	dma_rxch;
+
+	u8		mac_addr[6];
+
+	struct timer_list	rxtimer;
+
+	struct pasemi_mac_txring *tx;
+	struct pasemi_mac_rxring *rx;
+};
+
+/* Software status descriptor (desc_info) */
+struct pasemi_mac_buffer {
+	struct sk_buff *skb;
+	dma_addr_t	dma;
+};
+
+
+/* status register layout in IOB region, at 0xfb800000 */
+struct pasdma_status {
+	u64 rx_sta[64];
+	u64 tx_sta[20];
+};
+
+/* descriptor structure */
+struct pas_dma_xct_descr {
+	union {
+		u64	mactx;
+		u64	macrx;
+	};
+	union {
+		u64	ptr;
+		u64	rxb;
+	};
+};
+
+/* MAC CFG register offsets */
+
+enum {
+	PAS_MAC_CFG_PCFG = 0x80,
+	PAS_MAC_CFG_TXP = 0x98,
+	PAS_MAC_IPC_CHNL = 0x208,
+};
+
+/* MAC CFG register fields */
+#define PAS_MAC_CFG_PCFG_PE		0x80000000
+#define PAS_MAC_CFG_PCFG_CE		0x40000000
+#define PAS_MAC_CFG_PCFG_BU		0x20000000
+#define PAS_MAC_CFG_PCFG_TT		0x10000000
+#define PAS_MAC_CFG_PCFG_TSR_M		0x0c000000
+#define PAS_MAC_CFG_PCFG_TSR_10M	0x00000000
+#define PAS_MAC_CFG_PCFG_TSR_100M	0x04000000
+#define PAS_MAC_CFG_PCFG_TSR_1G		0x08000000
+#define PAS_MAC_CFG_PCFG_TSR_10G	0x0c000000
+#define PAS_MAC_CFG_PCFG_T24		0x02000000
+#define PAS_MAC_CFG_PCFG_PR		0x01000000
+#define PAS_MAC_CFG_PCFG_CRO_M		0x00ff0000
+#define PAS_MAC_CFG_PCFG_CRO_S	16
+#define PAS_MAC_CFG_PCFG_IPO_M		0x0000ff00
+#define PAS_MAC_CFG_PCFG_IPO_S	8
+#define PAS_MAC_CFG_PCFG_S1		0x00000080
+#define PAS_MAC_CFG_PCFG_IO_M		0x00000060
+#define PAS_MAC_CFG_PCFG_IO_MAC		0x00000000
+#define PAS_MAC_CFG_PCFG_IO_OFF		0x00000020
+#define PAS_MAC_CFG_PCFG_IO_IND_ETH	0x00000040
+#define PAS_MAC_CFG_PCFG_IO_IND_IP	0x00000060
+#define PAS_MAC_CFG_PCFG_LP		0x00000010
+#define PAS_MAC_CFG_PCFG_TS		0x00000008
+#define PAS_MAC_CFG_PCFG_HD		0x00000004
+#define PAS_MAC_CFG_PCFG_SPD_M		0x00000003
+#define PAS_MAC_CFG_PCFG_SPD_10M	0x00000000
+#define PAS_MAC_CFG_PCFG_SPD_100M	0x00000001
+#define PAS_MAC_CFG_PCFG_SPD_1G		0x00000002
+#define PAS_MAC_CFG_PCFG_SPD_10G	0x00000003
+#define PAS_MAC_CFG_TXP_FCF		0x01000000
+#define PAS_MAC_CFG_TXP_FCE		0x00800000
+#define PAS_MAC_CFG_TXP_FC		0x00400000
+#define PAS_MAC_CFG_TXP_FPC_M		0x00300000
+#define PAS_MAC_CFG_TXP_FPC_S		20
+#define PAS_MAC_CFG_TXP_FPC(x)		(((x) << PAS_MAC_CFG_TXP_FPC_S) & \
+					 PAS_MAC_CFG_TXP_FPC_M)
+#define PAS_MAC_CFG_TXP_RT		0x00080000
+#define PAS_MAC_CFG_TXP_BL		0x00040000
+#define PAS_MAC_CFG_TXP_SL_M		0x00030000
+#define PAS_MAC_CFG_TXP_SL_S		16
+#define PAS_MAC_CFG_TXP_SL(x)		(((x) << PAS_MAC_CFG_TXP_SL_S) & \
+					 PAS_MAC_CFG_TXP_SL_M)
+#define PAS_MAC_CFG_TXP_COB_M		0x0000f000
+#define PAS_MAC_CFG_TXP_COB_S		12
+#define PAS_MAC_CFG_TXP_COB(x)		(((x) << PAS_MAC_CFG_TXP_COB_S) & \
+					 PAS_MAC_CFG_TXP_COB_M)
+#define PAS_MAC_CFG_TXP_TIFT_M		0x00000f00
+#define PAS_MAC_CFG_TXP_TIFT_S		8
+#define PAS_MAC_CFG_TXP_TIFT(x)		(((x) << PAS_MAC_CFG_TXP_TIFT_S) & \
+					 PAS_MAC_CFG_TXP_TIFT_M)
+#define PAS_MAC_CFG_TXP_TIFG_M		0x000000ff
+#define PAS_MAC_CFG_TXP_TIFG_S		0
+#define PAS_MAC_CFG_TXP_TIFG(x)		(((x) << PAS_MAC_CFG_TXP_TIFG_S) & \
+					 PAS_MAC_CFG_TXP_TIFG_M)
+
+#define PAS_MAC_IPC_CHNL_DCHNO_M	0x003f0000
+#define PAS_MAC_IPC_CHNL_DCHNO_S	16
+#define PAS_MAC_IPC_CHNL_DCHNO(x)	(((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \
+					 PAS_MAC_IPC_CHNL_DCHNO_M)
+#define PAS_MAC_IPC_CHNL_BCH_M		0x0000003f
+#define PAS_MAC_IPC_CHNL_BCH_S		0
+#define PAS_MAC_IPC_CHNL_BCH(x)		(((x) << PAS_MAC_IPC_CHNL_BCH_S) & \
+					 PAS_MAC_IPC_CHNL_BCH_M)
+
+/* All these registers live in the PCI configuration space for the DMA PCI
+ * device. Use the normal PCI config access functions for them.
+ */
+enum {
+	PAS_DMA_COM_TXCMD = 0x100,	/* Transmit Command Register  */
+	PAS_DMA_COM_TXSTA = 0x104,	/* Transmit Status Register   */
+	PAS_DMA_COM_RXCMD = 0x108,	/* Receive Command Register   */
+	PAS_DMA_COM_RXSTA = 0x10c,	/* Receive Status Register    */
+};
+#define PAS_DMA_COM_TXCMD_EN	0x00000001 /* enable */
+#define PAS_DMA_COM_TXSTA_ACT	0x00000001 /* active */
+#define PAS_DMA_COM_RXCMD_EN	0x00000001 /* enable */
+#define PAS_DMA_COM_RXSTA_ACT	0x00000001 /* active */
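+/* A minimal sketch of that config-space access pattern, assuming dma_pdev
+ * points at the DMA PCI function: enable the common receive section with a
+ * read-modify-write of the command register.
+ *
+ *	u32 cmd;
+ *
+ *	pci_read_config_dword(dma_pdev, PAS_DMA_COM_RXCMD, &cmd);
+ *	pci_write_config_dword(dma_pdev, PAS_DMA_COM_RXCMD,
+ *			       cmd | PAS_DMA_COM_RXCMD_EN);
+ */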
+
+
+/* Per-interface and per-channel registers */
+#define _PAS_DMA_RXINT_STRIDE		0x20
+#define PAS_DMA_RXINT_RCMDSTA(i)	(0x200+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_RCMDSTA_EN	0x00000001
+#define    PAS_DMA_RXINT_RCMDSTA_ST	0x00000002
+#define    PAS_DMA_RXINT_RCMDSTA_OO	0x00000100
+#define    PAS_DMA_RXINT_RCMDSTA_BP	0x00000200
+#define    PAS_DMA_RXINT_RCMDSTA_DR	0x00000400
+#define    PAS_DMA_RXINT_RCMDSTA_BT	0x00000800
+#define    PAS_DMA_RXINT_RCMDSTA_TB	0x00001000
+#define    PAS_DMA_RXINT_RCMDSTA_ACT	0x00010000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_M	0xfffe0000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_S	17
+#define PAS_DMA_RXINT_INCR(i)		(0x210+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_INCR_INCR_M	0x0000ffff
+#define    PAS_DMA_RXINT_INCR_INCR_S	0
+#define    PAS_DMA_RXINT_INCR_INCR(x)	((x) & 0x0000ffff)
+#define PAS_DMA_RXINT_BASEL(i)		(0x218+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEL_BRBL(x)	((x) & ~0x3f)
+#define PAS_DMA_RXINT_BASEU(i)		(0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEU_BRBH(x)	((x) & 0xfff)
+#define    PAS_DMA_RXINT_BASEU_SIZ_M	0x3fff0000	/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXINT_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXINT_BASEU_SIZ(x)	(((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
+					 PAS_DMA_RXINT_BASEU_SIZ_M)
+
+
+#define _PAS_DMA_TXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_TXCHAN_TCMDSTA	0x300	/* Command / Status		*/
+#define _PAS_DMA_TXCHAN_CFG	0x304	/* Configuration		*/
+#define _PAS_DMA_TXCHAN_DSCRBU	0x308	/* Descriptor BU Allocation	*/
+#define _PAS_DMA_TXCHAN_INCR	0x310	/* Descriptor increment		*/
+#define _PAS_DMA_TXCHAN_CNT	0x314	/* Descriptor count/offset	*/
+#define _PAS_DMA_TXCHAN_BASEL	0x318	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_TXCHAN_BASEU	0x31c	/*			(high)	*/
+#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_TCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ACT	0x00010000	/* Active */
+#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_CFG_TY_IFACE	0x00000000	/* Type = interface */
+#define    PAS_DMA_TXCHAN_CFG_TATTR_M	0x0000003c
+#define    PAS_DMA_TXCHAN_CFG_TATTR_S	2
+#define    PAS_DMA_TXCHAN_CFG_TATTR(x)	(((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
+					 PAS_DMA_TXCHAN_CFG_TATTR_M)
+#define    PAS_DMA_TXCHAN_CFG_WT_M	0x000001c0
+#define    PAS_DMA_TXCHAN_CFG_WT_S	6
+#define    PAS_DMA_TXCHAN_CFG_WT(x)	(((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
+					 PAS_DMA_TXCHAN_CFG_WT_M)
+#define    PAS_DMA_TXCHAN_CFG_CF	0x00001000	/* Clean first line */
+#define    PAS_DMA_TXCHAN_CFG_CL	0x00002000	/* Clean last line */
+#define    PAS_DMA_TXCHAN_CFG_UP	0x00004000	/* update tx descr when sent */
+#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_TXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_TXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_TXCHAN_BASEU_SIZ_M)
+
+#define _PAS_DMA_RXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_RXCHAN_CCMDSTA	0x800	/* Command / Status		*/
+#define _PAS_DMA_RXCHAN_CFG	0x804	/* Configuration		*/
+#define _PAS_DMA_RXCHAN_INCR	0x810	/* Descriptor increment		*/
+#define _PAS_DMA_RXCHAN_CNT	0x814	/* Descriptor count/offset	*/
+#define _PAS_DMA_RXCHAN_BASEL	0x818	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_RXCHAN_BASEU	0x81c	/*			(high)	*/
+#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ACT	0x00010000	/* Active */
+#define    PAS_DMA_RXCHAN_CCMDSTA_DU	0x00020000
+#define PAS_DMA_RXCHAN_CFG(c)     (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CFG_HBU_M	0x00000380
+#define    PAS_DMA_RXCHAN_CFG_HBU_S	7
+#define    PAS_DMA_RXCHAN_CFG_HBU(x)	(((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
+					 PAS_DMA_RXCHAN_CFG_HBU_M)
+#define PAS_DMA_RXCHAN_INCR(c)    (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEL(c)   (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_RXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_RXCHAN_BASEU(c)   (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_RXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_RXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_RXCHAN_BASEU_SIZ_M)
+
+#define    PAS_STATUS_PCNT_M		0x000000000000ffffull
+#define    PAS_STATUS_PCNT_S		0
+#define    PAS_STATUS_DCNT_M		0x00000000ffff0000ull
+#define    PAS_STATUS_DCNT_S		16
+#define    PAS_STATUS_BPCNT_M		0x0000ffff00000000ull
+#define    PAS_STATUS_BPCNT_S		32
+#define    PAS_STATUS_TIMER		0x1000000000000000ull
+#define    PAS_STATUS_ERROR		0x2000000000000000ull
+#define    PAS_STATUS_SOFT		0x4000000000000000ull
+#define    PAS_STATUS_INT		0x8000000000000000ull
+
+#define PAS_IOB_DMA_RXCH_CFG(i)		(0x1100 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_TXCH_CFG(i)		(0x1200 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_RXCH_STAT(i)	(0x1300 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_TXCH_STAT(i)	(0x1400 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_RXCH_RESET(i)	(0x1500 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S	0
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_RXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_RXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_RXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_RXCH_RESET_PINTC		0x00000001
+#define PAS_IOB_DMA_TXCH_RESET(i)	(0x1600 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S	0
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_TXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_TXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_TXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_TXCH_RESET_PINTC		0x00000001
+
+#define PAS_IOB_DMA_COM_TIMEOUTCFG		0x1700
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M	0x00ffffff
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S	0
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)	(((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
+						 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
+
+/* Transmit descriptor fields */
+#define	XCT_MACTX_T		0x8000000000000000ull
+#define	XCT_MACTX_ST		0x4000000000000000ull
+#define XCT_MACTX_NORES		0x0000000000000000ull
+#define XCT_MACTX_8BRES		0x1000000000000000ull
+#define XCT_MACTX_24BRES	0x2000000000000000ull
+#define XCT_MACTX_40BRES	0x3000000000000000ull
+#define XCT_MACTX_I		0x0800000000000000ull
+#define XCT_MACTX_O		0x0400000000000000ull
+#define XCT_MACTX_E		0x0200000000000000ull
+#define XCT_MACTX_VLAN_M	0x0180000000000000ull
+#define XCT_MACTX_VLAN_NOP	0x0000000000000000ull
+#define XCT_MACTX_VLAN_REMOVE	0x0080000000000000ull
+#define XCT_MACTX_VLAN_INSERT   0x0100000000000000ull
+#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000ull
+#define XCT_MACTX_CRC_M		0x0060000000000000ull
+#define XCT_MACTX_CRC_NOP	0x0000000000000000ull
+#define XCT_MACTX_CRC_INSERT	0x0020000000000000ull
+#define XCT_MACTX_CRC_PAD	0x0040000000000000ull
+#define XCT_MACTX_CRC_REPLACE	0x0060000000000000ull
+#define XCT_MACTX_SS		0x0010000000000000ull
+#define XCT_MACTX_LLEN_M	0x00007fff00000000ull
+#define XCT_MACTX_LLEN_S	32ull
+#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & \
+				 XCT_MACTX_LLEN_M)
+#define XCT_MACTX_IPH_M		0x00000000f8000000ull
+#define XCT_MACTX_IPH_S		27ull
+#define XCT_MACTX_IPH(x)	((((long)(x)) << XCT_MACTX_IPH_S) & \
+				 XCT_MACTX_IPH_M)
+#define XCT_MACTX_IPO_M		0x0000000007c00000ull
+#define XCT_MACTX_IPO_S		22ull
+#define XCT_MACTX_IPO(x)	((((long)(x)) << XCT_MACTX_IPO_S) & \
+				 XCT_MACTX_IPO_M)
+#define XCT_MACTX_CSUM_M	0x0000000000000060ull
+#define XCT_MACTX_CSUM_NOP	0x0000000000000000ull
+#define XCT_MACTX_CSUM_TCP	0x0000000000000040ull
+#define XCT_MACTX_CSUM_UDP	0x0000000000000060ull
+#define XCT_MACTX_V6		0x0000000000000010ull
+#define XCT_MACTX_C		0x0000000000000004ull
+#define XCT_MACTX_AL2		0x0000000000000002ull
+
+/* Receive descriptor fields */
+#define	XCT_MACRX_T		0x8000000000000000ull
+#define	XCT_MACRX_ST		0x4000000000000000ull
+#define XCT_MACRX_NORES		0x0000000000000000ull
+#define XCT_MACRX_8BRES		0x1000000000000000ull
+#define XCT_MACRX_24BRES	0x2000000000000000ull
+#define XCT_MACRX_40BRES	0x3000000000000000ull
+#define XCT_MACRX_O		0x0400000000000000ull
+#define XCT_MACRX_E		0x0200000000000000ull
+#define XCT_MACRX_FF		0x0100000000000000ull
+#define XCT_MACRX_PF		0x0080000000000000ull
+#define XCT_MACRX_OB		0x0040000000000000ull
+#define XCT_MACRX_OD		0x0020000000000000ull
+#define XCT_MACRX_FS		0x0010000000000000ull
+#define XCT_MACRX_NB_M		0x000fc00000000000ull
+#define XCT_MACRX_NB_S		46ULL
+#define XCT_MACRX_NB(x)		((((long)(x)) << XCT_MACRX_NB_S) & \
+				 XCT_MACRX_NB_M)
+#define XCT_MACRX_LLEN_M	0x00003fff00000000ull
+#define XCT_MACRX_LLEN_S	32ULL
+#define XCT_MACRX_LLEN(x)	((((long)(x)) << XCT_MACRX_LLEN_S) & \
+				 XCT_MACRX_LLEN_M)
+#define XCT_MACRX_CRC		0x0000000080000000ull
+#define XCT_MACRX_LEN_M		0x0000000060000000ull
+#define XCT_MACRX_LEN_TOOSHORT	0x0000000020000000ull
+#define XCT_MACRX_LEN_BELOWMIN	0x0000000040000000ull
+#define XCT_MACRX_LEN_TRUNC	0x0000000060000000ull
+#define XCT_MACRX_CAST_M	0x0000000018000000ull
+#define XCT_MACRX_CAST_UNI	0x0000000000000000ull
+#define XCT_MACRX_CAST_MULTI	0x0000000008000000ull
+#define XCT_MACRX_CAST_BROAD	0x0000000010000000ull
+#define XCT_MACRX_CAST_PAUSE	0x0000000018000000ull
+#define XCT_MACRX_VLC_M		0x0000000006000000ull
+#define XCT_MACRX_FM		0x0000000001000000ull
+#define XCT_MACRX_HTY_M		0x0000000000c00000ull
+#define XCT_MACRX_HTY_IPV4_OK	0x0000000000000000ull
+#define XCT_MACRX_HTY_IPV6 	0x0000000000400000ull
+#define XCT_MACRX_HTY_IPV4_BAD	0x0000000000800000ull
+#define XCT_MACRX_HTY_NONIP	0x0000000000c00000ull
+#define XCT_MACRX_IPP_M		0x00000000003f0000ull
+#define XCT_MACRX_IPP_S		16
+#define XCT_MACRX_CSUM_M	0x000000000000ffffull
+#define XCT_MACRX_CSUM_S	0
+
+#define XCT_PTR_T		0x8000000000000000ull
+#define XCT_PTR_LEN_M		0x7ffff00000000000ull
+#define XCT_PTR_LEN_S		44
+#define XCT_PTR_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & \
+				 XCT_PTR_LEN_M)
+#define XCT_PTR_ADDR_M		0x00000fffffffffffull
+#define XCT_PTR_ADDR_S		0
+#define XCT_PTR_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & \
+				 XCT_PTR_ADDR_M)
+
+/* Receive interface buffer fields */
+#define XCT_RXB_LEN_M		0x0ffff00000000000ull
+#define XCT_RXB_LEN_S		44
+#define XCT_RXB_LEN(x)		((((long)(x)) << XCT_RXB_LEN_S) & XCT_RXB_LEN_M)
+#define XCT_RXB_ADDR_M		0x00000fffffffffffull
+#define XCT_RXB_ADDR_S		0
+#define XCT_RXB_ADDR(x)		((((long)(x)) << XCT_RXB_ADDR_S) & XCT_RXB_ADDR_M)
+
+
+#endif /* PASEMI_MAC_H */
Index: merge/MAINTAINERS
===================================================================
--- merge.orig/MAINTAINERS
+++ merge/MAINTAINERS
@@ -2490,6 +2490,12 @@ M:	olof@lixom.net
 L:	i2c@lm-sensors.org
 S:	Maintained
 
+PA SEMI ETHERNET DRIVER
+P:	Olof Johansson
+M:	olof@lixom.net
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 PARALLEL PORT SUPPORT
 P:	Phil Blundell
 M:	philb@gnu.org
Index: merge/include/linux/pci_ids.h
===================================================================
--- merge.orig/include/linux/pci_ids.h
+++ merge/include/linux/pci_ids.h
@@ -2064,6 +2064,8 @@
 #define PCI_VENDOR_ID_TDI               0x192E
 #define PCI_DEVICE_ID_TDI_EHCI          0x0101
 
+#define PCI_VENDOR_ID_PASEMI		0x1959
+
 #define PCI_VENDOR_ID_JMICRON		0x197B
 #define PCI_DEVICE_ID_JMICRON_JMB360	0x2360
 #define PCI_DEVICE_ID_JMICRON_JMB361	0x2361

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] [v4] PA Semi PWRficient Ethernet driver
  2007-02-01  3:43     ` [PATCH] [v4] " Olof Johansson
@ 2007-02-02 13:26       ` Jeff Garzik
  0 siblings, 0 replies; 23+ messages in thread
From: Jeff Garzik @ 2007-02-02 13:26 UTC (permalink / raw)
  To: Olof Johansson
  Cc: netdev, Stephen Hemminger, Francois Romieu, Christoph Hellwig,
	Ingo Oeser

Olof Johansson wrote:
> Driver for the PA Semi PWRficient on-chip Ethernet (1/10G)
> 
> Basic enablement, will be complemented with performance enhancements
> over time. PHY support will be added as well.
> 
> Signed-off-by: Olof Johansson <olof@lixom.net>

patch applied.  You may now send diffs for all future changes

	Jeff




^ permalink raw reply	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2007-02-02 13:27 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-01-29  6:08 [PATCH] PA Semi PWRficient Ethernet driver Olof Johansson
2007-01-29 18:22 ` Stephen Hemminger
2007-01-30  1:41   ` Olof Johansson
2007-01-30  2:34     ` Jeff Garzik
2007-01-30 20:53       ` Olof Johansson
2007-01-29 22:35 ` Francois Romieu
2007-01-30  1:41   ` Olof Johansson
2007-01-30 10:06     ` Christoph Hellwig
2007-01-30 15:34       ` Olof Johansson
2007-01-30 21:45     ` Francois Romieu
2007-01-31  4:52       ` Olof Johansson
2007-01-30  1:44 ` [PATCH] [v2] PA " Olof Johansson
2007-01-31  5:44   ` [PATCH] [v3] PA " Olof Johansson
2007-01-31 10:34     ` Jeff Garzik
2007-02-01  3:40       ` Olof Johansson
2007-01-31 12:44     ` Ingo Oeser
2007-01-31 15:16       ` Olof Johansson
2007-01-31 18:38     ` Stephen Hemminger
2007-02-01  3:20       ` Olof Johansson
2007-02-01  3:43     ` [PATCH] [v4] " Olof Johansson
2007-02-02 13:26       ` Jeff Garzik
2007-01-30 10:03 ` [PATCH] " Christoph Hellwig
2007-01-30 15:36   ` Olof Johansson
