* [PATCH 1/6] mv643xx_eth: remove redundant/useless code
2004-12-13 22:09 [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
@ 2004-12-13 22:12 ` Dale Farnsworth
2004-12-13 22:14 ` [PATCH 2/6] mv643xx_eth: replace fixed-count spin delays Dale Farnsworth
` (6 subsequent siblings)
7 siblings, 0 replies; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-13 22:12 UTC (permalink / raw)
To: linux-kernel, Jeff Garzik
Cc: Ralf Baechle, Russell King, Manish Lachwani, Brian Waite, Steven J. Hill
This patch removes code that is redundant or useless.
The biggest area is in pre-initializing the RX and TX descriptor
rings, which only obfuscates the driver since the ring data is
overwritten without being used.
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.c 2004-12-10 15:24:13.000000000 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c 2004-12-13 14:29:54.292321344 -0700
@@ -24,31 +24,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/fcntl.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ip.h>
#include <linux/init.h>
-#include <linux/in.h>
-#include <linux/pci.h>
-#include <linux/workqueue.h>
-#include <asm/smp.h>
-#include <linux/skbuff.h>
#include <linux/tcp.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <net/ip.h>
-
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/types.h>
@@ -387,10 +365,9 @@
* Output : number of served packets
*/
#ifdef MV64340_NAPI
-static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max,
- int budget)
+static int mv64340_eth_receive_queue(struct net_device *dev, int budget)
#else
-static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max)
+static int mv64340_eth_receive_queue(struct net_device *dev)
#endif
{
struct mv64340_private *mp = netdev_priv(dev);
@@ -402,7 +379,7 @@
#ifdef MV64340_NAPI
while (eth_port_receive(mp, &pkt_info) == ETH_OK && budget > 0) {
#else
- while ((--max) && eth_port_receive(mp, &pkt_info) == ETH_OK) {
+ while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
#endif
mp->rx_ring_skbs--;
received_packets++;
@@ -661,7 +638,7 @@
{
struct mv64340_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
- int err = err;
+ int err;
spin_lock_irq(&mp->lock);
@@ -708,56 +685,25 @@
*
* INPUT:
* struct mv64340_private *mp Ethernet Port Control srtuct.
- * int rx_desc_num Number of Rx descriptors
- * int rx_buff_size Size of Rx buffer
- * unsigned int rx_desc_base_addr Rx descriptors memory area base addr.
- * unsigned int rx_buff_base_addr Rx buffer memory area base addr.
*
* OUTPUT:
* The routine updates the Ethernet port control struct with information
* regarding the Rx descriptors and buffers.
*
* RETURN:
- * false if the given descriptors memory area is not aligned according to
- * Ethernet SDMA specifications.
- * true otherwise.
+ * None.
*/
-static int ether_init_rx_desc_ring(struct mv64340_private * mp,
- unsigned long rx_buff_base_addr)
+static void ether_init_rx_desc_ring(struct mv64340_private * mp)
{
- unsigned long buffer_addr = rx_buff_base_addr;
volatile struct eth_rx_desc *p_rx_desc;
int rx_desc_num = mp->rx_ring_size;
- unsigned long rx_desc_base_addr = (unsigned long) mp->p_rx_desc_area;
- int rx_buff_size = 1536; /* Dummy, will be replaced later */
int i;
- p_rx_desc = (struct eth_rx_desc *) rx_desc_base_addr;
-
- /* Rx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
- if (rx_buff_base_addr & 0xf)
- return 0;
-
- /* Rx buffers are limited to 64K bytes and Minimum size is 8 bytes */
- if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
- return 0;
-
- /* Rx buffers must be 64-bit aligned. */
- if ((rx_buff_base_addr + rx_buff_size) & 0x7)
- return 0;
-
- /* initialize the Rx descriptors ring */
+ /* initialize the next_desc_ptr links in the Rx descriptors ring */
+ p_rx_desc = (struct eth_rx_desc *) mp->p_rx_desc_area;
for (i = 0; i < rx_desc_num; i++) {
- p_rx_desc[i].buf_size = rx_buff_size;
- p_rx_desc[i].byte_cnt = 0x0000;
- p_rx_desc[i].cmd_sts =
- ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
- p_rx_desc[i].buf_ptr = buffer_addr;
-
- mp->rx_skb[i] = NULL;
- buffer_addr += rx_buff_size;
}
/* Save Rx desc pointer to driver struct. */
@@ -766,9 +712,8 @@
mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
+ /* Add the queue to the list of RX queues of this port */
mp->port_rx_queue_command |= 1;
-
- return 1;
}
/*
@@ -785,57 +730,37 @@
*
* INPUT:
* struct mv64340_private *mp Ethernet Port Control srtuct.
- * int tx_desc_num Number of Tx descriptors
- * int tx_buff_size Size of Tx buffer
- * unsigned int tx_desc_base_addr Tx descriptors memory area base addr.
*
* OUTPUT:
* The routine updates the Ethernet port control struct with information
* regarding the Tx descriptors and buffers.
*
* RETURN:
- * false if the given descriptors memory area is not aligned according to
- * Ethernet SDMA specifications.
- * true otherwise.
+ * None.
*/
-static int ether_init_tx_desc_ring(struct mv64340_private *mp)
+static void ether_init_tx_desc_ring(struct mv64340_private *mp)
{
- unsigned long tx_desc_base_addr = (unsigned long) mp->p_tx_desc_area;
int tx_desc_num = mp->tx_ring_size;
struct eth_tx_desc *p_tx_desc;
int i;
- /* Tx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
- if (tx_desc_base_addr & 0xf)
- return 0;
-
- /* save the first desc pointer to link with the last descriptor */
- p_tx_desc = (struct eth_tx_desc *) tx_desc_base_addr;
-
- /* Initialize the Tx descriptors ring */
+ /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+ p_tx_desc = (struct eth_tx_desc *) mp->p_tx_desc_area;
for (i = 0; i < tx_desc_num; i++) {
- p_tx_desc[i].byte_cnt = 0x0000;
- p_tx_desc[i].l4i_chk = 0x0000;
- p_tx_desc[i].cmd_sts = 0x00000000;
p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
- p_tx_desc[i].buf_ptr = 0x00000000;
- mp->tx_skb[i] = NULL;
}
- /* Set Tx desc pointer in driver struct. */
mp->tx_curr_desc_q = 0;
mp->tx_used_desc_q = 0;
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
mp->tx_first_desc_q = 0;
#endif
- /* Init Tx ring base and size parameters */
+
mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
/* Add the queue to the list of Tx queues of this port */
mp->port_tx_queue_command |= 1;
-
- return 1;
}
/* Helper function for mv64340_eth_open */
@@ -917,8 +842,7 @@
}
memset(mp->p_rx_desc_area, 0, size);
- if (!(ether_init_rx_desc_ring(mp, 0)))
- panic("%s: Error initializing RX Ring", dev->name);
+ ether_init_rx_desc_ring(mp);
mv64340_eth_rx_task(dev); /* Fill RX ring with skb's */
@@ -1112,7 +1036,7 @@
orig_budget = *budget;
if (orig_budget > dev->quota)
orig_budget = dev->quota;
- work_done = mv64340_eth_receive_queue(dev, 0, orig_budget);
+ work_done = mv64340_eth_receive_queue(dev, orig_budget);
mp->rx_task.func(dev);
*budget -= work_done;
dev->quota -= work_done;
@@ -1403,8 +1327,6 @@
static void mv64340_eth_remove(struct net_device *dev)
{
- struct mv64340_private *mp = netdev_priv(dev);
-
unregister_netdev(dev);
flush_scheduled_work();
free_netdev(dev);
@@ -1627,18 +1549,6 @@
#define ETH_ENABLE_TX_QUEUE(eth_port) \
MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1)
-#define ETH_DISABLE_TX_QUEUE(eth_port) \
- MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), \
- (1 << 8))
-
-#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
- MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), \
- (1 << rx_queue))
-
-#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
- MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), \
- (1 << (8 + rx_queue)))
-
#define LINK_UP_TIMEOUT 100000
#define PHY_BUSY_TIMEOUT 10000000
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 2/6] mv643xx_eth: replace fixed-count spin delays
2004-12-13 22:09 [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
2004-12-13 22:12 ` [PATCH 1/6] mv643xx_eth: remove redundant/useless code Dale Farnsworth
@ 2004-12-13 22:14 ` Dale Farnsworth
2004-12-14 23:11 ` Christoph Hellwig
2004-12-13 22:15 ` [PATCH 3/6] mv643xx_eth: fix hw checksum generation on transmit Dale Farnsworth
` (5 subsequent siblings)
7 siblings, 1 reply; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-13 22:14 UTC (permalink / raw)
To: linux-kernel, Jeff Garzik
Cc: Ralf Baechle, Russell King, Manish Lachwani, Brian Waite, Steven J. Hill
This patch removes spin delays (count to 1000000, ugh) and instead waits
with udelay or msleep for hardware flags to change.
It also adds a spinlock to protect access to the MV64340_ETH_SMI_REG,
which is shared across ports.
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.c 2004-12-13 14:29:54.292321344 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c 2004-12-13 14:29:55.829024727 -0700
@@ -10,6 +10,9 @@
*
* Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
*
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Dale Farnsworth <dale@farnsworth.org>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
@@ -28,10 +31,12 @@
#include <linux/tcp.h>
#include <linux/etherdevice.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
+#include <asm/delay.h>
#include "mv643xx_eth.h"
/*
@@ -66,6 +71,8 @@
unsigned char prom_mac_addr_base[6];
unsigned long mv64340_sram_base;
+static spinlock_t mv64340_eth_phy_lock = SPIN_LOCK_UNLOCKED;
+
/*
* Changes MTU (maximum transfer unit) of the gigabit ethenret port
*
@@ -770,6 +777,7 @@
unsigned int port_num = mp->port_num;
u32 phy_reg_data;
unsigned int size;
+ int i;
/* Stop RX Queues */
MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
@@ -864,12 +872,16 @@
(MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num))
& 0xfff1ffff));
- /* Check Link status on phy */
- eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
- if (!(phy_reg_data & 0x20))
- netif_stop_queue(dev);
- else
- netif_start_queue(dev);
+ /* wait up to 1 second for link to come up */
+ for (i=0; i<10; i++) {
+ eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
+ if (phy_reg_data & 0x20) {
+ netif_start_queue(dev);
+ return 0;
+ }
+ msleep(100); /* sleep 1/10 second */
+ }
+ netif_stop_queue(dev);
return 0;
}
@@ -1549,9 +1561,6 @@
#define ETH_ENABLE_TX_QUEUE(eth_port) \
MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1)
-#define LINK_UP_TIMEOUT 100000
-#define PHY_BUSY_TIMEOUT 10000000
-
/* locals */
/* PHY routines */
@@ -1888,38 +1897,26 @@
* ethernet_phy_reset - Reset Ethernet port PHY.
*
* DESCRIPTION:
- * This routine utilize the SMI interface to reset the ethernet port PHY.
- * The routine waits until the link is up again or link up is timeout.
+ * This routine utilizes the SMI interface to reset the ethernet port PHY.
*
* INPUT:
* unsigned int eth_port_num Ethernet Port number.
*
* OUTPUT:
- * The ethernet port PHY renew its link.
+ * The PHY is reset.
*
* RETURN:
* None.
*
*/
-static int ethernet_phy_reset(unsigned int eth_port_num)
+static void ethernet_phy_reset(unsigned int eth_port_num)
{
- unsigned int time_out = 50;
unsigned int phy_reg_data;
/* Reset the PHY */
eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
-
- /* Poll on the PHY LINK */
- do {
- eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data);
-
- if (time_out-- == 0)
- return 0;
- } while (!(phy_reg_data & 0x20));
-
- return 1;
}
/*
@@ -1940,62 +1937,48 @@
* None.
*
*/
-static void eth_port_reset(unsigned int eth_port_num)
+static void eth_port_reset(unsigned int port_num)
{
unsigned int reg_data;
/* Stop Tx port activity. Check port Tx activity. */
- reg_data =
- MV_READ(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port_num));
+ reg_data = MV_READ(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num));
if (reg_data & 0xFF) {
/* Issue stop command for active channels only */
- MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG
- (eth_port_num), (reg_data << 8));
+ MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
+ (reg_data << 8));
/* Wait for all Tx activity to terminate. */
- do {
- /* Check port cause register that all Tx queues are stopped */
- reg_data =
- MV_READ
- (MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG
- (eth_port_num));
- }
- while (reg_data & 0xFF);
+ /* Check port cause register that all Tx queues are stopped */
+ while (MV_READ(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
+ & 0xFF)
+ udelay(10);
}
/* Stop Rx port activity. Check port Rx activity. */
- reg_data =
- MV_READ(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
- (eth_port_num));
+ reg_data = MV_READ(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num));
if (reg_data & 0xFF) {
/* Issue stop command for active channels only */
- MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
- (eth_port_num), (reg_data << 8));
+ MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
+ (reg_data << 8));
/* Wait for all Rx activity to terminate. */
- do {
- /* Check port cause register that all Rx queues are stopped */
- reg_data =
- MV_READ
- (MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
- (eth_port_num));
- }
- while (reg_data & 0xFF);
+ /* Check port cause register that all Rx queues are stopped */
+ while (MV_READ(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
+ & 0xFF)
+ udelay(10);
}
/* Clear all MIB counters */
- eth_clear_mib_counters(eth_port_num);
+ eth_clear_mib_counters(port_num);
/* Reset the Enable bit in the Configuration Register */
- reg_data =
- MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num));
+ reg_data = MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num));
reg_data &= ~ETH_SERIAL_PORT_ENABLE;
- MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num), reg_data);
-
- return;
+ MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
}
/*
@@ -2054,6 +2037,8 @@
return eth_config_reg;
}
+#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
+
/*
* eth_port_read_smi_reg - Read PHY registers
@@ -2063,7 +2048,7 @@
* order to perform PHY register read.
*
* INPUT:
- * unsigned int eth_port_num Ethernet Port number.
+ * unsigned int port_num Ethernet Port number.
* unsigned int phy_reg PHY register address offset.
* unsigned int *value Register value buffer.
*
@@ -2075,41 +2060,41 @@
* true otherwise.
*
*/
-static int eth_port_read_smi_reg(unsigned int eth_port_num,
+static void eth_port_read_smi_reg(unsigned int port_num,
unsigned int phy_reg, unsigned int *value)
{
- int phy_addr = ethernet_phy_get(eth_port_num);
- unsigned int time_out = PHY_BUSY_TIMEOUT;
- unsigned int reg_value;
-
- /* first check that it is not busy */
- do {
- reg_value = MV_READ(MV64340_ETH_SMI_REG);
- if (time_out-- == 0)
- return 0;
- } while (reg_value & ETH_SMI_BUSY);
+ int phy_addr = ethernet_phy_get(port_num);
+ unsigned long flags;
+ int i;
+
+ /* the SMI register is a shared resource */
+ spin_lock_irqsave(&mv64340_eth_phy_lock, flags);
- /* not busy */
+ /* wait for the SMI register to become available */
+ for (i=0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk("mv64340 PHY busy timeout, port %d\n", port_num);
+ goto out;
+ }
+ udelay(10);
+ }
MV_WRITE(MV64340_ETH_SMI_REG,
(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
- time_out = PHY_BUSY_TIMEOUT; /* initialize the time out var again */
-
- do {
- reg_value = MV_READ(MV64340_ETH_SMI_REG);
- if (time_out-- == 0)
- return 0;
- } while (reg_value & ETH_SMI_READ_VALID);
-
- /* Wait for the data to update in the SMI register */
- for (time_out = 0; time_out < PHY_BUSY_TIMEOUT; time_out++);
-
- reg_value = MV_READ(MV64340_ETH_SMI_REG);
-
- *value = reg_value & 0xffff;
+ /* now wait for the data to be valid */
+ for (i=0; !(MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk("mv64340 PHY read timeout, port %d\n", port_num);
+ goto out;
+ }
+ udelay(10);
+ }
+
+ *value = MV_READ(MV64340_ETH_SMI_REG) & 0xffff;
- return 1;
+out:
+ spin_unlock_irqrestore(&mv64340_eth_phy_lock, flags);
}
/*
@@ -2132,27 +2117,32 @@
* true otherwise.
*
*/
-static int eth_port_write_smi_reg(unsigned int eth_port_num,
+static void eth_port_write_smi_reg(unsigned int eth_port_num,
unsigned int phy_reg, unsigned int value)
{
- unsigned int time_out = PHY_BUSY_TIMEOUT;
- unsigned int reg_value;
int phy_addr;
+ int i;
+ unsigned long flags;
phy_addr = ethernet_phy_get(eth_port_num);
- /* first check that it is not busy */
- do {
- reg_value = MV_READ(MV64340_ETH_SMI_REG);
- if (time_out-- == 0)
- return 0;
- } while (reg_value & ETH_SMI_BUSY);
+ /* the SMI register is a shared resource */
+ spin_lock_irqsave(&mv64340_eth_phy_lock, flags);
+
+ /* wait for the SMI register to become available */
+ for (i=0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk("mv64340 PHY busy timeout, port %d\n",
+ eth_port_num);
+ goto out;
+ }
+ udelay(10);
+ }
- /* not busy */
MV_WRITE(MV64340_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
ETH_SMI_OPCODE_WRITE | (value & 0xffff));
-
- return 1;
+out:
+ spin_unlock_irqrestore(&mv64340_eth_phy_lock, flags);
}
/*
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.h
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.h 2004-12-13 14:29:50.487055840 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.h 2004-12-13 14:29:55.829024727 -0700
@@ -576,13 +576,13 @@
unsigned char *p_addr);
/* PHY and MIB routines */
-static int ethernet_phy_reset(unsigned int eth_port_num);
+static void ethernet_phy_reset(unsigned int eth_port_num);
-static int eth_port_write_smi_reg(unsigned int eth_port_num,
+static void eth_port_write_smi_reg(unsigned int eth_port_num,
unsigned int phy_reg,
unsigned int value);
-static int eth_port_read_smi_reg(unsigned int eth_port_num,
+static void eth_port_read_smi_reg(unsigned int eth_port_num,
unsigned int phy_reg,
unsigned int *value);
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 2/6] mv643xx_eth: replace fixed-count spin delays
2004-12-13 22:14 ` [PATCH 2/6] mv643xx_eth: replace fixed-count spin delays Dale Farnsworth
@ 2004-12-14 23:11 ` Christoph Hellwig
2004-12-15 18:03 ` Dale Farnsworth
0 siblings, 1 reply; 18+ messages in thread
From: Christoph Hellwig @ 2004-12-14 23:11 UTC (permalink / raw)
To: Dale Farnsworth
Cc: linux-kernel, Jeff Garzik, Ralf Baechle, Manish Lachwani,
Brian Waite, Steven J. Hill
On Mon, Dec 13, 2004 at 03:14:31PM -0700, Dale Farnsworth wrote:
> This patch removes spin delays (count to 1000000, ugh) and instead waits
> with udelay or msleep for hardware flags to change.
>
> It also adds a spinlock to protect access to the MV64340_ETH_SMI_REG,
> which is shared across ports.
Care to add a comment with this information? Driver-global locks are
something we tend to avoid, and cases like this one where it's actually
necessary should be properly documented.
> + for (i=0; i<10; i++) {
This is missing some space, should be:
for (i = 0; i < 10; i++) {
> +#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
Put this into the header or at least ontop of the file?
> + /* wait for the SMI register to become available */
> + for (i=0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
Missing spaces again.
> + for (i=0; !(MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
Ditto. (And a few more)
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 2/6] mv643xx_eth: replace fixed-count spin delays
2004-12-14 23:11 ` Christoph Hellwig
@ 2004-12-15 18:03 ` Dale Farnsworth
0 siblings, 0 replies; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-15 18:03 UTC (permalink / raw)
To: Christoph Hellwig, linux-kernel
On Tue, Dec 14, 2004 at 11:11:01PM +0000, Christoph Hellwig wrote:
> On Mon, Dec 13, 2004 at 03:14:31PM -0700, Dale Farnsworth wrote:
> > This patch removes spin delays (count to 1000000, ugh) and instead waits
> > with udelay or msleep for hardware flags to change.
> >
> > It also adds a spinlock to protect access to the MV64340_ETH_SMI_REG,
> > which is shared across ports.
>
> Care to add a comment with this information? Driver-global locks are
> something we tend to avoid, and cases like this one where it's actually
> nessecary should be properly documented.
I did have comments where the spinlock is used. I'll add one at the
spinlock definition as well.
>
> > + for (i=0; i<10; i++) {
>
> This is missing some space, should be:
>
> for (i = 0; i < 10; i++) {
Ok. I'll fix.
> > +#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
>
> Put this into the header or at least ontop of the file?
Will do.
> > + /* wait for the SMI register to become available */
> > + for (i=0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
>
> Missing spaces again.
>
> > + for (i=0; !(MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
>
> Dito. (And a few more)
I'll fix them all.
Thanks,
-Dale
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 3/6] mv643xx_eth: fix hw checksum generation on transmit
2004-12-13 22:09 [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
2004-12-13 22:12 ` [PATCH 1/6] mv643xx_eth: remove redundant/useless code Dale Farnsworth
2004-12-13 22:14 ` [PATCH 2/6] mv643xx_eth: replace fixed-count spin delays Dale Farnsworth
@ 2004-12-13 22:15 ` Dale Farnsworth
[not found] ` <41BE1744.4060502@penguin.mvista>
2004-12-14 23:15 ` Christoph Hellwig
2004-12-13 22:18 ` [PATCH 4/6] mv643xx_eth: Convert from pci_map_* to dma_map_* interface Dale Farnsworth
` (4 subsequent siblings)
7 siblings, 2 replies; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-13 22:15 UTC (permalink / raw)
To: linux-kernel, Jeff Garzik
Cc: Ralf Baechle, Russell King, Manish Lachwani, Brian Waite, Steven J. Hill
This patch fixes the code that enables hardware checksum generation.
The previous code has so many problems that it appears to never have worked.
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.c 2004-12-13 14:29:55.829024727 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c 2004-12-13 14:30:34.651531163 -0700
@@ -29,6 +29,7 @@
*/
#include <linux/init.h>
#include <linux/tcp.h>
+#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/bitops.h>
#include <linux/delay.h>
@@ -57,6 +58,11 @@
#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
#endif
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
+#else
+#define MAX_DESCS_PER_SKB 1
+#endif
/* Static function declarations */
static int mv64340_eth_real_open(struct net_device *);
@@ -333,25 +339,29 @@
* last skb releases the whole chain.
*/
if (pkt_info.return_info) {
- dev_kfree_skb_irq((struct sk_buff *)
- pkt_info.return_info);
- released = 0;
if (skb_shinfo(pkt_info.return_info)->nr_frags)
pci_unmap_page(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt, PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, PCI_DMA_TODEVICE);
- if (mp->tx_ring_skbs != 1)
- mp->tx_ring_skbs--;
+ dev_kfree_skb_irq((struct sk_buff *)
+ pkt_info.return_info);
+ released = 0;
+
+ /*
+ * Decrement the number of outstanding skbs counter on
+ * the TX queue.
+ */
+ if (mp->tx_ring_skbs == 0)
+ panic("ERROR - TX outstanding SKBs"
+ "counter is corrupted");
+ mp->tx_ring_skbs--;
} else
pci_unmap_page(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt, PCI_DMA_TODEVICE);
- /*
- * Decrement the number of outstanding skbs counter on
- * the TX queue.
- */
- if (mp->tx_ring_skbs == 0)
- panic("ERROR - TX outstanding SKBs counter is corrupted");
}
@@ -489,7 +499,8 @@
/* UDP change : We may need this */
if ((eth_int_cause_ext & 0x0000ffff) &&
(mv64340_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
- (MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1))
+ (MV64340_TX_QUEUE_SIZE >
+ mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
netif_wake_queue(dev);
#ifdef MV64340_NAPI
} else {
@@ -1004,22 +1015,27 @@
while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
if (pkt_info.return_info) {
- dev_kfree_skb_irq((struct sk_buff *)
- pkt_info.return_info);
if (skb_shinfo(pkt_info.return_info)->nr_frags)
pci_unmap_page(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt,
PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt,
+ PCI_DMA_TODEVICE);
- if (mp->tx_ring_skbs != 1)
- mp->tx_ring_skbs--;
+ dev_kfree_skb_irq((struct sk_buff *)
+ pkt_info.return_info);
+
+ if (mp->tx_ring_skbs != 0)
+ mp->tx_ring_skbs--;
} else
pci_unmap_page(NULL, pkt_info.buf_ptr, pkt_info.byte_cnt,
PCI_DMA_TODEVICE);
}
if (netif_queue_stopped(dev) &&
- MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1)
+ MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)
netif_wake_queue(dev);
}
@@ -1118,39 +1134,75 @@
/* Update packet info data structure -- DMA owned, first last */
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
- if (!skb_shinfo(skb)->nr_frags || (skb_shinfo(skb)->nr_frags > 3)) {
-#endif
- pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
- ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
+ if (!skb_shinfo(skb)->nr_frags) {
+ if (skb->ip_summed != CHECKSUM_HW)
+ pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
+ ETH_TX_FIRST_DESC |
+ ETH_TX_LAST_DESC;
+ else {
+ u32 ipheader = skb->nh.iph->ihl << 11;
+ pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
+ ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC |
+ ETH_GEN_TCP_UDP_CHECKSUM |
+ ETH_GEN_IP_V_4_CHECKSUM |
+ ipheader;
+ /* CPU already calculated pseudo header checksum. */
+ if (skb->nh.iph->protocol == IPPROTO_UDP) {
+ pkt_info.cmd_sts |= ETH_UDP_FRAME;
+ pkt_info.l4i_chk = skb->h.uh->check;
+ }
+ else if (skb->nh.iph->protocol == IPPROTO_TCP)
+ pkt_info.l4i_chk = skb->h.th->check;
+ else {
+ printk(KERN_ERR
+ "%s: chksum proto != TCP or UDP\n",
+ dev->name);
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return 1;
+ }
+ }
pkt_info.byte_cnt = skb->len;
pkt_info.buf_ptr = pci_map_single(0, skb->data, skb->len,
PCI_DMA_TODEVICE);
-
-
pkt_info.return_info = skb;
status = eth_port_send(mp, &pkt_info);
if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
printk(KERN_ERR "%s: Error on transmitting packet\n",
dev->name);
mp->tx_ring_skbs++;
-#ifdef MV64340_CHECKSUM_OFFLOAD_TX
} else {
unsigned int frag;
- u32 ipheader;
+ u32 ipheader = skb->nh.iph->ihl << 11;
/* first frag which is skb header */
pkt_info.byte_cnt = skb_headlen(skb);
pkt_info.buf_ptr = pci_map_single(0, skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE);
pkt_info.return_info = 0;
- ipheader = skb->nh.iph->ihl << 11;
- pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
- ETH_GEN_TCP_UDP_CHECKSUM |
+ pkt_info.cmd_sts = ETH_TX_FIRST_DESC;
+
+ if (skb->ip_summed == CHECKSUM_HW) {
+ /* CPU already calculated pseudo header checksum. */
+ pkt_info.cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
ETH_GEN_IP_V_4_CHECKSUM |
- ipheader;
- /* CPU already calculated pseudo header checksum. So, use it */
- pkt_info.l4i_chk = skb->h.th->check;
+ ipheader;
+ /* CPU already calculated pseudo header checksum. */
+ if (skb->nh.iph->protocol == IPPROTO_UDP) {
+ pkt_info.cmd_sts |= ETH_UDP_FRAME;
+ pkt_info.l4i_chk = skb->h.uh->check;
+ }
+ else if (skb->nh.iph->protocol == IPPROTO_TCP)
+ pkt_info.l4i_chk = skb->h.th->check;
+ else {
+ printk(KERN_ERR
+ "%s: chksum proto != TCP or UDP\n",
+ dev->name);
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return 1;
+ }
+ }
+
status = eth_port_send(mp, &pkt_info);
if (status != ETH_OK) {
if ((status == ETH_ERROR))
@@ -1178,8 +1230,6 @@
pkt_info.return_info = 0;
}
pkt_info.byte_cnt = this_frag->size;
- if (this_frag->size < 8)
- printk("%d : \n", skb_shinfo(skb)->nr_frags);
pkt_info.buf_ptr = pci_map_page(NULL, this_frag->page,
this_frag->page_offset,
@@ -1199,12 +1249,24 @@
}
}
}
+#else
+ pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
+ ETH_TX_LAST_DESC;
+ pkt_info.byte_cnt = skb->len;
+ pkt_info.buf_ptr = pci_map_single(0, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ pkt_info.return_info = skb;
+ status = eth_port_send(mp, &pkt_info);
+ if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
+ printk(KERN_ERR "%s: Error on transmitting packet\n",
+ dev->name);
+ mp->tx_ring_skbs++;
#endif
/* Check if TX queue can handle another skb. If not, then
* signal higher layers to stop requesting TX
*/
- if (MV64340_TX_QUEUE_SIZE <= (mp->tx_ring_skbs + 1))
+ if (MV64340_TX_QUEUE_SIZE <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
/*
* Stop getting skb's from upper layers.
* Getting skb's from upper layers will be enabled again after
@@ -2180,84 +2242,60 @@
struct pkt_info * p_pkt_info)
{
int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc;
- volatile struct eth_tx_desc *current_descriptor;
- volatile struct eth_tx_desc *first_descriptor;
- u32 command_status, first_chip_ptr;
+ struct eth_tx_desc *current_descriptor;
+ struct eth_tx_desc *first_descriptor;
+ u32 command;
/* Do not process Tx ring in case of Tx ring resource error */
if (mp->tx_resource_err)
return ETH_QUEUE_FULL;
+ /*
+ * The hardware requires that each buffer that is <= 8 bytes
+ * in length must be aligned on an 8 byte boundary.
+ */
+ if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) {
+ printk(KERN_ERR
+ "mv64340_eth port %d: packet size <= 8 problem\n",
+ mp->port_num);
+ return ETH_ERROR;
+ }
+
/* Get the Tx Desc ring indexes */
tx_desc_curr = mp->tx_curr_desc_q;
tx_desc_used = mp->tx_used_desc_q;
current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
- if (current_descriptor == NULL)
- return ETH_ERROR;
tx_next_desc = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
- command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
-
- if (command_status & ETH_TX_FIRST_DESC) {
- tx_first_desc = tx_desc_curr;
- mp->tx_first_desc_q = tx_first_desc;
-
- /* fill first descriptor */
- first_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
- first_descriptor->l4i_chk = p_pkt_info->l4i_chk;
- first_descriptor->cmd_sts = command_status;
- first_descriptor->byte_cnt = p_pkt_info->byte_cnt;
- first_descriptor->buf_ptr = p_pkt_info->buf_ptr;
- first_descriptor->next_desc_ptr = mp->tx_desc_dma +
- tx_next_desc * sizeof(struct eth_tx_desc);
- wmb();
- } else {
- tx_first_desc = mp->tx_first_desc_q;
- first_descriptor = &mp->p_tx_desc_area[tx_first_desc];
- if (first_descriptor == NULL) {
- printk("First desc is NULL !!\n");
- return ETH_ERROR;
- }
- if (command_status & ETH_TX_LAST_DESC)
- current_descriptor->next_desc_ptr = 0x00000000;
- else {
- command_status |= ETH_BUFFER_OWNED_BY_DMA;
- current_descriptor->next_desc_ptr = mp->tx_desc_dma +
- tx_next_desc * sizeof(struct eth_tx_desc);
- }
- }
-
- if (p_pkt_info->byte_cnt < 8) {
- printk(" < 8 problem \n");
- return ETH_ERROR;
- }
current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
- current_descriptor->cmd_sts = command_status;
-
mp->tx_skb[tx_desc_curr] = (struct sk_buff*) p_pkt_info->return_info;
- wmb();
+ command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC |
+ ETH_BUFFER_OWNED_BY_DMA;
+ if (command & ETH_TX_LAST_DESC)
+ command |= ETH_TX_ENABLE_INTERRUPT;
- /* Set last desc with DMA ownership and interrupt enable. */
- if (command_status & ETH_TX_LAST_DESC) {
- current_descriptor->cmd_sts = command_status |
- ETH_TX_ENABLE_INTERRUPT |
- ETH_BUFFER_OWNED_BY_DMA;
+ if (command & ETH_TX_FIRST_DESC) {
+ tx_first_desc = tx_desc_curr;
+ mp->tx_first_desc_q = tx_first_desc;
+ first_descriptor = current_descriptor;
+ mp->tx_first_command = command;
+ } else {
+ tx_first_desc = mp->tx_first_desc_q;
+ first_descriptor = &mp->p_tx_desc_area[tx_first_desc];
+ BUG_ON(first_descriptor == NULL);
+ current_descriptor->cmd_sts = command;
+ }
- if (!(command_status & ETH_TX_FIRST_DESC))
- first_descriptor->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;
+ if (command & ETH_TX_LAST_DESC) {
wmb();
+ first_descriptor->cmd_sts = mp->tx_first_command;
- first_chip_ptr = MV_READ(MV64340_ETH_CURRENT_SERVED_TX_DESC_PTR(mp->port_num));
-
- /* Apply send command */
- if (first_chip_ptr == 0x00000000)
- MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(mp->port_num), (struct eth_tx_desc *) mp->tx_desc_dma + tx_first_desc);
-
+ wmb();
ETH_ENABLE_TX_QUEUE(mp->port_num);
/*
@@ -2265,13 +2303,9 @@
* error */
tx_first_desc = tx_next_desc;
mp->tx_first_desc_q = tx_first_desc;
- } else {
- if (! (command_status & ETH_TX_FIRST_DESC) ) {
- current_descriptor->cmd_sts = command_status;
- wmb();
- }
}
+
/* Check for ring index overlap in the Tx desc ring */
if (tx_next_desc == tx_desc_used) {
mp->tx_resource_err = 1;
@@ -2281,7 +2315,6 @@
}
mp->tx_curr_desc_q = tx_next_desc;
- wmb();
return ETH_OK;
}
@@ -2291,7 +2324,7 @@
{
int tx_desc_curr;
int tx_desc_used;
- volatile struct eth_tx_desc* current_descriptor;
+ struct eth_tx_desc *current_descriptor;
unsigned int command_status;
/* Do not process Tx ring in case of Tx ring resource error */
@@ -2303,32 +2336,18 @@
tx_desc_used = mp->tx_used_desc_q;
current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
- if (current_descriptor == NULL)
- return ETH_ERROR;
-
command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
-
-/* XXX Is this for real ?!?!? */
- /* Buffers with a payload smaller than 8 bytes must be aligned to a
- * 64-bit boundary. We use the memory allocated for Tx descriptor.
- * This memory is located in TX_BUF_OFFSET_IN_DESC offset within the
- * Tx descriptor. */
- if (p_pkt_info->byte_cnt <= 8) {
- printk(KERN_ERR
- "You have failed in the < 8 bytes errata - fixme\n");
- return ETH_ERROR;
- }
current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
mp->tx_skb[tx_desc_curr] = (struct sk_buff *) p_pkt_info->return_info;
- mb();
/* Set last desc with DMA ownership and interrupt enable. */
+ wmb();
current_descriptor->cmd_sts = command_status |
ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
- /* Apply send command */
+ wmb();
ETH_ENABLE_TX_QUEUE(mp->port_num);
/* Finish Tx packet. Update first desc in case of Tx resource error */
@@ -2374,40 +2393,33 @@
static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv64340_private * mp,
struct pkt_info * p_pkt_info)
{
- int tx_desc_used, tx_desc_curr;
+ int tx_desc_used;
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
- int tx_first_desc;
+ int tx_busy_desc = mp->tx_first_desc_q;
+#else
+ int tx_busy_desc = mp->tx_curr_desc_q;
#endif
- volatile struct eth_tx_desc *p_tx_desc_used;
+ struct eth_tx_desc *p_tx_desc_used;
unsigned int command_status;
/* Get the Tx Desc ring indexes */
- tx_desc_curr = mp->tx_curr_desc_q;
tx_desc_used = mp->tx_used_desc_q;
-#ifdef MV64340_CHECKSUM_OFFLOAD_TX
- tx_first_desc = mp->tx_first_desc_q;
-#endif
+
p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
- /* XXX Sanity check */
+ /* Sanity check */
if (p_tx_desc_used == NULL)
return ETH_ERROR;
+ /* Stop release. About to overlap the current available Tx descriptor */
+ if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err)
+ return ETH_END_OF_JOB;
+
command_status = p_tx_desc_used->cmd_sts;
/* Still transmitting... */
-#ifndef MV64340_CHECKSUM_OFFLOAD_TX
if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
return ETH_RETRY;
-#endif
- /* Stop release. About to overlap the current available Tx descriptor */
-#ifdef MV64340_CHECKSUM_OFFLOAD_TX
- if (tx_desc_used == tx_first_desc && !mp->tx_resource_err)
- return ETH_END_OF_JOB;
-#else
- if (tx_desc_used == tx_desc_curr && !mp->tx_resource_err)
- return ETH_END_OF_JOB;
-#endif
/* Pass the packet information to the caller */
p_pkt_info->cmd_sts = command_status;
@@ -2488,7 +2500,7 @@
if (rx_next_curr_desc == rx_used_desc)
mp->rx_resource_err = 1;
- mb();
+ rmb();
return ETH_OK;
}
@@ -2527,14 +2539,12 @@
mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;
/* Flush the write pipe */
- mb();
/* Return the descriptor to DMA ownership */
+ wmb();
p_used_rx_desc->cmd_sts =
ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
-
- /* Flush descriptor and CPU pipe */
- mb();
+ wmb();
/* Move the used descriptor pointer to the next descriptor */
mp->rx_used_desc_q = (used_rx_desc + 1) % MV64340_RX_QUEUE_SIZE;
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.h
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.h 2004-12-13 14:30:28.455727084 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.h 2004-12-13 14:30:34.652530970 -0700
@@ -511,18 +511,19 @@
int tx_curr_desc_q, tx_used_desc_q;
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
int tx_first_desc_q;
+ u32 tx_first_command;
#endif
#ifdef MV64340_TX_FAST_REFILL
u32 tx_clean_threshold;
#endif
- volatile struct eth_rx_desc * p_rx_desc_area;
+ struct eth_rx_desc * p_rx_desc_area;
dma_addr_t rx_desc_dma;
unsigned int rx_desc_area_size;
struct sk_buff * rx_skb[MV64340_RX_QUEUE_SIZE];
- volatile struct eth_tx_desc * p_tx_desc_area;
+ struct eth_tx_desc * p_tx_desc_area;
dma_addr_t tx_desc_dma;
unsigned int tx_desc_area_size;
struct sk_buff * tx_skb[MV64340_TX_QUEUE_SIZE];
^ permalink raw reply [flat|nested] 18+ messages in thread
[parent not found: <41BE1744.4060502@penguin.mvista>]
* Re: [PATCH 3/6] mv643xx_eth: fix hw checksum generation on transmit
[not found] ` <41BE1744.4060502@penguin.mvista>
@ 2004-12-14 5:03 ` Dale Farnsworth
0 siblings, 0 replies; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-14 5:03 UTC (permalink / raw)
To: Manish Lachwani, linux-kernel
On Mon, Dec 13, 2004 at 10:27:16PM +0000, Manish Lachwani wrote:
> Hi Dale,
>
> Just want to let you know that the checksum offload code worked well in
> 2.4. I have the numbers with me. On a Jaguar ATX board (Discovery II
> controller), TCP throughput measured using netperf was 920 Mb/s. As far
> as 2.6 goes, I don't have any idea if the checksum offload worked
>
> Dale Farnsworth wrote:
> >This patch fixes the code that enables hardware checksum generation.
> >The previous code has so many problems that it appears to never have
> >worked.
I haven't tried it on 2.4, so I can't comment there. Something like
my patch seems needed for 2.6 though. So, I'll revise my comment
to say that I don't see how the previous code could have worked on
current 2.6.
-Dale
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 3/6] mv643xx_eth: fix hw checksum generation on transmit
2004-12-13 22:15 ` [PATCH 3/6] mv643xx_eth: fix hw checksum generation on transmit Dale Farnsworth
[not found] ` <41BE1744.4060502@penguin.mvista>
@ 2004-12-14 23:15 ` Christoph Hellwig
2004-12-15 18:12 ` Dale Farnsworth
1 sibling, 1 reply; 18+ messages in thread
From: Christoph Hellwig @ 2004-12-14 23:15 UTC (permalink / raw)
To: Dale Farnsworth
Cc: linux-kernel, Jeff Garzik, Ralf Baechle, Russell King,
Manish Lachwani, Brian Waite, Steven J. Hill
> + dev_kfree_skb_irq((struct sk_buff *)
> + pkt_info.return_info);
pkt_info.return_info already is a pointer to struct sk_buff
> + /* CPU already calculated pseudo header checksum. */
> + if (skb->nh.iph->protocol == IPPROTO_UDP) {
> + pkt_info.cmd_sts |= ETH_UDP_FRAME;
> + pkt_info.l4i_chk = skb->h.uh->check;
> + }
> + else if (skb->nh.iph->protocol == IPPROTO_TCP)
> + pkt_info.l4i_chk = skb->h.th->check;
> + else {
} else if (skb->nh.iph->protocol == IPPROTO_TCP) {
pkt_info.l4i_chk = skb->h.th->check;
} else {
> + pkt_info.buf_ptr = pci_map_single(0, skb->data, skb->len,
> + PCI_DMA_TODEVICE);
s/0/NULL/ to avoid sparse warnings
> + /*
> + * The hardware requires that each buffer that is <= 8 bytes
> + * in length must be aligned on an 8 byte boundary.
> + */
> + if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) {
please use tabs, not spaces for indentation.
> #ifdef MV64340_CHECKSUM_OFFLOAD_TX
> - int tx_first_desc;
> + int tx_busy_desc = mp->tx_first_desc_q;
Again.
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 3/6] mv643xx_eth: fix hw checksum generation on transmit
2004-12-14 23:15 ` Christoph Hellwig
@ 2004-12-15 18:12 ` Dale Farnsworth
0 siblings, 0 replies; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-15 18:12 UTC (permalink / raw)
To: Christoph Hellwig, linux-kernel
On Tue, Dec 14, 2004 at 11:15:55PM +0000, Christoph Hellwig wrote:
> > + dev_kfree_skb_irq((struct sk_buff *)
> > + pkt_info.return_info);
>
> pkt_info.return_info already is a pointer to struct sk_buff
Yep. This line was moved, but not changed by my patch. I'll remove the
cast.
> > + /* CPU already calculated pseudo header checksum. */
> > + if (skb->nh.iph->protocol == IPPROTO_UDP) {
> > + pkt_info.cmd_sts |= ETH_UDP_FRAME;
> > + pkt_info.l4i_chk = skb->h.uh->check;
> > + }
> > + else if (skb->nh.iph->protocol == IPPROTO_TCP)
> > + pkt_info.l4i_chk = skb->h.th->check;
> > + else {
>
> } else if (skb->nh.iph->protocol == IPPROTO_TCP) {
> pkt_info.l4i_chk = skb->h.th->check;
> } else {
>
> > + pkt_info.buf_ptr = pci_map_single(0, skb->data, skb->len,
> > + PCI_DMA_TODEVICE);
>
> s/0/NULL/ to avoid sparse warnings
Another line that was moved but not changed. In patch 4/6 I changed it
to dma_map_single(NULL, ...
> > + /*
> > + * The hardware requires that each buffer that is <= 8 bytes
> > + * in length must be aligned on an 8 byte boundary.
> > + */
> > + if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) {
>
> please use tabs, not spaces for indentation.
Again, I didn't change the indentation on this line. The driver is
replete with whitespace problems. I don't want to address them now,
but as soon as it's confirmed that these patches are "in the queue",
I'll submit several cosmetic patches: white space cleanup, rename
from 64340 to 643xx, rename MV_READ/MV_WRITE to mv_read/mv_write,
and a few localized code simplifications.
> > #ifdef MV64340_CHECKSUM_OFFLOAD_TX
> > - int tx_first_desc;
> > + int tx_busy_desc = mp->tx_first_desc_q;
>
> Again.
Thanks,
-Dale
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 4/6] mv643xx_eth: Convert from pci_map_* to dma_map_* interface
2004-12-13 22:09 [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
` (2 preceding siblings ...)
2004-12-13 22:15 ` [PATCH 3/6] mv643xx_eth: fix hw checksum generation on transmit Dale Farnsworth
@ 2004-12-13 22:18 ` Dale Farnsworth
2004-12-13 22:19 ` [PATCH 5/6] mv643xx_eth: Add support for platform device interface Dale Farnsworth
` (3 subsequent siblings)
7 siblings, 0 replies; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-13 22:18 UTC (permalink / raw)
To: linux-kernel, Jeff Garzik
Cc: Ralf Baechle, Russell King, Manish Lachwani, Brian Waite, Steven J. Hill
This patch replaces the use of the pci_map_* functions with the
corresponding dma_map_* functions.
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.c 2004-12-13 14:30:34.651531163 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c 2004-12-13 14:30:37.436993510 -0700
@@ -154,9 +154,9 @@
pkt_info.byte_cnt += 8;
}
pkt_info.buf_ptr =
- pci_map_single(0, skb->data,
+ dma_map_single(NULL, skb->data,
dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
pkt_info.return_info = skb;
if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
printk(KERN_ERR
@@ -340,11 +340,11 @@
*/
if (pkt_info.return_info) {
if (skb_shinfo(pkt_info.return_info)->nr_frags)
- pci_unmap_page(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt, PCI_DMA_TODEVICE);
+ dma_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, DMA_TO_DEVICE);
else
- pci_unmap_single(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt, PCI_DMA_TODEVICE);
+ dma_unmap_single(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, DMA_TO_DEVICE);
dev_kfree_skb_irq((struct sk_buff *)
pkt_info.return_info);
@@ -359,8 +359,8 @@
"counter is corrupted");
mp->tx_ring_skbs--;
} else
- pci_unmap_page(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt, PCI_DMA_TODEVICE);
+ dma_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, DMA_TO_DEVICE);
}
@@ -827,7 +827,8 @@
mp->tx_desc_area_size = size;
/* Assumes allocated ring is 16 bytes alligned */
- mp->p_tx_desc_area = pci_alloc_consistent(NULL, size, &mp->tx_desc_dma);
+ mp->p_tx_desc_area = dma_alloc_coherent(NULL, size, &mp->tx_desc_dma,
+ GFP_KERNEL);
if (!mp->p_tx_desc_area) {
printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
dev->name, size);
@@ -847,16 +848,16 @@
/* Assumes allocated ring is 16 bytes aligned */
- mp->p_rx_desc_area = pci_alloc_consistent(NULL, size, &mp->rx_desc_dma);
+ mp->p_rx_desc_area = dma_alloc_coherent(NULL, size, &mp->rx_desc_dma,
+ GFP_KERNEL);
if (!mp->p_rx_desc_area) {
printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
dev->name, size);
printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
dev->name);
- pci_free_consistent(0, mp->tx_desc_area_size,
- (void *) mp->p_tx_desc_area,
- mp->tx_desc_dma);
+ dma_free_coherent(NULL, mp->tx_desc_area_size,
+ mp->p_tx_desc_area, mp->tx_desc_dma);
return -ENOMEM;
}
memset(mp->p_rx_desc_area, 0, size);
@@ -921,8 +922,8 @@
printk("%s: Error on Tx descriptor free - could not free %d"
" descriptors\n", dev->name,
mp->tx_ring_skbs);
- pci_free_consistent(0, mp->tx_desc_area_size,
- (void *) mp->p_tx_desc_area, mp->tx_desc_dma);
+ dma_free_coherent(0, mp->tx_desc_area_size,
+ mp->p_tx_desc_area, mp->tx_desc_dma);
}
static void mv64340_eth_free_rx_rings(struct net_device *dev)
@@ -951,9 +952,8 @@
"%s: Error in freeing Rx Ring. %d skb's still"
" stuck in RX Ring - ignoring them\n", dev->name,
mp->rx_ring_skbs);
- pci_free_consistent(0, mp->rx_desc_area_size,
- (void *) mp->p_rx_desc_area,
- mp->rx_desc_dma);
+ dma_free_coherent(NULL, mp->rx_desc_area_size,
+ mp->p_rx_desc_area, mp->rx_desc_dma);
}
/*
@@ -1016,13 +1016,11 @@
while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
if (pkt_info.return_info) {
if (skb_shinfo(pkt_info.return_info)->nr_frags)
- pci_unmap_page(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, DMA_TO_DEVICE);
else
- pci_unmap_single(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, DMA_TO_DEVICE);
dev_kfree_skb_irq((struct sk_buff *)
pkt_info.return_info);
@@ -1030,8 +1028,8 @@
if (mp->tx_ring_skbs != 0)
mp->tx_ring_skbs--;
} else
- pci_unmap_page(NULL, pkt_info.buf_ptr, pkt_info.byte_cnt,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, DMA_TO_DEVICE);
}
if (netif_queue_stopped(dev) &&
@@ -1163,8 +1161,8 @@
}
}
pkt_info.byte_cnt = skb->len;
- pkt_info.buf_ptr = pci_map_single(0, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
+ DMA_TO_DEVICE);
pkt_info.return_info = skb;
status = eth_port_send(mp, &pkt_info);
if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
@@ -1177,8 +1175,8 @@
/* first frag which is skb header */
pkt_info.byte_cnt = skb_headlen(skb);
- pkt_info.buf_ptr = pci_map_single(0, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
pkt_info.return_info = 0;
pkt_info.cmd_sts = ETH_TX_FIRST_DESC;
@@ -1231,9 +1229,9 @@
}
pkt_info.byte_cnt = this_frag->size;
- pkt_info.buf_ptr = pci_map_page(NULL, this_frag->page,
+ pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page,
this_frag->page_offset,
- this_frag->size, PCI_DMA_TODEVICE);
+ this_frag->size, DMA_TO_DEVICE);
status = eth_port_send(mp, &pkt_info);
@@ -1253,8 +1251,8 @@
pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
ETH_TX_LAST_DESC;
pkt_info.byte_cnt = skb->len;
- pkt_info.buf_ptr = pci_map_single(0, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
+ DMA_TO_DEVICE);
pkt_info.return_info = skb;
status = eth_port_send(mp, &pkt_info);
if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 5/6] mv643xx_eth: Add support for platform device interface
2004-12-13 22:09 [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
` (3 preceding siblings ...)
2004-12-13 22:18 ` [PATCH 4/6] mv643xx_eth: Convert from pci_map_* to dma_map_* interface Dale Farnsworth
@ 2004-12-13 22:19 ` Dale Farnsworth
2004-12-14 23:19 ` Christoph Hellwig
2004-12-13 22:20 ` [PATCH 6/6] mv643xx_eth: add configurable parameters via " Dale Farnsworth
` (2 subsequent siblings)
7 siblings, 1 reply; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-13 22:19 UTC (permalink / raw)
To: linux-kernel, Jeff Garzik
Cc: Ralf Baechle, Russell King, Manish Lachwani, Brian Waite, Steven J. Hill
This patch adds platform device support to the mv643xx_eth driver.
This is a change to the driver's programming interface. Platform
code must now pass in the address of the MV643xx ethernet registers
and IRQ. If firmware doesn't set the MAC address, platform code
must also pass in the MAC address.
Also, note that local MV_READ/MV_WRITE macros are used rather than
the global macros.
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.c 2004-12-13 14:30:37.436993510 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c 2004-12-13 14:30:39.866524559 -0700
@@ -65,6 +65,8 @@
#endif
/* Static function declarations */
+static void eth_port_uc_addr_get(struct net_device *dev,
+ unsigned char *MacAddr);
static int mv64340_eth_real_open(struct net_device *);
static int mv64340_eth_real_stop(struct net_device *);
static int mv64340_eth_change_mtu(struct net_device *, int);
@@ -74,11 +76,19 @@
static int mv64340_poll(struct net_device *dev, int *budget);
#endif
-unsigned char prom_mac_addr_base[6];
-unsigned long mv64340_sram_base;
+static void __iomem *mv64x60_eth_shared_base;
static spinlock_t mv64340_eth_phy_lock = SPIN_LOCK_UNLOCKED;
+#undef MV_READ
+#define MV_READ(offset) \
+ readl(mv64x60_eth_shared_base - MV64340_ETH_SHARED_REGS + offset)
+
+#undef MV_WRITE
+#define MV_WRITE(offset, data) \
+ writel((u32)data, \
+ mv64x60_eth_shared_base - MV64340_ETH_SHARED_REGS + offset)
+
/*
* Changes MTU (maximum transfer unit) of the gigabit ethenret port
*
@@ -1300,29 +1310,43 @@
}
/*/
- * mv64340_eth_init
+ * mv64340_eth_probe
*
* First function called after registering the network device.
* It's purpose is to initialize the device as an ethernet device,
- * fill the structure that was given in registration with pointers
- * to functions, and setting the MAC address of the interface
+ * fill the ethernet device structure with pointers to functions,
+ * and set the MAC address of the interface
*
- * Input : number of port to initialize
- * Output : -ENONMEM if failed , 0 if success
+ * Input : struct device *
+ * Output : -ENOMEM or -ENODEV if failed , 0 if success
*/
-static struct net_device *mv64340_eth_init(int port_num)
+static int mv64340_eth_probe(struct device *ddev)
{
+ struct platform_device *pdev = to_platform_device(ddev);
+ struct mv64xxx_eth_platform_data *pd;
+ int port_num = pdev->id;
struct mv64340_private *mp;
struct net_device *dev;
+ u8 *p;
+ struct resource *res;
int err;
dev = alloc_etherdev(sizeof(struct mv64340_private));
if (!dev)
- return NULL;
+ return -ENOMEM;
+
+ dev_set_drvdata(ddev, dev);
mp = netdev_priv(dev);
- dev->irq = ETH_PORT0_IRQ_NUM + port_num;
+ if ((res = platform_get_resource(pdev, IORESOURCE_IRQ, 0)))
+ dev->irq = res->start;
+ else {
+ err = -ENODEV;
+ goto out;
+ }
+
+ mp->port_num = port_num;
dev->open = mv64340_eth_open;
dev->stop = mv64340_eth_stop;
@@ -1355,58 +1379,111 @@
#endif
#endif
- mp->port_num = port_num;
/* Configure the timeout task */
INIT_WORK(&mp->tx_timeout_task,
(void (*)(void *))mv64340_eth_tx_timeout_task, dev);
spin_lock_init(&mp->lock);
-
- /* set MAC addresses */
- memcpy(dev->dev_addr, prom_mac_addr_base, 6);
- dev->dev_addr[5] += port_num;
+
+ /* set default config values */
+ eth_port_uc_addr_get(dev, dev->dev_addr);
+ pd = pdev->dev.platform_data;
+ if (pd) {
+ if (pd->mac_addr != NULL)
+ memcpy(dev->dev_addr, pd->mac_addr, 6);
+ }
err = register_netdev(dev);
if (err)
- goto out_free_dev;
+ goto out;
- printk(KERN_NOTICE "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
- dev->name, port_num,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ p = dev->dev_addr;
+ printk(KERN_NOTICE
+ "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]);
if (dev->features & NETIF_F_SG)
- printk("Scatter Gather Enabled ");
+ printk(KERN_NOTICE "%s: Scatter Gather Enabled", dev->name);
if (dev->features & NETIF_F_IP_CSUM)
- printk("TX TCP/IP Checksumming Supported \n");
+ printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
+ dev->name);
+
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+ printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name);
+#endif
- printk("RX TCP/UDP Checksum Offload ON, \n");
- printk("TX and RX Interrupt Coalescing ON \n");
+#ifdef MV64340_COAL
+ printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON \n",
+ dev->name);
+#endif
#ifdef MV64340_NAPI
- printk("RX NAPI Enabled \n");
+ printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
#endif
- return dev;
+ return 0;
-out_free_dev:
+out:
free_netdev(dev);
- return NULL;
+ return err;
}
-static void mv64340_eth_remove(struct net_device *dev)
+static int mv64340_eth_remove(struct device *ddev)
{
+ struct net_device *dev = dev_get_drvdata(ddev);
+
unregister_netdev(dev);
flush_scheduled_work();
+
free_netdev(dev);
+ dev_set_drvdata(ddev, NULL);
+ return 0;
}
-static struct net_device *mv64340_dev0;
-static struct net_device *mv64340_dev1;
-static struct net_device *mv64340_dev2;
+static int mv64340_eth_shared_probe(struct device *ddev)
+{
+ struct platform_device *pdev = to_platform_device(ddev);
+ struct resource *res;
+
+ printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -ENODEV;
+
+ mv64x60_eth_shared_base = ioremap(res->start,
+ MV64340_ETH_SHARED_REGS_SIZE);
+ if (mv64x60_eth_shared_base == NULL)
+ return -ENOMEM;
+
+ return 0;
+
+}
+
+static int mv64340_eth_shared_remove(struct device *ddev)
+{
+ iounmap(mv64x60_eth_shared_base);
+ mv64x60_eth_shared_base = NULL;
+
+ return 0;
+}
+
+static struct device_driver mv643xx_eth_driver = {
+ .name = MV64XXX_ETH_NAME,
+ .bus = &platform_bus_type,
+ .probe = mv64340_eth_probe,
+ .remove = mv64340_eth_remove,
+};
+
+static struct device_driver mv643xx_eth_shared_driver = {
+ .name = MV64XXX_ETH_SHARED_NAME,
+ .bus = &platform_bus_type,
+ .probe = mv64340_eth_shared_probe,
+ .remove = mv64340_eth_shared_remove,
+};
/*
* mv64340_init_module
@@ -1419,30 +1496,15 @@
*/
static int __init mv64340_init_module(void)
{
- printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
+ int rc;
-#ifdef CONFIG_MV643XX_ETH_0
- mv64340_dev0 = mv64340_eth_init(0);
- if (!mv64340_dev0) {
- printk(KERN_ERR
- "Error registering MV-64360 ethernet port 0\n");
- }
-#endif
-#ifdef CONFIG_MV643XX_ETH_1
- mv64340_dev1 = mv64340_eth_init(1);
- if (!mv64340_dev1) {
- printk(KERN_ERR
- "Error registering MV-64360 ethernet port 1\n");
+ rc = driver_register(&mv643xx_eth_shared_driver);
+ if (!rc) {
+ rc = driver_register(&mv643xx_eth_driver);
+ if (rc)
+ driver_unregister(&mv643xx_eth_shared_driver);
}
-#endif
-#ifdef CONFIG_MV643XX_ETH_2
- mv64340_dev2 = mv64340_eth_init(2);
- if (!mv64340_dev2) {
- printk(KERN_ERR
- "Error registering MV-64360 ethernet port 2\n");
- }
-#endif
- return 0;
+ return rc;
}
/*
@@ -1456,19 +1518,16 @@
*/
static void __exit mv64340_cleanup_module(void)
{
- if (mv64340_dev2)
- mv64340_eth_remove(mv64340_dev2);
- if (mv64340_dev1)
- mv64340_eth_remove(mv64340_dev1);
- if (mv64340_dev0)
- mv64340_eth_remove(mv64340_dev0);
+ driver_unregister(&mv643xx_eth_driver);
+ driver_unregister(&mv643xx_eth_shared_driver);
}
module_init(mv64340_init_module);
module_exit(mv64340_cleanup_module);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm and Manish Lachwani");
+MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
+ " and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV64340");
/*
@@ -1796,6 +1855,42 @@
}
/*
+ * eth_port_uc_addr_get - This function retrieves the port Unicast address
+ * (MAC address) from the ethernet hw registers.
+ *
+ * DESCRIPTION:
+ * This function retrieves the port Ethernet MAC address.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Port number.
+ * char *MacAddr pointer where the MAC address is stored
+ *
+ * OUTPUT:
+ * Copy the MAC address to the location pointed to by MacAddr
+ *
+ * RETURN:
+ * N/A.
+ *
+ */
+static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *MacAddr)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+ u32 MacLow;
+ u32 MacHigh;
+
+ MacLow = MV_READ(MV64340_ETH_MAC_ADDR_LOW(port_num));
+ MacHigh = MV_READ(MV64340_ETH_MAC_ADDR_HIGH(port_num));
+
+ MacAddr[5] = (MacLow) & 0xff;
+ MacAddr[4] = (MacLow >> 8) & 0xff;
+ MacAddr[3] = (MacHigh) & 0xff;
+ MacAddr[2] = (MacHigh >> 8) & 0xff;
+ MacAddr[1] = (MacHigh >> 16) & 0xff;
+ MacAddr[0] = (MacHigh >> 24) & 0xff;
+}
+
+/*
* eth_port_uc_addr - This function Set the port unicast address table
*
* DESCRIPTION:
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.h
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.h 2004-12-13 14:30:38.294827930 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.h 2004-12-13 14:30:39.867524366 -0700
@@ -46,10 +46,6 @@
* The first part is the high level driver of the gigE ethernet ports.
*/
-#define ETH_PORT0_IRQ_NUM 48 /* main high register, bit0 */
-#define ETH_PORT1_IRQ_NUM ETH_PORT0_IRQ_NUM+1 /* main high register, bit1 */
-#define ETH_PORT2_IRQ_NUM ETH_PORT0_IRQ_NUM+2 /* main high register, bit1 */
-
/* Checksum offload for Tx works */
#define MV64340_CHECKSUM_OFFLOAD_TX
#define MV64340_NAPI
Index: linux-2.5-marvell-submit/include/linux/mv643xx.h
===================================================================
--- linux-2.5-marvell-submit.orig/include/linux/mv643xx.h 2004-12-10 15:47:16.000000000 -0700
+++ linux-2.5-marvell-submit/include/linux/mv643xx.h 2004-12-13 14:30:39.868524173 -0700
@@ -663,6 +663,9 @@
/* Ethernet Unit Registers */
/****************************************/
+#define MV64340_ETH_SHARED_REGS 0x2000
+#define MV64340_ETH_SHARED_REGS_SIZE 0x2000
+
#define MV64340_ETH_PHY_ADDR_REG 0x2000
#define MV64340_ETH_SMI_REG 0x2004
#define MV64340_ETH_UNIT_DEFAULT_ADDR_REG 0x2008
@@ -1040,4 +1043,13 @@
extern void mv64340_irq_init(unsigned int base);
+#define MV64340_ETH_DESC_SIZE 64
+
+#define MV64XXX_ETH_SHARED_NAME "mv64xxx_eth_shared"
+#define MV64XXX_ETH_NAME "mv64xxx_eth"
+
+struct mv64xxx_eth_platform_data {
+ char *mac_addr; /* pointer to mac address */
+};
+
#endif /* __ASM_MV64340_H */
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 5/6] mv643xx_eth: Add support for platform device interface
2004-12-13 22:19 ` [PATCH 5/6] mv643xx_eth: Add support for platform device interface Dale Farnsworth
@ 2004-12-14 23:19 ` Christoph Hellwig
2004-12-15 18:32 ` Dale Farnsworth
0 siblings, 1 reply; 18+ messages in thread
From: Christoph Hellwig @ 2004-12-14 23:19 UTC (permalink / raw)
To: Dale Farnsworth
Cc: linux-kernel, Jeff Garzik, Ralf Baechle, Russell King,
Manish Lachwani, Brian Waite, Steven J. Hill
> +#undef MV_READ
> +#define MV_READ(offset) \
> + readl(mv64x60_eth_shared_base - MV64340_ETH_SHARED_REGS + offset)
> +
> +#undef MV_WRITE
> +#define MV_WRITE(offset, data) \
> + writel((u32)data, \
> + mv64x60_eth_shared_base - MV64340_ETH_SHARED_REGS + offset)
> +
please use different accessors. Best static inlines without shouting names.
> + */
> +static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *MacAddr)
> +{
> + struct mv64340_private *mp = netdev_priv(dev);
> + unsigned int port_num = mp->port_num;
> + u32 MacLow;
> + u32 MacHigh;
> +
> + MacLow = MV_READ(MV64340_ETH_MAC_ADDR_LOW(port_num));
> + MacHigh = MV_READ(MV64340_ETH_MAC_ADDR_HIGH(port_num));
> +
> + MacAddr[5] = (MacLow) & 0xff;
> + MacAddr[4] = (MacLow >> 8) & 0xff;
> + MacAddr[3] = (MacHigh) & 0xff;
> + MacAddr[2] = (MacHigh >> 8) & 0xff;
> + MacAddr[1] = (MacHigh >> 16) & 0xff;
> + MacAddr[0] = (MacHigh >> 24) & 0xff;
Please avoid mixed UpperLower case variable names. Also make sure to use
tabs for indentation again.
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 5/6] mv643xx_eth: Add support for platform device interface
2004-12-14 23:19 ` Christoph Hellwig
@ 2004-12-15 18:32 ` Dale Farnsworth
0 siblings, 0 replies; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-15 18:32 UTC (permalink / raw)
To: Christoph Hellwig, linux-kernel
On Tue, Dec 14, 2004 at 11:19:24PM +0000, Christoph Hellwig wrote:
> > +#undef MV_READ
> > +#define MV_READ(offset) \
> > + readl(mv64x60_eth_shared_base - MV64340_ETH_SHARED_REGS + offset)
> > +
> > +#undef MV_WRITE
> > +#define MV_WRITE(offset, data) \
> > + writel((u32)data, \
> > + mv64x60_eth_shared_base - MV64340_ETH_SHARED_REGS + offset)
> > +
>
> please use different accessors. Best static inlines without shouting names.
The existing driver uses MV_READ/MV_WRITE throughout. I agree it's
ugly but I kept them to minimize the patch size. I plan to submit a
patch to rename them and make them static inline functions as soon as
this set of patches is "in the queue".
> > + */
> > +static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *MacAddr)
> > +{
> > + struct mv64340_private *mp = netdev_priv(dev);
> > + unsigned int port_num = mp->port_num;
> > + u32 MacLow;
> > + u32 MacHigh;
> > +
> > + MacLow = MV_READ(MV64340_ETH_MAC_ADDR_LOW(port_num));
> > + MacHigh = MV_READ(MV64340_ETH_MAC_ADDR_HIGH(port_num));
> > +
> > + MacAddr[5] = (MacLow) & 0xff;
> > + MacAddr[4] = (MacLow >> 8) & 0xff;
> > + MacAddr[3] = (MacHigh) & 0xff;
> > + MacAddr[2] = (MacHigh >> 8) & 0xff;
> > + MacAddr[1] = (MacHigh >> 16) & 0xff;
> > + MacAddr[0] = (MacHigh >> 24) & 0xff;
>
> Please avoid mixed UpperLower case variable names. Also make sure to use
> tabs for indentation again.
I copied this from an existing driver, but I agree and will change.
Thanks,
-Dale
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 6/6] mv643xx_eth: add configurable parameters via platform device interface
2004-12-13 22:09 [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
` (4 preceding siblings ...)
2004-12-13 22:19 ` [PATCH 5/6] mv643xx_eth: Add support for platform device interface Dale Farnsworth
@ 2004-12-13 22:20 ` Dale Farnsworth
2004-12-14 22:51 ` [PATCH 7/6] mv643xx_eth: Remove use of MV_SET_REG_BITS macro Dale Farnsworth
2004-12-15 19:02 ` [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
7 siblings, 0 replies; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-13 22:20 UTC (permalink / raw)
To: linux-kernel, Jeff Garzik
Cc: Ralf Baechle, Russell King, Manish Lachwani, Brian Waite, Steven J. Hill
This patch adds support for passing additional parameters via the
platform device interface. These additional parameters are:
size of RX and TX descriptor rings
port_config value
port_config_extend value
port_sdma_config value
port_serial_control value
PHY address
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.c 2004-12-13 14:30:39.866524559 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c 2004-12-13 14:30:44.084710360 -0700
@@ -75,6 +75,7 @@
#ifdef MV64340_NAPI
static int mv64340_poll(struct net_device *dev, int *budget);
#endif
+static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
static void __iomem *mv64x60_eth_shared_base;
@@ -250,12 +251,12 @@
ethernet_set_config_reg
(mp->port_num,
ethernet_get_config_reg(mp->port_num) |
- ETH_UNICAST_PROMISCUOUS_MODE);
+ MV64340_ETH_UNICAST_PROMISCUOUS_MODE);
} else {
ethernet_set_config_reg
(mp->port_num,
ethernet_get_config_reg(mp->port_num) &
- ~(unsigned int) ETH_UNICAST_PROMISCUOUS_MODE);
+ ~(unsigned int) MV64340_ETH_UNICAST_PROMISCUOUS_MODE);
}
}
@@ -509,8 +510,7 @@
/* UDP change : We may need this */
if ((eth_int_cause_ext & 0x0000ffff) &&
(mv64340_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
- (MV64340_TX_QUEUE_SIZE >
- mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
+ (mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
netif_wake_queue(dev);
#ifdef MV64340_NAPI
} else {
@@ -830,47 +830,72 @@
mp->rx_task_busy = 0;
mp->rx_timer_flag = 0;
+ /* Allocate RX and TX skb rings */
+ mp->rx_skb = kmalloc(sizeof(*mp->rx_skb)*mp->rx_ring_size, GFP_KERNEL);
+ if (!mp->rx_skb) {
+ printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ mp->tx_skb = kmalloc(sizeof(*mp->tx_skb)*mp->tx_ring_size, GFP_KERNEL);
+ if (!mp->tx_skb) {
+ printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
+ kfree(mp->rx_skb);
+ return -ENOMEM;
+ }
+
/* Allocate TX ring */
mp->tx_ring_skbs = 0;
- mp->tx_ring_size = MV64340_TX_QUEUE_SIZE;
size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
mp->tx_desc_area_size = size;
- /* Assumes allocated ring is 16 bytes alligned */
- mp->p_tx_desc_area = dma_alloc_coherent(NULL, size, &mp->tx_desc_dma,
- GFP_KERNEL);
+ if (mp->tx_sram_size) {
+ mp->p_tx_desc_area = ioremap(mp->tx_sram_addr,
+ mp->tx_sram_size);
+ mp->tx_desc_dma = mp->tx_sram_addr;
+ } else
+ mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,
+ &mp->tx_desc_dma, GFP_KERNEL);
+
if (!mp->p_tx_desc_area) {
printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
dev->name, size);
+ kfree(mp->rx_skb);
+ kfree(mp->tx_skb);
return -ENOMEM;
}
+ BUG_ON((u32)mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
memset((void *) mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
- /* Dummy will be replaced upon real tx */
ether_init_tx_desc_ring(mp);
/* Allocate RX ring */
- /* Meantime RX Ring are fixed - but must be configurable by user */
- mp->rx_ring_size = MV64340_RX_QUEUE_SIZE;
mp->rx_ring_skbs = 0;
size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
mp->rx_desc_area_size = size;
- /* Assumes allocated ring is 16 bytes aligned */
-
- mp->p_rx_desc_area = dma_alloc_coherent(NULL, size, &mp->rx_desc_dma,
- GFP_KERNEL);
+ if (mp->rx_sram_size) {
+ mp->p_rx_desc_area = ioremap(mp->rx_sram_addr,
+ mp->rx_sram_size);
+ mp->rx_desc_dma = mp->rx_sram_addr;
+ } else
+ mp->p_rx_desc_area = dma_alloc_coherent(NULL, size,
+ &mp->rx_desc_dma, GFP_KERNEL);
if (!mp->p_rx_desc_area) {
printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
dev->name, size);
printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
dev->name);
- dma_free_coherent(NULL, mp->tx_desc_area_size,
+ if (mp->rx_sram_size)
+ iounmap(mp->p_rx_desc_area);
+ else
+ dma_free_coherent(NULL, mp->tx_desc_area_size,
mp->p_tx_desc_area, mp->tx_desc_dma);
+ kfree(mp->rx_skb);
+ kfree(mp->tx_skb);
return -ENOMEM;
}
- memset(mp->p_rx_desc_area, 0, size);
+ memset((void *)mp->p_rx_desc_area, 0, size);
ether_init_rx_desc_ring(mp);
@@ -918,11 +943,9 @@
MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
0x0000ff00);
- /* Free TX rings */
+
/* Free outstanding skb's on TX rings */
- for (curr = 0;
- (mp->tx_ring_skbs) && (curr < MV64340_TX_QUEUE_SIZE);
- curr++) {
+ for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
if (mp->tx_skb[curr]) {
dev_kfree_skb(mp->tx_skb[curr]);
mp->tx_ring_skbs--;
@@ -932,7 +955,12 @@
printk("%s: Error on Tx descriptor free - could not free %d"
" descriptors\n", dev->name,
mp->tx_ring_skbs);
- dma_free_coherent(0, mp->tx_desc_area_size,
+
+ /* Free TX ring */
+ if (mp->tx_sram_size)
+ iounmap(mp->p_tx_desc_area);
+ else
+ dma_free_coherent(NULL, mp->tx_desc_area_size,
mp->p_tx_desc_area, mp->tx_desc_dma);
}
@@ -946,11 +974,8 @@
MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
0x0000ff00);
- /* Free RX rings */
/* Free preallocated skb's on RX rings */
- for (curr = 0;
- mp->rx_ring_skbs && (curr < MV64340_RX_QUEUE_SIZE);
- curr++) {
+ for (curr = 0; mp->rx_ring_skbs && curr < mp->rx_ring_size; curr++) {
if (mp->rx_skb[curr]) {
dev_kfree_skb(mp->rx_skb[curr]);
mp->rx_ring_skbs--;
@@ -962,7 +987,11 @@
"%s: Error in freeing Rx Ring. %d skb's still"
" stuck in RX Ring - ignoring them\n", dev->name,
mp->rx_ring_skbs);
- dma_free_coherent(NULL, mp->rx_desc_area_size,
+ /* Free RX ring */
+ if (mp->rx_sram_size)
+ iounmap(mp->p_rx_desc_area);
+ else
+ dma_free_coherent(NULL, mp->rx_desc_area_size,
mp->p_rx_desc_area, mp->rx_desc_dma);
}
@@ -1043,8 +1072,8 @@
}
if (netif_queue_stopped(dev) &&
- MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)
- netif_wake_queue(dev);
+ mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)
+ netif_wake_queue(dev);
}
/*
@@ -1123,8 +1152,8 @@
}
/* This is a hard error, log it. */
- if ((MV64340_TX_QUEUE_SIZE - mp->tx_ring_skbs) <=
- (skb_shinfo(skb)->nr_frags + 1)) {
+ if ((mp->tx_ring_size - mp->tx_ring_skbs) <=
+ (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
printk(KERN_ERR
"%s: Bug in mv64340_eth - Trying to transmit when"
@@ -1135,6 +1164,7 @@
/* Paranoid check - this shouldn't happen */
if (skb == NULL) {
stats->tx_dropped++;
+ printk(KERN_ERR "mv64340_eth paranoid check failed\n");
return 1;
}
@@ -1274,7 +1304,7 @@
/* Check if TX queue can handle another skb. If not, then
* signal higher layers to stop requesting TX
*/
- if (MV64340_TX_QUEUE_SIZE <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
+ if (mp->tx_ring_size <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
/*
* Stop getting skb's from upper layers.
* Getting skb's from upper layers will be enabled again after
@@ -1318,7 +1348,7 @@
* and set the MAC address of the interface
*
* Input : struct device *
- * Output : -ENOMEM or -ENODEV if failed , 0 if success
+ * Output : -ENOMEM if failed , 0 if success
*/
static int mv64340_eth_probe(struct device *ddev)
{
@@ -1339,12 +1369,9 @@
mp = netdev_priv(dev);
- if ((res = platform_get_resource(pdev, IORESOURCE_IRQ, 0)))
- dev->irq = res->start;
- else {
- err = -ENODEV;
- goto out;
- }
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ BUG_ON(!res);
+ dev->irq = res->start;
mp->port_num = port_num;
@@ -1363,7 +1390,7 @@
#endif
dev->watchdog_timeo = 2 * HZ;
- dev->tx_queue_len = MV64340_TX_QUEUE_SIZE;
+ dev->tx_queue_len = mp->tx_ring_size;
dev->base_addr = 0;
dev->change_mtu = mv64340_eth_change_mtu;
@@ -1388,10 +1415,48 @@
/* set default config values */
eth_port_uc_addr_get(dev, dev->dev_addr);
+ mp->port_config = MV64340_ETH_PORT_CONFIG_DEFAULT_VALUE;
+ mp->port_config_extend = MV64340_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE;
+ mp->port_sdma_config = MV64340_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE;
+ mp->port_serial_control = MV64340_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE;
+ mp->rx_ring_size = MV64340_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
+ mp->tx_ring_size = MV64340_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
+
pd = pdev->dev.platform_data;
if (pd) {
if (pd->mac_addr != NULL)
memcpy(dev->dev_addr, pd->mac_addr, 6);
+
+ if (pd->phy_addr || pd->force_phy_addr)
+ ethernet_phy_set(port_num, pd->phy_addr);
+
+ if (pd->port_config || pd->force_port_config)
+ mp->port_config = pd->port_config;
+
+ if (pd->port_config_extend || pd->force_port_config_extend)
+ mp->port_config_extend = pd->port_config_extend;
+
+ if (pd->port_sdma_config || pd->force_port_sdma_config)
+ mp->port_sdma_config = pd->port_sdma_config;
+
+ if (pd->port_serial_control || pd->force_port_serial_control)
+ mp->port_serial_control = pd->port_serial_control;
+
+ if (pd->rx_queue_size)
+ mp->rx_ring_size = pd->rx_queue_size;
+
+ if (pd->tx_queue_size)
+ mp->tx_ring_size = pd->tx_queue_size;
+
+ if (pd->tx_sram_size) {
+ mp->tx_sram_size = pd->tx_sram_size;
+ mp->tx_sram_addr = pd->tx_sram_addr;
+ }
+
+ if (pd->rx_sram_size) {
+ mp->rx_sram_size = pd->rx_sram_size;
+ mp->rx_sram_addr = pd->rx_sram_addr;
+ }
}
err = register_netdev(dev);
@@ -1404,7 +1469,7 @@
dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]);
if (dev->features & NETIF_F_SG)
- printk(KERN_NOTICE "%s: Scatter Gather Enabled", dev->name);
+ printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);
if (dev->features & NETIF_F_IP_CSUM)
printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
@@ -1658,12 +1723,6 @@
* port_sdma_config User port SDMA config value.
* port_serial_control User port serial control value.
*
- * This driver introduce a set of default values:
- * PORT_CONFIG_VALUE Default port configuration value
- * PORT_CONFIG_EXTEND_VALUE Default port extend configuration value
- * PORT_SDMA_CONFIG_VALUE Default sdma control value
- * PORT_SERIAL_CONTROL_VALUE Default port serial control value
- *
* This driver data flow is done using the struct pkt_info which
* is a unified struct for Rx and Tx operations:
*
@@ -1684,6 +1743,7 @@
/* PHY routines */
static int ethernet_phy_get(unsigned int eth_port_num);
+static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
/* Ethernet Port routines */
static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
@@ -1715,18 +1775,6 @@
*/
static void eth_port_init(struct mv64340_private * mp)
{
- mp->port_config = PORT_CONFIG_VALUE;
- mp->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
-#if defined(__BIG_ENDIAN)
- mp->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
-#elif defined(__LITTLE_ENDIAN)
- mp->port_sdma_config = PORT_SDMA_CONFIG_VALUE |
- ETH_BLM_RX_NO_SWAP | ETH_BLM_TX_NO_SWAP;
-#else
-#error One of __LITTLE_ENDIAN or __BIG_ENDIAN must be defined!
-#endif
- mp->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
-
mp->port_rx_queue_command = 0;
mp->port_tx_queue_command = 0;
@@ -1798,7 +1846,7 @@
mp->port_serial_control);
MV_SET_REG_BITS(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
- ETH_SERIAL_PORT_ENABLE);
+ MV64340_ETH_SERIAL_PORT_ENABLE);
/* Assign port SDMA configuration */
MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
@@ -2049,6 +2097,34 @@
}
/*
+ * ethernet_phy_set - Set the ethernet port PHY address.
+ *
+ * DESCRIPTION:
+ * This routine sets the given ethernet port PHY address.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ * int phy_addr PHY address.
+ *
+ * OUTPUT:
+ * None.
+ *
+ * RETURN:
+ * None.
+ *
+ */
+static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
+{
+ u32 reg_data;
+ int addr_shift = 5 * eth_port_num;
+
+ reg_data = MV_READ(MV64340_ETH_PHY_ADDR_REG);
+ reg_data &= ~(0x1f << addr_shift);
+ reg_data |= (phy_addr & 0x1f) << addr_shift;
+ MV_WRITE(MV64340_ETH_PHY_ADDR_REG, reg_data);
+}
+
+/*
* ethernet_phy_reset - Reset Ethernet port PHY.
*
* DESCRIPTION:
@@ -2132,7 +2208,7 @@
/* Reset the Enable bit in the Configuration Register */
reg_data = MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num));
- reg_data &= ~ETH_SERIAL_PORT_ENABLE;
+ reg_data &= ~MV64340_ETH_SERIAL_PORT_ENABLE;
MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
}
@@ -2360,7 +2436,7 @@
current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
- tx_next_desc = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
+ tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size;
current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
@@ -2444,7 +2520,7 @@
ETH_ENABLE_TX_QUEUE(mp->port_num);
/* Finish Tx packet. Update first desc in case of Tx resource error */
- tx_desc_curr = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
+ tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size;
/* Update the current descriptor */
mp->tx_curr_desc_q = tx_desc_curr;
@@ -2520,7 +2596,7 @@
mp->tx_skb[tx_desc_used] = NULL;
/* Update the next descriptor to release. */
- mp->tx_used_desc_q = (tx_desc_used + 1) % MV64340_TX_QUEUE_SIZE;
+ mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size;
/* Any Tx return cancels the Tx resource error status */
mp->tx_resource_err = 0;
@@ -2586,7 +2662,7 @@
mp->rx_skb[rx_curr_desc] = NULL;
/* Update current index in data structure */
- rx_next_curr_desc = (rx_curr_desc + 1) % MV64340_RX_QUEUE_SIZE;
+ rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size;
mp->rx_curr_desc_q = rx_next_curr_desc;
/* Rx descriptors exhausted. Set the Rx ring resource error flag */
@@ -2640,7 +2716,7 @@
wmb();
/* Move the used descriptor pointer to the next descriptor */
- mp->rx_used_desc_q = (used_rx_desc + 1) % MV64340_RX_QUEUE_SIZE;
+ mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size;
/* Any Rx return cancels the Rx resource error status */
mp->rx_resource_err = 0;
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.h
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.h 2004-12-13 14:30:39.867524366 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.h 2004-12-13 14:30:44.085710167 -0700
@@ -61,10 +61,10 @@
*/
/* Default TX ring size is 1000 descriptors */
-#define MV64340_TX_QUEUE_SIZE 1000
+#define MV64340_DEFAULT_TX_QUEUE_SIZE 1000
/* Default RX ring size is 400 descriptors */
-#define MV64340_RX_QUEUE_SIZE 400
+#define MV64340_DEFAULT_RX_QUEUE_SIZE 400
#define MV64340_TX_COAL 100
#ifdef MV64340_COAL
@@ -89,58 +89,6 @@
*
*/
-/* Default port configuration value */
-#define PORT_CONFIG_VALUE \
- ETH_UNICAST_NORMAL_MODE | \
- ETH_DEFAULT_RX_QUEUE_0 | \
- ETH_DEFAULT_RX_ARP_QUEUE_0 | \
- ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP | \
- ETH_RECEIVE_BC_IF_IP | \
- ETH_RECEIVE_BC_IF_ARP | \
- ETH_CAPTURE_TCP_FRAMES_DIS | \
- ETH_CAPTURE_UDP_FRAMES_DIS | \
- ETH_DEFAULT_RX_TCP_QUEUE_0 | \
- ETH_DEFAULT_RX_UDP_QUEUE_0 | \
- ETH_DEFAULT_RX_BPDU_QUEUE_0
-
-/* Default port extend configuration value */
-#define PORT_CONFIG_EXTEND_VALUE \
- ETH_SPAN_BPDU_PACKETS_AS_NORMAL | \
- ETH_PARTITION_DISABLE
-
-
-/* Default sdma control value */
-#define PORT_SDMA_CONFIG_VALUE \
- ETH_RX_BURST_SIZE_16_64BIT | \
- GT_ETH_IPG_INT_RX(0) | \
- ETH_TX_BURST_SIZE_16_64BIT;
-
-#define GT_ETH_IPG_INT_RX(value) \
- ((value & 0x3fff) << 8)
-
-/* Default port serial control value */
-#define PORT_SERIAL_CONTROL_VALUE \
- ETH_FORCE_LINK_PASS | \
- ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \
- ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \
- ETH_ADV_SYMMETRIC_FLOW_CTRL | \
- ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
- ETH_FORCE_BP_MODE_NO_JAM | \
- BIT9 | \
- ETH_DO_NOT_FORCE_LINK_FAIL | \
- ETH_RETRANSMIT_16_ATTEMPTS | \
- ETH_ENABLE_AUTO_NEG_SPEED_GMII | \
- ETH_DTE_ADV_0 | \
- ETH_DISABLE_AUTO_NEG_BYPASS | \
- ETH_AUTO_NEG_NO_CHANGE | \
- ETH_MAX_RX_PACKET_9700BYTE | \
- ETH_CLR_EXT_LOOPBACK | \
- ETH_SET_FULL_DUPLEX_MODE | \
- ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
-
-#define RX_BUFFER_MAX_SIZE 0x4000000
-#define TX_BUFFER_MAX_SIZE 0x4000000
-
/* MAC accepet/reject macros */
#define ACCEPT_MAC_ADDR 0
#define REJECT_MAC_ADDR 1
@@ -207,156 +155,12 @@
#define ETH_PORT_TX_FIFO_EMPTY BIT10
-/* These macros describes the Port configuration reg (Px_cR) bits */
-#define ETH_UNICAST_NORMAL_MODE 0
-#define ETH_UNICAST_PROMISCUOUS_MODE BIT0
-#define ETH_DEFAULT_RX_QUEUE_0 0
-#define ETH_DEFAULT_RX_QUEUE_1 BIT1
-#define ETH_DEFAULT_RX_QUEUE_2 BIT2
-#define ETH_DEFAULT_RX_QUEUE_3 (BIT2 | BIT1)
-#define ETH_DEFAULT_RX_QUEUE_4 BIT3
-#define ETH_DEFAULT_RX_QUEUE_5 (BIT3 | BIT1)
-#define ETH_DEFAULT_RX_QUEUE_6 (BIT3 | BIT2)
-#define ETH_DEFAULT_RX_QUEUE_7 (BIT3 | BIT2 | BIT1)
-#define ETH_DEFAULT_RX_ARP_QUEUE_0 0
-#define ETH_DEFAULT_RX_ARP_QUEUE_1 BIT4
-#define ETH_DEFAULT_RX_ARP_QUEUE_2 BIT5
-#define ETH_DEFAULT_RX_ARP_QUEUE_3 (BIT5 | BIT4)
-#define ETH_DEFAULT_RX_ARP_QUEUE_4 BIT6
-#define ETH_DEFAULT_RX_ARP_QUEUE_5 (BIT6 | BIT4)
-#define ETH_DEFAULT_RX_ARP_QUEUE_6 (BIT6 | BIT5)
-#define ETH_DEFAULT_RX_ARP_QUEUE_7 (BIT6 | BIT5 | BIT4)
-#define ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP 0
-#define ETH_REJECT_BC_IF_NOT_IP_OR_ARP BIT7
-#define ETH_RECEIVE_BC_IF_IP 0
-#define ETH_REJECT_BC_IF_IP BIT8
-#define ETH_RECEIVE_BC_IF_ARP 0
-#define ETH_REJECT_BC_IF_ARP BIT9
-#define ETH_TX_AM_NO_UPDATE_ERROR_SUMMARY BIT12
-#define ETH_CAPTURE_TCP_FRAMES_DIS 0
-#define ETH_CAPTURE_TCP_FRAMES_EN BIT14
-#define ETH_CAPTURE_UDP_FRAMES_DIS 0
-#define ETH_CAPTURE_UDP_FRAMES_EN BIT15
-#define ETH_DEFAULT_RX_TCP_QUEUE_0 0
-#define ETH_DEFAULT_RX_TCP_QUEUE_1 BIT16
-#define ETH_DEFAULT_RX_TCP_QUEUE_2 BIT17
-#define ETH_DEFAULT_RX_TCP_QUEUE_3 (BIT17 | BIT16)
-#define ETH_DEFAULT_RX_TCP_QUEUE_4 BIT18
-#define ETH_DEFAULT_RX_TCP_QUEUE_5 (BIT18 | BIT16)
-#define ETH_DEFAULT_RX_TCP_QUEUE_6 (BIT18 | BIT17)
-#define ETH_DEFAULT_RX_TCP_QUEUE_7 (BIT18 | BIT17 | BIT16)
-#define ETH_DEFAULT_RX_UDP_QUEUE_0 0
-#define ETH_DEFAULT_RX_UDP_QUEUE_1 BIT19
-#define ETH_DEFAULT_RX_UDP_QUEUE_2 BIT20
-#define ETH_DEFAULT_RX_UDP_QUEUE_3 (BIT20 | BIT19)
-#define ETH_DEFAULT_RX_UDP_QUEUE_4 (BIT21
-#define ETH_DEFAULT_RX_UDP_QUEUE_5 (BIT21 | BIT19)
-#define ETH_DEFAULT_RX_UDP_QUEUE_6 (BIT21 | BIT20)
-#define ETH_DEFAULT_RX_UDP_QUEUE_7 (BIT21 | BIT20 | BIT19)
-#define ETH_DEFAULT_RX_BPDU_QUEUE_0 0
-#define ETH_DEFAULT_RX_BPDU_QUEUE_1 BIT22
-#define ETH_DEFAULT_RX_BPDU_QUEUE_2 BIT23
#define ETH_DEFAULT_RX_BPDU_QUEUE_3 (BIT23 | BIT22)
#define ETH_DEFAULT_RX_BPDU_QUEUE_4 BIT24
#define ETH_DEFAULT_RX_BPDU_QUEUE_5 (BIT24 | BIT22)
#define ETH_DEFAULT_RX_BPDU_QUEUE_6 (BIT24 | BIT23)
#define ETH_DEFAULT_RX_BPDU_QUEUE_7 (BIT24 | BIT23 | BIT22)
-
-/* These macros describes the Port configuration extend reg (Px_cXR) bits*/
-#define ETH_CLASSIFY_EN BIT0
-#define ETH_SPAN_BPDU_PACKETS_AS_NORMAL 0
-#define ETH_SPAN_BPDU_PACKETS_TO_RX_QUEUE_7 BIT1
-#define ETH_PARTITION_DISABLE 0
-#define ETH_PARTITION_ENABLE BIT2
-
-
-/* Tx/Rx queue command reg (RQCR/TQCR)*/
-#define ETH_QUEUE_0_ENABLE BIT0
-#define ETH_QUEUE_1_ENABLE BIT1
-#define ETH_QUEUE_2_ENABLE BIT2
-#define ETH_QUEUE_3_ENABLE BIT3
-#define ETH_QUEUE_4_ENABLE BIT4
-#define ETH_QUEUE_5_ENABLE BIT5
-#define ETH_QUEUE_6_ENABLE BIT6
-#define ETH_QUEUE_7_ENABLE BIT7
-#define ETH_QUEUE_0_DISABLE BIT8
-#define ETH_QUEUE_1_DISABLE BIT9
-#define ETH_QUEUE_2_DISABLE BIT10
-#define ETH_QUEUE_3_DISABLE BIT11
-#define ETH_QUEUE_4_DISABLE BIT12
-#define ETH_QUEUE_5_DISABLE BIT13
-#define ETH_QUEUE_6_DISABLE BIT14
-#define ETH_QUEUE_7_DISABLE BIT15
-
-
-/* These macros describes the Port Sdma configuration reg (SDCR) bits */
-#define ETH_RIFB BIT0
-#define ETH_RX_BURST_SIZE_1_64BIT 0
-#define ETH_RX_BURST_SIZE_2_64BIT BIT1
-#define ETH_RX_BURST_SIZE_4_64BIT BIT2
-#define ETH_RX_BURST_SIZE_8_64BIT (BIT2 | BIT1)
-#define ETH_RX_BURST_SIZE_16_64BIT BIT3
-#define ETH_BLM_RX_NO_SWAP BIT4
-#define ETH_BLM_RX_BYTE_SWAP 0
-#define ETH_BLM_TX_NO_SWAP BIT5
-#define ETH_BLM_TX_BYTE_SWAP 0
-#define ETH_DESCRIPTORS_BYTE_SWAP BIT6
-#define ETH_DESCRIPTORS_NO_SWAP 0
-#define ETH_TX_BURST_SIZE_1_64BIT 0
-#define ETH_TX_BURST_SIZE_2_64BIT BIT22
-#define ETH_TX_BURST_SIZE_4_64BIT BIT23
-#define ETH_TX_BURST_SIZE_8_64BIT (BIT23 | BIT22)
-#define ETH_TX_BURST_SIZE_16_64BIT BIT24
-
-
-
-/* These macros describes the Port serial control reg (PSCR) bits */
-#define ETH_SERIAL_PORT_DISABLE 0
-#define ETH_SERIAL_PORT_ENABLE BIT0
-#define ETH_FORCE_LINK_PASS BIT1
-#define ETH_DO_NOT_FORCE_LINK_PASS 0
-#define ETH_ENABLE_AUTO_NEG_FOR_DUPLX 0
-#define ETH_DISABLE_AUTO_NEG_FOR_DUPLX BIT2
-#define ETH_ENABLE_AUTO_NEG_FOR_FLOW_CTRL 0
-#define ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL BIT3
-#define ETH_ADV_NO_FLOW_CTRL 0
-#define ETH_ADV_SYMMETRIC_FLOW_CTRL BIT4
-#define ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX 0
-#define ETH_FORCE_FC_MODE_TX_PAUSE_DIS BIT5
-#define ETH_FORCE_BP_MODE_NO_JAM 0
-#define ETH_FORCE_BP_MODE_JAM_TX BIT7
-#define ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR BIT8
-#define ETH_FORCE_LINK_FAIL 0
-#define ETH_DO_NOT_FORCE_LINK_FAIL BIT10
-#define ETH_RETRANSMIT_16_ATTEMPTS 0
-#define ETH_RETRANSMIT_FOREVER BIT11
-#define ETH_DISABLE_AUTO_NEG_SPEED_GMII BIT13
-#define ETH_ENABLE_AUTO_NEG_SPEED_GMII 0
-#define ETH_DTE_ADV_0 0
-#define ETH_DTE_ADV_1 BIT14
-#define ETH_DISABLE_AUTO_NEG_BYPASS 0
-#define ETH_ENABLE_AUTO_NEG_BYPASS BIT15
-#define ETH_AUTO_NEG_NO_CHANGE 0
-#define ETH_RESTART_AUTO_NEG BIT16
-#define ETH_MAX_RX_PACKET_1518BYTE 0
-#define ETH_MAX_RX_PACKET_1522BYTE BIT17
-#define ETH_MAX_RX_PACKET_1552BYTE BIT18
-#define ETH_MAX_RX_PACKET_9022BYTE (BIT18 | BIT17)
-#define ETH_MAX_RX_PACKET_9192BYTE BIT19
-#define ETH_MAX_RX_PACKET_9700BYTE (BIT19 | BIT17)
-#define ETH_SET_EXT_LOOPBACK BIT20
-#define ETH_CLR_EXT_LOOPBACK 0
-#define ETH_SET_FULL_DUPLEX_MODE BIT21
-#define ETH_SET_HALF_DUPLEX_MODE 0
-#define ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX BIT22
-#define ETH_DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX 0
-#define ETH_SET_GMII_SPEED_TO_10_100 0
-#define ETH_SET_GMII_SPEED_TO_1000 BIT23
-#define ETH_SET_MII_SPEED_TO_10 0
-#define ETH_SET_MII_SPEED_TO_100 BIT24
-
-
/* SMI reg */
#define ETH_SMI_BUSY BIT28 /* 0 - Write, 1 - Read */
#define ETH_SMI_READ_VALID BIT27 /* 0 - Write, 1 - Read */
@@ -495,6 +299,11 @@
u32 port_tx_queue_command; /* Port active Tx queues summary */
u32 port_rx_queue_command; /* Port active Rx queues summary */
+ u32 rx_sram_addr; /* Base address of rx sram area */
+ u32 rx_sram_size; /* Size of rx sram area */
+ u32 tx_sram_addr; /* Base address of tx sram area */
+ u32 tx_sram_size; /* Size of tx sram area */
+
int rx_resource_err; /* Rx ring resource error flag */
int tx_resource_err; /* Tx ring resource error flag */
@@ -517,12 +326,12 @@
struct eth_rx_desc * p_rx_desc_area;
dma_addr_t rx_desc_dma;
unsigned int rx_desc_area_size;
- struct sk_buff * rx_skb[MV64340_RX_QUEUE_SIZE];
+ struct sk_buff ** rx_skb;
struct eth_tx_desc * p_tx_desc_area;
dma_addr_t tx_desc_dma;
unsigned int tx_desc_area_size;
- struct sk_buff * tx_skb[MV64340_TX_QUEUE_SIZE];
+ struct sk_buff ** tx_skb;
struct work_struct tx_timeout_task;
Index: linux-2.5-marvell-submit/include/linux/mv643xx.h
===================================================================
--- linux-2.5-marvell-submit.orig/include/linux/mv643xx.h 2004-12-13 14:30:39.868524173 -0700
+++ linux-2.5-marvell-submit/include/linux/mv643xx.h 2004-12-13 14:30:44.086709974 -0700
@@ -1043,13 +1043,207 @@
extern void mv64340_irq_init(unsigned int base);
+/* These macros describe Ethernet Port configuration reg (Px_cR) bits */
+#define MV64340_ETH_UNICAST_NORMAL_MODE 0
+#define MV64340_ETH_UNICAST_PROMISCUOUS_MODE (1<<0)
+#define MV64340_ETH_DEFAULT_RX_QUEUE_0 0
+#define MV64340_ETH_DEFAULT_RX_QUEUE_1 (1<<1)
+#define MV64340_ETH_DEFAULT_RX_QUEUE_2 (1<<2)
+#define MV64340_ETH_DEFAULT_RX_QUEUE_3 ((1<<2) | (1<<1))
+#define MV64340_ETH_DEFAULT_RX_QUEUE_4 (1<<3)
+#define MV64340_ETH_DEFAULT_RX_QUEUE_5 ((1<<3) | (1<<1))
+#define MV64340_ETH_DEFAULT_RX_QUEUE_6 ((1<<3) | (1<<2))
+#define MV64340_ETH_DEFAULT_RX_QUEUE_7 ((1<<3) | (1<<2) | (1<<1))
+#define MV64340_ETH_DEFAULT_RX_ARP_QUEUE_0 0
+#define MV64340_ETH_DEFAULT_RX_ARP_QUEUE_1 (1<<4)
+#define MV64340_ETH_DEFAULT_RX_ARP_QUEUE_2 (1<<5)
+#define MV64340_ETH_DEFAULT_RX_ARP_QUEUE_3 ((1<<5) | (1<<4))
+#define MV64340_ETH_DEFAULT_RX_ARP_QUEUE_4 (1<<6)
+#define MV64340_ETH_DEFAULT_RX_ARP_QUEUE_5 ((1<<6) | (1<<4))
+#define MV64340_ETH_DEFAULT_RX_ARP_QUEUE_6 ((1<<6) | (1<<5))
+#define MV64340_ETH_DEFAULT_RX_ARP_QUEUE_7 ((1<<6) | (1<<5) | (1<<4))
+#define MV64340_ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP 0
+#define MV64340_ETH_REJECT_BC_IF_NOT_IP_OR_ARP (1<<7)
+#define MV64340_ETH_RECEIVE_BC_IF_IP 0
+#define MV64340_ETH_REJECT_BC_IF_IP (1<<8)
+#define MV64340_ETH_RECEIVE_BC_IF_ARP 0
+#define MV64340_ETH_REJECT_BC_IF_ARP (1<<9)
+#define MV64340_ETH_TX_AM_NO_UPDATE_ERROR_SUMMARY (1<<12)
+#define MV64340_ETH_CAPTURE_TCP_FRAMES_DIS 0
+#define MV64340_ETH_CAPTURE_TCP_FRAMES_EN (1<<14)
+#define MV64340_ETH_CAPTURE_UDP_FRAMES_DIS 0
+#define MV64340_ETH_CAPTURE_UDP_FRAMES_EN (1<<15)
+#define MV64340_ETH_DEFAULT_RX_TCP_QUEUE_0 0
+#define MV64340_ETH_DEFAULT_RX_TCP_QUEUE_1 (1<<16)
+#define MV64340_ETH_DEFAULT_RX_TCP_QUEUE_2 (1<<17)
+#define MV64340_ETH_DEFAULT_RX_TCP_QUEUE_3 ((1<<17) | (1<<16))
+#define MV64340_ETH_DEFAULT_RX_TCP_QUEUE_4 (1<<18)
+#define MV64340_ETH_DEFAULT_RX_TCP_QUEUE_5 ((1<<18) | (1<<16))
+#define MV64340_ETH_DEFAULT_RX_TCP_QUEUE_6 ((1<<18) | (1<<17))
+#define MV64340_ETH_DEFAULT_RX_TCP_QUEUE_7 ((1<<18) | (1<<17) | (1<<16))
+#define MV64340_ETH_DEFAULT_RX_UDP_QUEUE_0 0
+#define MV64340_ETH_DEFAULT_RX_UDP_QUEUE_1 (1<<19)
+#define MV64340_ETH_DEFAULT_RX_UDP_QUEUE_2 (1<<20)
+#define MV64340_ETH_DEFAULT_RX_UDP_QUEUE_3 ((1<<20) | (1<<19))
+#define MV64340_ETH_DEFAULT_RX_UDP_QUEUE_4 (1<<21)
+#define MV64340_ETH_DEFAULT_RX_UDP_QUEUE_5 ((1<<21) | (1<<19))
+#define MV64340_ETH_DEFAULT_RX_UDP_QUEUE_6 ((1<<21) | (1<<20))
+#define MV64340_ETH_DEFAULT_RX_UDP_QUEUE_7 ((1<<21) | (1<<20) | (1<<19))
+#define MV64340_ETH_DEFAULT_RX_BPDU_QUEUE_0 0
+#define MV64340_ETH_DEFAULT_RX_BPDU_QUEUE_1 (1<<22)
+#define MV64340_ETH_DEFAULT_RX_BPDU_QUEUE_2 (1<<23)
+#define MV64340_ETH_DEFAULT_RX_BPDU_QUEUE_3 ((1<<23) | (1<<22))
+#define MV64340_ETH_DEFAULT_RX_BPDU_QUEUE_4 (1<<24)
+#define MV64340_ETH_DEFAULT_RX_BPDU_QUEUE_5 ((1<<24) | (1<<22))
+#define MV64340_ETH_DEFAULT_RX_BPDU_QUEUE_6 ((1<<24) | (1<<23))
+#define MV64340_ETH_DEFAULT_RX_BPDU_QUEUE_7 ((1<<24) | (1<<23) | (1<<22))
+
+#define MV64340_ETH_PORT_CONFIG_DEFAULT_VALUE \
+ MV64340_ETH_UNICAST_NORMAL_MODE | \
+ MV64340_ETH_DEFAULT_RX_QUEUE_0 | \
+ MV64340_ETH_DEFAULT_RX_ARP_QUEUE_0 | \
+ MV64340_ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP | \
+ MV64340_ETH_RECEIVE_BC_IF_IP | \
+ MV64340_ETH_RECEIVE_BC_IF_ARP | \
+ MV64340_ETH_CAPTURE_TCP_FRAMES_DIS | \
+ MV64340_ETH_CAPTURE_UDP_FRAMES_DIS | \
+ MV64340_ETH_DEFAULT_RX_TCP_QUEUE_0 | \
+ MV64340_ETH_DEFAULT_RX_UDP_QUEUE_0 | \
+ MV64340_ETH_DEFAULT_RX_BPDU_QUEUE_0
+
+/* These macros describe Ethernet Port configuration extend reg (Px_cXR) bits*/
+#define MV64340_ETH_CLASSIFY_EN (1<<0)
+#define MV64340_ETH_SPAN_BPDU_PACKETS_AS_NORMAL 0
+#define MV64340_ETH_SPAN_BPDU_PACKETS_TO_RX_QUEUE_7 (1<<1)
+#define MV64340_ETH_PARTITION_DISABLE 0
+#define MV64340_ETH_PARTITION_ENABLE (1<<2)
+
+#define MV64340_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE \
+ MV64340_ETH_SPAN_BPDU_PACKETS_AS_NORMAL | \
+ MV64340_ETH_PARTITION_DISABLE
+
+/* These macros describe Ethernet Port Sdma configuration reg (SDCR) bits */
+#define MV64340_ETH_RIFB (1<<0)
+#define MV64340_ETH_RX_BURST_SIZE_1_64BIT 0
+#define MV64340_ETH_RX_BURST_SIZE_2_64BIT (1<<1)
+#define MV64340_ETH_RX_BURST_SIZE_4_64BIT (1<<2)
+#define MV64340_ETH_RX_BURST_SIZE_8_64BIT ((1<<2) | (1<<1))
+#define MV64340_ETH_RX_BURST_SIZE_16_64BIT (1<<3)
+#define MV64340_ETH_BLM_RX_NO_SWAP (1<<4)
+#define MV64340_ETH_BLM_RX_BYTE_SWAP 0
+#define MV64340_ETH_BLM_TX_NO_SWAP (1<<5)
+#define MV64340_ETH_BLM_TX_BYTE_SWAP 0
+#define MV64340_ETH_DESCRIPTORS_BYTE_SWAP (1<<6)
+#define MV64340_ETH_DESCRIPTORS_NO_SWAP 0
+#define MV64340_ETH_TX_BURST_SIZE_1_64BIT 0
+#define MV64340_ETH_TX_BURST_SIZE_2_64BIT (1<<22)
+#define MV64340_ETH_TX_BURST_SIZE_4_64BIT (1<<23)
+#define MV64340_ETH_TX_BURST_SIZE_8_64BIT ((1<<23) | (1<<22))
+#define MV64340_ETH_TX_BURST_SIZE_16_64BIT (1<<24)
+
+#define MV64340_ETH_IPG_INT_RX(value) (((value) & 0x3fff) << 8)
+
+#define MV64340_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE \
+ MV64340_ETH_RX_BURST_SIZE_4_64BIT | \
+ MV64340_ETH_IPG_INT_RX(0) | \
+ MV64340_ETH_TX_BURST_SIZE_4_64BIT
+
+/* These macros describe Ethernet Port serial control reg (PSCR) bits */
+#define MV64340_ETH_SERIAL_PORT_DISABLE 0
+#define MV64340_ETH_SERIAL_PORT_ENABLE (1<<0)
+#define MV64340_ETH_FORCE_LINK_PASS (1<<1)
+#define MV64340_ETH_DO_NOT_FORCE_LINK_PASS 0
+#define MV64340_ETH_ENABLE_AUTO_NEG_FOR_DUPLX 0
+#define MV64340_ETH_DISABLE_AUTO_NEG_FOR_DUPLX (1<<2)
+#define MV64340_ETH_ENABLE_AUTO_NEG_FOR_FLOW_CTRL 0
+#define MV64340_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1<<3)
+#define MV64340_ETH_ADV_NO_FLOW_CTRL 0
+#define MV64340_ETH_ADV_SYMMETRIC_FLOW_CTRL (1<<4)
+#define MV64340_ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX 0
+#define MV64340_ETH_FORCE_FC_MODE_TX_PAUSE_DIS (1<<5)
+#define MV64340_ETH_FORCE_BP_MODE_NO_JAM 0
+#define MV64340_ETH_FORCE_BP_MODE_JAM_TX (1<<7)
+#define MV64340_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8)
+#define MV64340_ETH_FORCE_LINK_FAIL 0
+#define MV64340_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10)
+#define MV64340_ETH_RETRANSMIT_16_ATTEMPTS 0
+#define MV64340_ETH_RETRANSMIT_FOREVER (1<<11)
+#define MV64340_ETH_DISABLE_AUTO_NEG_SPEED_GMII (1<<13)
+#define MV64340_ETH_ENABLE_AUTO_NEG_SPEED_GMII 0
+#define MV64340_ETH_DTE_ADV_0 0
+#define MV64340_ETH_DTE_ADV_1 (1<<14)
+#define MV64340_ETH_DISABLE_AUTO_NEG_BYPASS 0
+#define MV64340_ETH_ENABLE_AUTO_NEG_BYPASS (1<<15)
+#define MV64340_ETH_AUTO_NEG_NO_CHANGE 0
+#define MV64340_ETH_RESTART_AUTO_NEG (1<<16)
+#define MV64340_ETH_MAX_RX_PACKET_1518BYTE 0
+#define MV64340_ETH_MAX_RX_PACKET_1522BYTE (1<<17)
+#define MV64340_ETH_MAX_RX_PACKET_1552BYTE (1<<18)
+#define MV64340_ETH_MAX_RX_PACKET_9022BYTE ((1<<18) | (1<<17))
+#define MV64340_ETH_MAX_RX_PACKET_9192BYTE (1<<19)
+#define MV64340_ETH_MAX_RX_PACKET_9700BYTE ((1<<19) | (1<<17))
+#define MV64340_ETH_SET_EXT_LOOPBACK (1<<20)
+#define MV64340_ETH_CLR_EXT_LOOPBACK 0
+#define MV64340_ETH_SET_FULL_DUPLEX_MODE (1<<21)
+#define MV64340_ETH_SET_HALF_DUPLEX_MODE 0
+#define MV64340_ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (1<<22)
+#define MV64340_ETH_DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX 0
+#define MV64340_ETH_SET_GMII_SPEED_TO_10_100 0
+#define MV64340_ETH_SET_GMII_SPEED_TO_1000 (1<<23)
+#define MV64340_ETH_SET_MII_SPEED_TO_10 0
+#define MV64340_ETH_SET_MII_SPEED_TO_100 (1<<24)
+
+#define MV64340_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \
+ MV64340_ETH_DO_NOT_FORCE_LINK_PASS | \
+ MV64340_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \
+ MV64340_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \
+ MV64340_ETH_ADV_SYMMETRIC_FLOW_CTRL | \
+ MV64340_ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
+ MV64340_ETH_FORCE_BP_MODE_NO_JAM | \
+ (1<<9) /* reserved */ | \
+ MV64340_ETH_DO_NOT_FORCE_LINK_FAIL | \
+ MV64340_ETH_RETRANSMIT_16_ATTEMPTS | \
+ MV64340_ETH_ENABLE_AUTO_NEG_SPEED_GMII | \
+ MV64340_ETH_DTE_ADV_0 | \
+ MV64340_ETH_DISABLE_AUTO_NEG_BYPASS | \
+ MV64340_ETH_AUTO_NEG_NO_CHANGE | \
+ MV64340_ETH_MAX_RX_PACKET_9700BYTE | \
+ MV64340_ETH_CLR_EXT_LOOPBACK | \
+ MV64340_ETH_SET_FULL_DUPLEX_MODE | \
+ MV64340_ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
+
+#define MV64340_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE 800
+#define MV64340_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE 400
+
#define MV64340_ETH_DESC_SIZE 64
#define MV64XXX_ETH_SHARED_NAME "mv64xxx_eth_shared"
#define MV64XXX_ETH_NAME "mv64xxx_eth"
struct mv64xxx_eth_platform_data {
- char *mac_addr; /* pointer to mac address */
+ /*
+ * Non-zero values for mac_addr, phy_addr, port_config, etc.
+ * override the default value. Setting the corresponding
+ * force_* field causes the default value to be overridden
+ * even when the supplied value is zero.
+ */
+ unsigned int force_phy_addr:1;
+ unsigned int force_port_config:1;
+ unsigned int force_port_config_extend:1;
+ unsigned int force_port_sdma_config:1;
+ unsigned int force_port_serial_control:1;
+ int phy_addr;
+ char *mac_addr; /* pointer to mac address */
+ u32 port_config;
+ u32 port_config_extend;
+ u32 port_sdma_config;
+ u32 port_serial_control;
+ u32 tx_queue_size;
+ u32 rx_queue_size;
+ u32 tx_sram_addr;
+ u32 tx_sram_size;
+ u32 rx_sram_addr;
+ u32 rx_sram_size;
};
#endif /* __ASM_MV64340_H */
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 7/6] mv643xx_eth: Remove use of MV_SET_REG_BITS macro
2004-12-13 22:09 [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
` (5 preceding siblings ...)
2004-12-13 22:20 ` [PATCH 6/6] mv643xx_eth: add configurable parameters via " Dale Farnsworth
@ 2004-12-14 22:51 ` Dale Farnsworth
2004-12-14 22:56 ` Russell King
2004-12-15 19:02 ` [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
7 siblings, 1 reply; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-14 22:51 UTC (permalink / raw)
To: linux-kernel, Jeff Garzik
Cc: Ralf Baechle, Russell King, Manish Lachwani, Brian Waite, Steven J. Hill
Oops, I missed this in my first set of patches for the mv643xx_eth driver.
This patch removes the need for the MV_SET_REG_BITS macro in the mv643xx_eth
driver.
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.c 2004-12-14 15:07:49.537387217 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c 2004-12-14 15:07:53.721135861 -0700
@@ -1845,8 +1845,9 @@
MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
mp->port_serial_control);
- MV_SET_REG_BITS(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
- MV64340_ETH_SERIAL_PORT_ENABLE);
+ MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
+ MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num)) |
+ MV64340_ETH_SERIAL_PORT_ENABLE);
/* Assign port SDMA configuration */
MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH] mv643xx_eth support for platform device interface + more
2004-12-13 22:09 [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
` (6 preceding siblings ...)
2004-12-14 22:51 ` [PATCH 7/6] mv643xx_eth: Remove use of MV_SET_REG_BITS macro Dale Farnsworth
@ 2004-12-15 19:02 ` Dale Farnsworth
2004-12-15 19:18 ` [PATCH 8/6] mv643xx_eth: address style issues raised by Christoph Hellwig Dale Farnsworth
7 siblings, 1 reply; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-15 19:02 UTC (permalink / raw)
To: linux-kernel, Jeff Garzik
Cc: Ralf Baechle, Manish Lachwani, Brian Waite, Steven J. Hill
This patch addresses the style issues raised by Christoph Hellwig.
Locally, I have folded these changes into my patch set, but since they
are issues of style, rather than substance, I don't think it's worth
re-releasing the patch set now, and I'm just supplying this additional
patch.
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Index: linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c
===================================================================
--- linux-2.5-marvell-submit.orig/drivers/net/mv643xx_eth.c 2004-12-15 11:36:46.108784630 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c 2004-12-15 11:38:29.662690590 -0700
@@ -66,6 +66,9 @@
#define MAX_DESCS_PER_SKB 1
#endif
+#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
+#define PHY_WAIT_MICRO_SECONDS 10
+
/* Static function declarations */
static void eth_port_uc_addr_get(struct net_device *dev,
unsigned char *MacAddr);
@@ -81,6 +84,7 @@
static void __iomem *mv64x60_eth_shared_base;
+/* used to protect MV64340_ETH_SMI_REG, which is shared across ports */
static spinlock_t mv64340_eth_phy_lock = SPIN_LOCK_UNLOCKED;
#undef MV_READ
@@ -948,7 +952,7 @@
& 0xfff1ffff));
/* wait up to 1 second for link to come up */
- for (i=0; i<10; i++) {
+ for (i = 0; i < 10; i++) {
eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
if (phy_reg_data & 0x20) {
netif_start_queue(dev);
@@ -1949,22 +1953,21 @@
* N/A.
*
*/
-static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *MacAddr)
+static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr)
{
struct mv64340_private *mp = netdev_priv(dev);
- unsigned int port_num = mp->port_num;
- u32 MacLow;
- u32 MacHigh;
+ unsigned int mac_h;
+ unsigned int mac_l;
- MacLow = MV_READ(MV64340_ETH_MAC_ADDR_LOW(port_num));
- MacHigh = MV_READ(MV64340_ETH_MAC_ADDR_HIGH(port_num));
+ mac_h = MV_READ(MV64340_ETH_MAC_ADDR_HIGH(mp->port_num));
+ mac_l = MV_READ(MV64340_ETH_MAC_ADDR_LOW(mp->port_num));
- MacAddr[5] = (MacLow) & 0xff;
- MacAddr[4] = (MacLow >> 8) & 0xff;
- MacAddr[3] = (MacHigh) & 0xff;
- MacAddr[2] = (MacHigh >> 8) & 0xff;
- MacAddr[1] = (MacHigh >> 16) & 0xff;
- MacAddr[0] = (MacHigh >> 24) & 0xff;
+ p_addr[0] = (mac_h << 24) & 0xff;
+ p_addr[1] = (mac_h << 16) & 0xff;
+ p_addr[2] = (mac_h << 8) & 0xff;
+ p_addr[3] = mac_h & 0xff;
+ p_addr[4] = (mac_l << 8) & 0xff;
+ p_addr[5] = mac_l & 0xff;
}
/*
@@ -2297,9 +2300,6 @@
return eth_config_reg;
}
-#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
-
-
/*
* eth_port_read_smi_reg - Read PHY registers
*
@@ -2331,24 +2331,24 @@
spin_lock_irqsave(&mv64340_eth_phy_lock, flags);
/* wait for the SMI register to become available */
- for (i=0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+ for (i = 0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
if (i == PHY_WAIT_ITERATIONS) {
printk("mv64340 PHY busy timeout, port %d\n", port_num);
goto out;
}
- udelay(10);
+ udelay(PHY_WAIT_MICRO_SECONDS);
}
MV_WRITE(MV64340_ETH_SMI_REG,
(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
/* now wait for the data to be valid */
- for (i=0; !(MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
+ for (i = 0; !(MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
if (i == PHY_WAIT_ITERATIONS) {
printk("mv64340 PHY read timeout, port %d\n", port_num);
goto out;
}
- udelay(10);
+ udelay(PHY_WAIT_MICRO_SECONDS);
}
*value = MV_READ(MV64340_ETH_SMI_REG) & 0xffff;
@@ -2390,13 +2390,13 @@
spin_lock_irqsave(&mv64340_eth_phy_lock, flags);
/* wait for the SMI register to become available */
- for (i=0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+ for (i = 0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
if (i == PHY_WAIT_ITERATIONS) {
printk("mv64340 PHY busy timeout, port %d\n",
eth_port_num);
goto out;
}
- udelay(10);
+ udelay(PHY_WAIT_MICRO_SECONDS);
}
MV_WRITE(MV64340_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 8/6] mv643xx_eth: address style issues raised by Christoph Hellwig
2004-12-15 19:02 ` [PATCH] mv643xx_eth support for platform device interface + more Dale Farnsworth
@ 2004-12-15 19:18 ` Dale Farnsworth
0 siblings, 0 replies; 18+ messages in thread
From: Dale Farnsworth @ 2004-12-15 19:18 UTC (permalink / raw)
To: linux-kernel, Jeff Garzik
Cc: Ralf Baechle, Manish Lachwani, Brian Waite, Steven J. Hill
On Wed, Dec 15, 2004 at 12:02:07PM -0700, dale wrote:
> This patch addresses the style issues raised by Christoph Hellwig.
> Locally, I have folded these changes into my patch set, but since they
> are issues of style, rather than substance, I don't think it's worth
> re-releasing the patch set now, and I'm just supplying this additional
> patch.
I botched the previous patch. Please disregard it and use this one.
This patch addresses the style issues raised by Christoph Hellwig.
Locally, I have folded these changes into my patch set, but since they
are issues of style, rather than substance, I don't think it's worth
re-releasing the patch set now, and I'm just supplying this additional
patch.
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
diff -u linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c
--- linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c 2004-12-15 11:38:29.662690590 -0700
+++ linux-2.5-marvell-submit/drivers/net/mv643xx_eth.c 2004-12-15 12:06:25.259733848 -0700
@@ -66,6 +66,9 @@
#define MAX_DESCS_PER_SKB 1
#endif
+#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
+#define PHY_WAIT_MICRO_SECONDS 10
+
/* Static function declarations */
static void eth_port_uc_addr_get(struct net_device *dev,
unsigned char *MacAddr);
@@ -81,6 +84,7 @@
static void __iomem *mv64x60_eth_shared_base;
+/* used to protect MV64340_ETH_SMI_REG, which is shared across ports */
static spinlock_t mv64340_eth_phy_lock = SPIN_LOCK_UNLOCKED;
#undef MV_READ
@@ -355,8 +359,7 @@
dma_unmap_single(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt, DMA_TO_DEVICE);
- dev_kfree_skb_irq((struct sk_buff *)
- pkt_info.return_info);
+ dev_kfree_skb_irq(pkt_info.return_info);
released = 0;
/*
@@ -415,7 +418,7 @@
/* Update statistics. Note byte count includes 4 byte CRC count */
stats->rx_packets++;
stats->rx_bytes += pkt_info.byte_cnt;
- skb = (struct sk_buff *) pkt_info.return_info;
+ skb = pkt_info.return_info;
/*
* In case received a packet without first / last bits on OR
* the error summary bit is on, the packet needs to be dropped.
@@ -948,7 +951,7 @@
& 0xfff1ffff));
/* wait up to 1 second for link to come up */
- for (i=0; i<10; i++) {
+ for (i = 0; i < 10; i++) {
eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
if (phy_reg_data & 0x20) {
netif_start_queue(dev);
@@ -1089,8 +1092,7 @@
dma_unmap_single(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt, DMA_TO_DEVICE);
- dev_kfree_skb_irq((struct sk_buff *)
- pkt_info.return_info);
+ dev_kfree_skb_irq(pkt_info.return_info);
if (mp->tx_ring_skbs != 0)
mp->tx_ring_skbs--;
@@ -1949,22 +1951,21 @@
* N/A.
*
*/
-static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *MacAddr)
+static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr)
{
struct mv64340_private *mp = netdev_priv(dev);
- unsigned int port_num = mp->port_num;
- u32 MacLow;
- u32 MacHigh;
-
- MacLow = MV_READ(MV64340_ETH_MAC_ADDR_LOW(port_num));
- MacHigh = MV_READ(MV64340_ETH_MAC_ADDR_HIGH(port_num));
-
- MacAddr[5] = (MacLow) & 0xff;
- MacAddr[4] = (MacLow >> 8) & 0xff;
- MacAddr[3] = (MacHigh) & 0xff;
- MacAddr[2] = (MacHigh >> 8) & 0xff;
- MacAddr[1] = (MacHigh >> 16) & 0xff;
- MacAddr[0] = (MacHigh >> 24) & 0xff;
+ unsigned int mac_h;
+ unsigned int mac_l;
+
+ mac_h = MV_READ(MV64340_ETH_MAC_ADDR_HIGH(mp->port_num));
+ mac_l = MV_READ(MV64340_ETH_MAC_ADDR_LOW(mp->port_num));
+
+ p_addr[0] = (mac_h >> 24) & 0xff;
+ p_addr[1] = (mac_h >> 16) & 0xff;
+ p_addr[2] = (mac_h >> 8) & 0xff;
+ p_addr[3] = mac_h & 0xff;
+ p_addr[4] = (mac_l >> 8) & 0xff;
+ p_addr[5] = mac_l & 0xff;
}
/*
@@ -2297,9 +2298,6 @@
return eth_config_reg;
}
-#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
-
-
/*
* eth_port_read_smi_reg - Read PHY registers
*
@@ -2331,24 +2329,24 @@
spin_lock_irqsave(&mv64340_eth_phy_lock, flags);
/* wait for the SMI register to become available */
- for (i=0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+ for (i = 0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
if (i == PHY_WAIT_ITERATIONS) {
printk("mv64340 PHY busy timeout, port %d\n", port_num);
goto out;
}
- udelay(10);
+ udelay(PHY_WAIT_MICRO_SECONDS);
}
MV_WRITE(MV64340_ETH_SMI_REG,
(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
/* now wait for the data to be valid */
- for (i=0; !(MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
+ for (i = 0; !(MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
if (i == PHY_WAIT_ITERATIONS) {
printk("mv64340 PHY read timeout, port %d\n", port_num);
goto out;
}
- udelay(10);
+ udelay(PHY_WAIT_MICRO_SECONDS);
}
*value = MV_READ(MV64340_ETH_SMI_REG) & 0xffff;
@@ -2390,13 +2388,13 @@
spin_lock_irqsave(&mv64340_eth_phy_lock, flags);
/* wait for the SMI register to become available */
- for (i=0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+ for (i = 0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
if (i == PHY_WAIT_ITERATIONS) {
printk("mv64340 PHY busy timeout, port %d\n",
eth_port_num);
goto out;
}
- udelay(10);
+ udelay(PHY_WAIT_MICRO_SECONDS);
}
MV_WRITE(MV64340_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
@@ -2470,7 +2468,7 @@
current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
- mp->tx_skb[tx_desc_curr] = (struct sk_buff*) p_pkt_info->return_info;
+ mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;
command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC |
ETH_BUFFER_OWNED_BY_DMA;
@@ -2537,7 +2535,7 @@
command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
- mp->tx_skb[tx_desc_curr] = (struct sk_buff *) p_pkt_info->return_info;
+ mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;
/* Set last desc with DMA ownership and interrupt enable. */
^ permalink raw reply [flat|nested] 18+ messages in thread