author    Jeff Garzik <jgarzik@pobox.com>  2004-08-16 06:03:58 -0400
committer Jeff Garzik <jgarzik@pobox.com>  2004-08-16 06:03:58 -0400
commit    997037cccca30be1af6b42e6d0f7ffb3c64ebfd0 (patch)
tree      c659fbfc67aadf35ce93b7106de2f9ef18891be7 /drivers
parent    4d8021616effa7a49b6df2ad0083dcce6016a3a7 (diff)
parent    803dc86ef422c1b8f81ef380b0ff0673b7b1b4de (diff)
Merge pobox.com:/spare/repo/linux-2.6
into pobox.com:/spare/repo/netdev-2.6/ALL
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/8139too.c | 4
-rw-r--r--  drivers/net/8390.c | 36
-rw-r--r--  drivers/net/8390.h | 12
-rw-r--r--  drivers/net/Kconfig | 45
-rw-r--r--  drivers/net/Makefile | 3
-rw-r--r--  drivers/net/acenic.c | 156
-rw-r--r--  drivers/net/acenic.h | 2
-rw-r--r--  drivers/net/e100.c | 89
-rw-r--r--  drivers/net/e1000/e1000.h | 24
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 146
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 16
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 3
-rw-r--r--  drivers/net/e1000/e1000_main.c | 486
-rw-r--r--  drivers/net/e1000/e1000_param.c | 46
-rw-r--r--  drivers/net/eepro100.c | 17
-rw-r--r--  drivers/net/epic100.c | 432
-rw-r--r--  drivers/net/gianfar.c | 1921
-rw-r--r--  drivers/net/gianfar.h | 537
-rw-r--r--  drivers/net/gianfar_ethtool.c | 484
-rw-r--r--  drivers/net/gianfar_phy.c | 504
-rw-r--r--  drivers/net/gianfar_phy.h | 192
-rw-r--r--  drivers/net/gt64240eth.h | 402
-rw-r--r--  drivers/net/gt96100eth.c | 119
-rw-r--r--  drivers/net/mv643xx_eth.c | 2646
-rw-r--r--  drivers/net/mv643xx_eth.h | 601
-rw-r--r--  drivers/net/natsemi.c | 15
-rw-r--r--  drivers/net/r8169.c | 653
-rw-r--r--  drivers/net/sk98lin/h/skdrv2nd.h | 54
-rw-r--r--  drivers/net/sk98lin/skaddr.c | 4
-rw-r--r--  drivers/net/sk98lin/skge.c | 742
-rw-r--r--  drivers/net/via-rhine.c | 687
-rw-r--r--  drivers/net/via-velocity.c | 566
-rw-r--r--  drivers/net/via-velocity.h | 4
-rw-r--r--  drivers/net/wireless/airport.c | 34
-rw-r--r--  drivers/net/wireless/hermes.c | 12
-rw-r--r--  drivers/net/wireless/hermes.h | 133
-rw-r--r--  drivers/net/wireless/hermes_rid.h | 99
-rw-r--r--  drivers/net/wireless/ieee802_11.h | 1
-rw-r--r--  drivers/net/wireless/orinoco.c | 2485
-rw-r--r--  drivers/net/wireless/orinoco.h | 36
-rw-r--r--  drivers/net/wireless/orinoco_cs.c | 70
-rw-r--r--  drivers/net/wireless/orinoco_pci.c | 52
-rw-r--r--  drivers/net/wireless/orinoco_plx.c | 204
-rw-r--r--  drivers/net/wireless/orinoco_tmd.c | 56
44 files changed, 11196 insertions, 3634 deletions
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 58290c5bbc397a..e62be9137b2311 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -613,7 +613,7 @@ static int rtl8139_open (struct net_device *dev);
static int mdio_read (struct net_device *dev, int phy_id, int location);
static void mdio_write (struct net_device *dev, int phy_id, int location,
int val);
-static inline void rtl8139_start_thread(struct net_device *dev);
+static void rtl8139_start_thread(struct net_device *dev);
static void rtl8139_tx_timeout (struct net_device *dev);
static void rtl8139_init_ring (struct net_device *dev);
static int rtl8139_start_xmit (struct sk_buff *skb,
@@ -1643,7 +1643,7 @@ static int rtl8139_thread (void *data)
complete_and_exit (&tp->thr_exited, 0);
}
-static inline void rtl8139_start_thread(struct net_device *dev)
+static void rtl8139_start_thread(struct net_device *dev)
{
struct rtl8139_private *tp = dev->priv;
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index 22e7dfb4766618..4f95132c7b3d3f 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -41,6 +41,7 @@
module by all drivers that require it.
Alan Cox : Spinlocking work, added 'BUG_83C690'
Paul Gortmaker : Separate out Tx timeout code from Tx path.
+ Paul Gortmaker : Remove old unused single Tx buffer code.
Sources:
The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
@@ -289,8 +290,6 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
-#ifdef EI_PINGPONG
-
/*
* We have two Tx slots available for use. Find the first free
* slot, and then perform some sanity checks. With two Tx bufs,
@@ -309,7 +308,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
else if (ei_local->tx2 == 0)
{
- output_page = ei_local->tx_start_page + TX_1X_PAGES;
+ output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
if (ei_debug && ei_local->tx1 > 0)
printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
@@ -366,28 +365,6 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
else
netif_start_queue(dev);
-#else /* EI_PINGPONG */
-
- /*
- * Only one Tx buffer in use. You need two Tx bufs to come close to
- * back-to-back transmits. Expect a 20 -> 25% performance hit on
- * reasonable hardware if you only use one Tx buffer.
- */
-
- if (length == send_length)
- ei_block_output(dev, length, skb->data, ei_local->tx_start_page);
- else {
- memset(scratch, 0, ETH_ZLEN);
- memcpy(scratch, skb->data, skb->len);
- ei_block_output(dev, ETH_ZLEN, scratch, ei_local->tx_start_page);
- }
- ei_local->txing = 1;
- NS8390_trigger_send(dev, send_length, ei_local->tx_start_page);
- dev->trans_start = jiffies;
- netif_stop_queue(dev);
-
-#endif /* EI_PINGPONG */
-
/* Turn 8390 interrupts back on. */
ei_local->irqlock = 0;
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -590,8 +567,6 @@ static void ei_tx_intr(struct net_device *dev)
outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
-#ifdef EI_PINGPONG
-
/*
* There are two Tx buffers, see which one finished, and trigger
* the send of another one if it exists.
@@ -634,13 +609,6 @@ static void ei_tx_intr(struct net_device *dev)
// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
// dev->name, ei_local->lasttx);
-#else /* EI_PINGPONG */
- /*
- * Single Tx buffer: mark it free so another packet can be loaded.
- */
- ei_local->txing = 0;
-#endif
-
/* Minimize Tx latency: update the statistics after we restart TXing. */
if (status & ENTSR_COL)
ei_local->stat.collisions++;
diff --git a/drivers/net/8390.h b/drivers/net/8390.h
index 58fd65e03ce66c..948723b72d4ada 100644
--- a/drivers/net/8390.h
+++ b/drivers/net/8390.h
@@ -12,17 +12,7 @@
#include <linux/ioport.h>
#include <linux/skbuff.h>
-#define TX_2X_PAGES 12
-#define TX_1X_PAGES 6
-
-/* Should always use two Tx slots to get back-to-back transmits. */
-#define EI_PINGPONG
-
-#ifdef EI_PINGPONG
-#define TX_PAGES TX_2X_PAGES
-#else
-#define TX_PAGES TX_1X_PAGES
-#endif
+#define TX_PAGES 12 /* Two Tx slots */
#define ETHER_ADDR_LEN 6
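[Editor's note: with EI_PINGPONG removed, the 8390 core always runs the two-slot ("ping-pong") transmit scheme -- TX_PAGES is fixed at 12, giving two buffers of TX_PAGES/2 pages, and ei_start_xmit() alternates between them. A minimal sketch of the slot selection, simplified from the hunk above (illustrative only, not the driver's literal code):

	/* Two Tx slots of TX_PAGES/2 pages each; whichever is free takes the frame. */
	static int pick_tx_slot(struct ei_device *ei_local, int send_length,
				int *output_page)
	{
		if (ei_local->tx1 == 0) {
			*output_page = ei_local->tx_start_page;			/* slot 0 */
			ei_local->tx1 = send_length;
		} else if (ei_local->tx2 == 0) {
			*output_page = ei_local->tx_start_page + TX_PAGES/2;	/* slot 1 */
			ei_local->tx2 = send_length;
		} else {
			return -1;	/* both slots busy: caller stops the queue */
		}
		return 0;
	}
]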
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 882837793f3138..171c46f1c33c51 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1746,6 +1746,7 @@ config VIA_VELOCITY
tristate "VIA Velocity support"
depends on NET_PCI && PCI
select CRC32
+ select CRC16
select MII
help
If you have a VIA "Velocity" based network card say Y here.
@@ -2043,6 +2044,11 @@ config R8169
To compile this driver as a module, choose M here: the module
will be called r8169. This is recommended.
+config R8169_NAPI
+ bool "Use Rx and Tx Polling (NAPI) (EXPERIMENTAL)"
+ depends on R8169 && EXPERIMENTAL
+
+
config SK98LIN
tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
depends on PCI
@@ -2131,6 +2137,45 @@ config TIGON3
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
+config GIANFAR
+ tristate "Gianfar Ethernet"
+ depends on 85xx
+ help
+ This driver supports the Gigabit TSEC on the MPC85xx
+ family of chips, and the FEC on the 8540
+
+config GFAR_NAPI
+ bool "NAPI Support"
+ depends on GIANFAR
+
+config MV643XX_ETH
+ tristate "MV-643XX Ethernet support"
+ depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX
+ help
+ This driver supports the gigabit Ethernet on the Marvell MV643XX
+ chipset which is used in the Momenco Ocelot C and Jaguar ATX.
+
+config MV643XX_ETH_0
+ bool "MV-643XX Port 0"
+ depends on MV643XX_ETH
+ help
+ This enables support for Port 0 of the Marvell MV643XX Gigabit
+ Ethernet.
+
+config MV643XX_ETH_1
+ bool "MV-643XX Port 1"
+ depends on MV643XX_ETH
+ help
+ This enables support for Port 1 of the Marvell MV643XX Gigabit
+ Ethernet.
+
+config MV643XX_ETH_2
+ bool "MV-643XX Port 2"
+ depends on MV643XX_ETH
+ help
+ This enables support for Port 2 of the Marvell MV643XX Gigabit
+ Ethernet.
+
endmenu
#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 2ae71cf8b4b0bf..9ebd64e54253e3 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_IBM_EMAC) += ibm_emac/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_BONDING) += bonding/
+obj-$(CONFIG_GIANFAR) += gianfar.o gianfar_ethtool.o gianfar_phy.o
#
# link order important here
@@ -95,6 +96,8 @@ obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_FORCEDETH) += forcedeth.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
+obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+
obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 90ed3329ad9d19..cc56ca3ac02a85 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -52,6 +52,7 @@
#include <linux/config.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -425,13 +426,15 @@ static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
-MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(trace, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(tx_coal_tick, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(tx_ratio, "1-" __MODULE_STRING(8) "i");
+
+static int num_params;
+module_param_array(link, int, num_params, 0);
+module_param_array(trace, int, num_params, 0);
+module_param_array(tx_coal_tick, int, num_params, 0);
+module_param_array(max_tx_desc, int, num_params, 0);
+module_param_array(rx_coal_tick, int, num_params, 0);
+module_param_array(max_rx_desc, int, num_params, 0);
+module_param_array(tx_ratio, int, num_params, 0);
MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
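[Editor's note: the hunk above replaces the old MODULE_PARM(var, "1-8i") syntax with module_param_array(), which takes the element type, a counter variable the macro fills in with how many values were supplied on the command line, and a sysfs permission mask. A minimal sketch of the 2.6.8-era usage with a hypothetical parameter name (not acenic code):

	/* Up to 8 per-board values: insmod mydrv.ko coal_ticks=400,800 */
	static int coal_ticks[8];
	static int num_coal_ticks;
	module_param_array(coal_ticks, int, num_coal_ticks, 0444);
	MODULE_PARM_DESC(coal_ticks, "per-board interrupt coalescing ticks");
]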
@@ -474,6 +477,7 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
ap = dev->priv;
ap->pdev = pdev;
+ ap->name = pci_name(pdev);
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
#if ACENIC_DO_VLAN
@@ -516,7 +520,7 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
"access - was not enabled by BIOS/Firmware\n",
- dev->name);
+ ap->name);
ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
pci_write_config_word(ap->pdev, PCI_COMMAND,
ap->pci_command);
@@ -539,55 +543,40 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
if (!ap->regs) {
printk(KERN_ERR "%s: Unable to map I/O register, "
"AceNIC %i will be disabled.\n",
- dev->name, boards_found);
+ ap->name, boards_found);
goto fail_free_netdev;
}
switch(pdev->vendor) {
case PCI_VENDOR_ID_ALTEON:
if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
- strncpy(ap->name, "Farallon PN9100-T "
- "Gigabit Ethernet", sizeof (ap->name));
printk(KERN_INFO "%s: Farallon PN9100-T ",
- dev->name);
+ ap->name);
} else {
- strncpy(ap->name, "AceNIC Gigabit Ethernet",
- sizeof (ap->name));
printk(KERN_INFO "%s: Alteon AceNIC ",
- dev->name);
+ ap->name);
}
break;
case PCI_VENDOR_ID_3COM:
- strncpy(ap->name, "3Com 3C985 Gigabit Ethernet",
- sizeof (ap->name));
- printk(KERN_INFO "%s: 3Com 3C985 ", dev->name);
+ printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
break;
case PCI_VENDOR_ID_NETGEAR:
- strncpy(ap->name, "NetGear GA620 Gigabit Ethernet",
- sizeof (ap->name));
- printk(KERN_INFO "%s: NetGear GA620 ", dev->name);
+ printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
break;
case PCI_VENDOR_ID_DEC:
if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
- strncpy(ap->name, "Farallon PN9000-SX "
- "Gigabit Ethernet", sizeof (ap->name));
printk(KERN_INFO "%s: Farallon PN9000-SX ",
- dev->name);
+ ap->name);
break;
}
case PCI_VENDOR_ID_SGI:
- strncpy(ap->name, "SGI AceNIC Gigabit Ethernet",
- sizeof (ap->name));
- printk(KERN_INFO "%s: SGI AceNIC ", dev->name);
+ printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
break;
default:
- strncpy(ap->name, "Unknown AceNIC based Gigabit "
- "Ethernet", sizeof (ap->name));
- printk(KERN_INFO "%s: Unknown AceNIC ", dev->name);
+ printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
break;
}
- ap->name [sizeof (ap->name) - 1] = '\0';
printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
#ifdef __sparc__
printk("irq %s\n", __irq_itoa(pdev->irq));
@@ -622,6 +611,7 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
printk(KERN_ERR "acenic: device registration failed\n");
goto fail_uninit;
}
+ ap->name = dev->name;
if (ap->pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
@@ -641,7 +631,7 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
static void __devexit acenic_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs *regs = ap->regs;
short i;
@@ -752,7 +742,7 @@ module_exit(acenic_exit);
static void ace_free_descriptors(struct net_device *dev)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
int size;
if (ap->rx_std_ring != NULL) {
@@ -802,7 +792,7 @@ static void ace_free_descriptors(struct net_device *dev)
static int ace_allocate_descriptors(struct net_device *dev)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
int size;
size = (sizeof(struct rx_desc) *
@@ -873,7 +863,7 @@ static void ace_init_cleanup(struct net_device *dev)
{
struct ace_private *ap;
- ap = dev->priv;
+ ap = netdev_priv(dev);
ace_free_descriptors(dev);
@@ -921,7 +911,7 @@ static int __init ace_init(struct net_device *dev)
short i;
unsigned char cache_size;
- ap = dev->priv;
+ ap = netdev_priv(dev);
regs = ap->regs;
board_idx = ap->board_idx;
@@ -1387,7 +1377,7 @@ static int __init ace_init(struct net_device *dev)
if (board_idx == BOARD_IDX_OVERFLOW) {
printk(KERN_WARNING "%s: more than %i NICs detected, "
"ignoring module parameters!\n",
- dev->name, ACE_MAX_MOD_PARMS);
+ ap->name, ACE_MAX_MOD_PARMS);
} else if (board_idx >= 0) {
if (tx_coal_tick[board_idx])
writel(tx_coal_tick[board_idx],
@@ -1426,7 +1416,7 @@ static int __init ace_init(struct net_device *dev)
if (option & 0x01) {
printk(KERN_INFO "%s: Setting half duplex link\n",
- dev->name);
+ ap->name);
tmp &= ~LNK_FULL_DUPLEX;
}
if (option & 0x02)
@@ -1439,7 +1429,7 @@ static int __init ace_init(struct net_device *dev)
tmp |= LNK_1000MB;
if ((option & 0x70) == 0) {
printk(KERN_WARNING "%s: No media speed specified, "
- "forcing auto negotiation\n", dev->name);
+ "forcing auto negotiation\n", ap->name);
tmp |= LNK_NEGOTIATE | LNK_1000MB |
LNK_100MB | LNK_10MB;
}
@@ -1447,12 +1437,12 @@ static int __init ace_init(struct net_device *dev)
tmp |= LNK_NEG_FCTL;
else
printk(KERN_INFO "%s: Disabling flow control "
- "negotiation\n", dev->name);
+ "negotiation\n", ap->name);
if (option & 0x200)
tmp |= LNK_RX_FLOW_CTL_Y;
if ((option & 0x400) && (ap->version >= 2)) {
printk(KERN_INFO "%s: Enabling TX flow control\n",
- dev->name);
+ ap->name);
tmp |= LNK_TX_FLOW_CTL_Y;
}
}
@@ -1509,7 +1499,7 @@ static int __init ace_init(struct net_device *dev)
cpu_relax();
if (!ap->fw_running) {
- printk(KERN_ERR "%s: Firmware NOT running!\n", dev->name);
+ printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);
ace_dump_trace(ap);
writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
@@ -1542,13 +1532,13 @@ static int __init ace_init(struct net_device *dev)
ace_load_std_rx_ring(ap, RX_RING_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
- dev->name);
+ ap->name);
if (ap->version >= 2) {
if (!test_and_set_bit(0, &ap->mini_refill_busy))
ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling "
- "the RX mini ring\n", dev->name);
+ "the RX mini ring\n", ap->name);
}
return 0;
@@ -1564,7 +1554,7 @@ static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
struct ace_regs *regs;
int board_idx;
- ap = dev->priv;
+ ap = netdev_priv(dev);
regs = ap->regs;
board_idx = ap->board_idx;
@@ -1604,7 +1594,7 @@ static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
static void ace_watchdog(struct net_device *data)
{
struct net_device *dev = data;
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs *regs = ap->regs;
/*
@@ -1878,13 +1868,13 @@ static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
struct ace_private *ap;
- ap = dev->priv;
+ ap = netdev_priv(dev);
while (evtcsm != evtprd) {
switch (ap->evt_ring[evtcsm].evt) {
case E_FW_RUNNING:
printk(KERN_INFO "%s: Firmware up and running\n",
- dev->name);
+ ap->name);
ap->fw_running = 1;
wmb();
break;
@@ -1899,7 +1889,7 @@ static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
u32 state = readl(&ap->regs->GigLnkState);
printk(KERN_WARNING "%s: Optical link UP "
"(%s Duplex, Flow Control: %s%s)\n",
- dev->name,
+ ap->name,
state & LNK_FULL_DUPLEX ? "Full":"Half",
state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
@@ -1907,15 +1897,15 @@ static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
}
case E_C_LINK_DOWN:
printk(KERN_WARNING "%s: Optical link DOWN\n",
- dev->name);
+ ap->name);
break;
case E_C_LINK_10_100:
printk(KERN_WARNING "%s: 10/100BaseT link "
- "UP\n", dev->name);
+ "UP\n", ap->name);
break;
default:
printk(KERN_ERR "%s: Unknown optical link "
- "state %02x\n", dev->name, code);
+ "state %02x\n", ap->name, code);
}
break;
}
@@ -1923,19 +1913,19 @@ static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
switch(ap->evt_ring[evtcsm].code) {
case E_C_ERR_INVAL_CMD:
printk(KERN_ERR "%s: invalid command error\n",
- dev->name);
+ ap->name);
break;
case E_C_ERR_UNIMP_CMD:
printk(KERN_ERR "%s: unimplemented command "
- "error\n", dev->name);
+ "error\n", ap->name);
break;
case E_C_ERR_BAD_CFG:
printk(KERN_ERR "%s: bad config error\n",
- dev->name);
+ ap->name);
break;
default:
printk(KERN_ERR "%s: unknown error %02x\n",
- dev->name, ap->evt_ring[evtcsm].code);
+ ap->name, ap->evt_ring[evtcsm].code);
}
break;
case E_RESET_JUMBO_RNG:
@@ -1964,13 +1954,13 @@ static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
ap->jumbo = 0;
ap->rx_jumbo_skbprd = 0;
printk(KERN_INFO "%s: Jumbo ring flushed\n",
- dev->name);
+ ap->name);
clear_bit(0, &ap->jumbo_refill_busy);
break;
}
default:
printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
- dev->name, ap->evt_ring[evtcsm].evt);
+ ap->name, ap->evt_ring[evtcsm].evt);
}
evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
}
@@ -1981,7 +1971,7 @@ static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
u32 idx;
int mini_count = 0, std_count = 0;
@@ -2108,7 +2098,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
static inline void ace_tx_int(struct net_device *dev,
u32 txcsm, u32 idx)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
do {
struct sk_buff *skb;
@@ -2181,7 +2171,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
u32 txcsm, rxretcsm, rxretprd;
u32 evtcsm, evtprd;
- ap = dev->priv;
+ ap = netdev_priv(dev);
regs = ap->regs;
/*
@@ -2304,7 +2294,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
#if ACENIC_DO_VLAN
static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
unsigned long flags;
local_irq_save(flags);
@@ -2319,7 +2309,7 @@ static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
unsigned long flags;
local_irq_save(flags);
@@ -2340,7 +2330,7 @@ static int ace_open(struct net_device *dev)
struct ace_regs *regs;
struct cmd cmd;
- ap = dev->priv;
+ ap = netdev_priv(dev);
regs = ap->regs;
if (!(ap->fw_running)) {
@@ -2407,7 +2397,7 @@ static int ace_close(struct net_device *dev)
*/
netif_stop_queue(dev);
- ap = dev->priv;
+ ap = netdev_priv(dev);
regs = ap->regs;
if (ap->promisc) {
@@ -2522,7 +2512,7 @@ ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs *regs = ap->regs;
struct tx_desc *desc;
u32 idx, flagsize;
@@ -2661,7 +2651,7 @@ overflow:
static int ace_change_mtu(struct net_device *dev, int new_mtu)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs *regs = ap->regs;
if (new_mtu > ACE_JUMBO_MTU)
@@ -2698,7 +2688,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs *regs = ap->regs;
u32 link;
@@ -2751,7 +2741,7 @@ static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs *regs = ap->regs;
u32 link, speed;
@@ -2814,7 +2804,7 @@ static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
static void ace_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
strlcpy(info->driver, "acenic", sizeof(info->driver));
snprintf(info->version, sizeof(info->version), "%i.%i.%i",
@@ -2844,7 +2834,7 @@ static int ace_set_mac_addr(struct net_device *dev, void *p)
da = (u8 *)dev->dev_addr;
- regs = ((struct ace_private *)dev->priv)->regs;
+ regs = ((struct ace_private *)netdev_priv(dev))->regs;
writel(da[0] << 8 | da[1], &regs->MacAddrHi);
writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
&regs->MacAddrLo);
@@ -2860,7 +2850,7 @@ static int ace_set_mac_addr(struct net_device *dev, void *p)
static void ace_set_multicast_list(struct net_device *dev)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs *regs = ap->regs;
struct cmd cmd;
@@ -2914,7 +2904,7 @@ static void ace_set_multicast_list(struct net_device *dev)
static struct net_device_stats *ace_get_stats(struct net_device *dev)
{
- struct ace_private *ap = dev->priv;
+ struct ace_private *ap = netdev_priv(dev);
struct ace_mac_stats *mac_stats =
(struct ace_mac_stats *)ap->regs->Stats;
@@ -2997,12 +2987,12 @@ int __init ace_load_firmware(struct net_device *dev)
struct ace_private *ap;
struct ace_regs *regs;
- ap = dev->priv;
+ ap = netdev_priv(dev);
regs = ap->regs;
if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
printk(KERN_ERR "%s: trying to download firmware while the "
- "CPU is running!\n", dev->name);
+ "CPU is running!\n", ap->name);
return -EFAULT;
}
@@ -3178,6 +3168,7 @@ static void __init eeprom_stop(struct ace_regs *regs)
static int __init read_eeprom_byte(struct net_device *dev,
unsigned long offset)
{
+ struct ace_private *ap;
struct ace_regs *regs;
unsigned long flags;
u32 local;
@@ -3187,10 +3178,11 @@ static int __init read_eeprom_byte(struct net_device *dev,
if (!dev) {
printk(KERN_ERR "No device!\n");
result = -ENODEV;
- goto eeprom_read_error;
+ goto out;
}
- regs = ((struct ace_private *)dev->priv)->regs;
+ ap = netdev_priv(dev);
+ regs = ap->regs;
/*
* Don't take interrupts on this CPU will bit banging
@@ -3203,7 +3195,7 @@ static int __init read_eeprom_byte(struct net_device *dev,
eeprom_prep(regs, EEPROM_WRITE_SELECT);
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
- printk(KERN_ERR "%s: Unable to sync eeprom\n", dev->name);
+ printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
result = -EIO;
goto eeprom_read_error;
}
@@ -3212,7 +3204,7 @@ static int __init read_eeprom_byte(struct net_device *dev,
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to set address byte 0\n",
- dev->name);
+ ap->name);
result = -EIO;
goto eeprom_read_error;
}
@@ -3221,7 +3213,7 @@ static int __init read_eeprom_byte(struct net_device *dev,
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to set address byte 1\n",
- dev->name);
+ ap->name);
result = -EIO;
goto eeprom_read_error;
}
@@ -3231,7 +3223,7 @@ static int __init read_eeprom_byte(struct net_device *dev,
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
- dev->name);
+ ap->name);
result = -EIO;
goto eeprom_read_error;
}
@@ -3288,7 +3280,7 @@ static int __init read_eeprom_byte(struct net_device *dev,
eeprom_read_error:
printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
- dev->name, offset);
+ ap->name, offset);
goto out;
}
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index c4624b85d98569..6eb134e7b57430 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -693,7 +693,7 @@ struct ace_private
int board_idx;
u16 pci_command;
u8 pci_latency;
- char name[48];
+ const char *name;
#ifdef INDEX_DEBUG
spinlock_t debug_lock
__attribute__ ((aligned (SMP_CACHE_BYTES)));
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 8855b20e3d8e31..a39b58c8102e8a 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -87,9 +87,8 @@
* cb_to_use is the next CB to use for queuing a command; cb_to_clean
* is the next CB to check for completion; cb_to_send is the first
* CB to start on in case of a previous failure to resume. CB clean
- * up happens in interrupt context in response to a CU interrupt, or
- * in dev->poll in the case where NAPI is enabled. cbs_avail keeps
- * track of number of free CB resources available.
+ * up happens in interrupt context in response to a CU interrupt.
+ * cbs_avail keeps track of number of free CB resources available.
*
* Hardware padding of short packets to minimum packet size is
* enabled. 82557 pads with 7Eh, while the later controllers pad
@@ -112,9 +111,8 @@
* replacement RFDs cannot be allocated, or the RU goes non-active,
* the RU must be restarted. Frame arrival generates an interrupt,
* and Rx indication and re-allocation happen in the same context,
- * therefore no locking is required. If NAPI is enabled, this work
- * happens in dev->poll. A software-generated interrupt is gen-
- * erated from the watchdog to recover from a failed allocation
+ * therefore no locking is required. A software-generated interrupt
+ * is generated from the watchdog to recover from a failed allocation
* senario where all Rx resources have been indicated and none re-
* placed.
*
@@ -126,8 +124,6 @@
* supported. Tx Scatter/Gather is not supported. Jumbo Frames is
* not supported (hardware limitation).
*
- * NAPI support is enabled with CONFIG_E100_NAPI.
- *
* MagicPacket(tm) WoL support is enabled/disabled via ethtool.
*
* Thanks to JC (jchapman@katalix.com) for helping with
@@ -158,7 +154,8 @@
#define DRV_NAME "e100"
-#define DRV_VERSION "3.0.18"
+#define DRV_EXT "-NAPI"
+#define DRV_VERSION "3.0.27-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation"
#define PFX DRV_NAME ": "
@@ -201,6 +198,8 @@ static struct pci_device_id e100_id_table[] = {
INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
@@ -242,6 +241,7 @@ enum phy {
phy_nsc_tx = 0x5C002000,
phy_82562_et = 0x033002A8,
phy_82562_em = 0x032002A8,
+ phy_82562_ek = 0x031002A8,
phy_82562_eh = 0x017002A8,
phy_unknown = 0xFFFFFFFF,
};
@@ -330,11 +330,16 @@ enum eeprom_op {
};
enum eeprom_offsets {
+ eeprom_cnfg_mdix = 0x03,
eeprom_id = 0x0A,
eeprom_config_asf = 0x0D,
eeprom_smbus_addr = 0x90,
};
+enum eeprom_cnfg_mdix {
+ eeprom_mdix_enabled = 0x0080,
+};
+
enum eeprom_id {
eeprom_id_wol = 0x0020,
};
@@ -350,10 +355,12 @@ enum cb_status {
};
enum cb_command {
+ cb_nop = 0x0000,
cb_iaaddr = 0x0001,
cb_config = 0x0002,
cb_multi = 0x0003,
cb_tx = 0x0004,
+ cb_ucode = 0x0005,
cb_dump = 0x0006,
cb_tx_sf = 0x0008,
cb_cid = 0x1f00,
@@ -428,12 +435,14 @@ struct multi {
};
/* Important: keep total struct u32-aligned */
+#define UCODE_SIZE 134
struct cb {
u16 status;
u16 command;
u32 link;
union {
u8 iaaddr[ETH_ALEN];
+ u32 ucode[UCODE_SIZE];
struct config config;
struct multi multi;
struct {
@@ -548,6 +557,7 @@ struct nic {
u32 rx_fc_pause;
u32 rx_fc_unsupported;
u32 rx_tco_frames;
+ u32 rx_over_length_errors;
u8 rev_id;
u16 leds;
@@ -980,6 +990,27 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
+static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+{
+ int i;
+ static const u32 ucode[UCODE_SIZE] = {
+ /* NFS packets are misinterpreted as TCO packets and
+ * incorrectly routed to the BMC over SMBus. This
+ * microcode patch checks the fragmented IP bit in the
+ * NFS/UDP header to distinguish between NFS and TCO. */
+ 0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF,
+ 0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000,
+ 0x00906EFD, 0x00900EFD, 0x00E00EF8,
+ };
+
+ if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
+ for(i = 0; i < UCODE_SIZE; i++)
+ cb->u.ucode[i] = cpu_to_le32(ucode[i]);
+ cb->command = cpu_to_le16(cb_ucode);
+ } else
+ cb->command = cpu_to_le16(cb_nop);
+}
+
static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
@@ -1045,7 +1076,9 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
}
- if(nic->mac >= mac_82550_D102)
+ if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
+ (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
+ (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)))
/* enable/disable MDI/MDI-X auto-switching */
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
@@ -1069,6 +1102,8 @@ static int e100_hw_init(struct nic *nic)
return err;
if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
return err;
+ if((err = e100_exec_cb(nic, NULL, e100_load_ucode)))
+ return err;
if((err = e100_exec_cb(nic, NULL, e100_configure)))
return err;
if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
@@ -1143,9 +1178,11 @@ static void e100_update_stats(struct nic *nic)
ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
le32_to_cpu(s->tx_lost_crs);
ns->rx_dropped += le32_to_cpu(s->rx_resource_errors);
- ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors);
+ ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
+ nic->rx_over_length_errors;
ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
+ ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
le32_to_cpu(s->rx_alignment_errors) +
@@ -1456,18 +1493,14 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
dev_kfree_skb_any(skb);
} else if(actual_size > nic->netdev->mtu + VLAN_ETH_HLEN) {
/* Don't indicate oversized frames */
- nic->net_stats.rx_over_errors++;
+ nic->rx_over_length_errors++;
nic->net_stats.rx_dropped++;
dev_kfree_skb_any(skb);
} else {
nic->net_stats.rx_packets++;
nic->net_stats.rx_bytes += actual_size;
nic->netdev->last_rx = jiffies;
-#ifdef CONFIG_E100_NAPI
netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
if(work_done)
(*work_done)++;
}
@@ -1562,20 +1595,12 @@ static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
if(stat_ack & stat_ack_rnr)
nic->ru_running = 0;
-#ifdef CONFIG_E100_NAPI
e100_disable_irq(nic);
netif_rx_schedule(netdev);
-#else
- if(stat_ack & stat_ack_rx)
- e100_rx_clean(nic, NULL, 0);
- if(stat_ack & stat_ack_tx)
- e100_tx_clean(nic);
-#endif
return IRQ_HANDLED;
}
-#ifdef CONFIG_E100_NAPI
static int e100_poll(struct net_device *netdev, int *budget)
{
struct nic *nic = netdev_priv(netdev);
@@ -1598,7 +1623,6 @@ static int e100_poll(struct net_device *netdev, int *budget)
return 1;
}
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
@@ -1641,7 +1665,7 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
- return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1055) &&
+ return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
(nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
@@ -1961,18 +1985,27 @@ static int e100_diag_test_count(struct net_device *netdev)
static void e100_diag_test(struct net_device *netdev,
struct ethtool_test *test, u64 *data)
{
+ struct ethtool_cmd cmd;
struct nic *nic = netdev_priv(netdev);
- int i;
+ int i, err;
memset(data, 0, E100_TEST_LEN * sizeof(u64));
data[0] = !mii_link_ok(&nic->mii);
data[1] = e100_eeprom_load(nic);
if(test->flags & ETH_TEST_FL_OFFLINE) {
+
+ /* save speed, duplex & autoneg settings */
+ err = mii_ethtool_gset(&nic->mii, &cmd);
+
if(netif_running(netdev))
e100_down(nic);
data[2] = e100_self_test(nic);
data[3] = e100_loopback_test(nic, lb_mac);
data[4] = e100_loopback_test(nic, lb_phy);
+
+ /* restore speed, duplex & autoneg settings */
+ err = mii_ethtool_sset(&nic->mii, &cmd);
+
if(netif_running(netdev))
e100_up(nic);
}
@@ -2135,10 +2168,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
netdev->tx_timeout = e100_tx_timeout;
netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
-#ifdef CONFIG_E100_NAPI
netdev->poll = e100_poll;
netdev->weight = E100_NAPI_WEIGHT;
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = e100_netpoll;
#endif
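[Editor's note: the e100 changes above drop CONFIG_E100_NAPI and make the NAPI path unconditional -- the ISR only acknowledges the cause, disables interrupts, and schedules the device, while Rx/Tx cleanup runs from netdev->poll. A rough sketch of the 2.6-era poll contract the driver now relies on, simplified from e100_poll() with the helper names visible in the hunks above (treat details as illustrative):

	static int example_poll(struct net_device *netdev, int *budget)
	{
		struct nic *nic = netdev_priv(netdev);
		int work_to_do = min(*budget, netdev->quota);
		unsigned int work_done = 0;

		e100_rx_clean(nic, &work_done, work_to_do);	/* indicate Rx frames */
		e100_tx_clean(nic);				/* reclaim Tx buffers */

		*budget -= work_done;
		netdev->quota -= work_done;

		if (work_done < work_to_do) {			/* ring drained */
			netif_rx_complete(netdev);		/* leave polling mode */
			e100_enable_irq(nic);
			return 0;
		}
		return 1;					/* more work: stay on the poll list */
	}
]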
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 067bb099e77f98..108e33145ab600 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -49,6 +49,7 @@
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/pagemap.h>
@@ -63,6 +64,7 @@
#include <linux/udp.h>
#include <net/pkt_sched.h>
#include <linux/list.h>
+#include <linux/rtnetlink.h>
#include <linux/reboot.h>
#ifdef NETIF_F_TSO
#include <net/checksum.h>
@@ -77,6 +79,8 @@
#define BAR_1 1
#define BAR_5 5
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
struct e1000_adapter;
@@ -98,11 +102,12 @@ struct e1000_adapter;
#define E1000_MAX_INTR 10
-/* How many descriptors for TX and RX ? */
+/* TX/RX descriptor defines */
#define E1000_DEFAULT_TXD 256
#define E1000_MAX_TXD 256
#define E1000_MIN_TXD 80
#define E1000_MAX_82544_TXD 4096
+
#define E1000_DEFAULT_RXD 256
#define E1000_MAX_RXD 256
#define E1000_MIN_RXD 80
@@ -123,14 +128,11 @@ struct e1000_adapter;
#define E1000_TX_HEAD_ADDR_SHIFT 7
#define E1000_PBA_TX_MASK 0xFFFF0000
-/* Flow Control High-Watermark: 5688 bytes below Rx FIFO size */
-#define E1000_FC_HIGH_DIFF 0x1638
-
-/* Flow Control Low-Watermark: 5696 bytes below Rx FIFO size */
-#define E1000_FC_LOW_DIFF 0x1640
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
-/* Flow Control Pause Time: 858 usec */
-#define E1000_FC_PAUSE_TIME 0x0680
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE 16
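[Editor's note: the watermark defines above are offsets below the Rx FIFO size -- 0x1638 is 5688 bytes and 0x1640 is 5696 bytes. Assuming E1000_PBA_BYTES_SHIFT converts the KB-granular PBA value to bytes and a 48 KB Rx packet-buffer allocation (illustrative numbers), the e1000_reset() computation shown later in this patch works out to:

	fc_high_water = (48 << 10) - 0x1638 = 49152 - 5688 = 43464 bytes
	fc_low_water  = (48 << 10) - 0x1640 = 49152 - 5696 = 43456 bytes
]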
@@ -153,9 +155,9 @@ struct e1000_adapter;
struct e1000_buffer {
struct sk_buff *skb;
uint64_t dma;
- unsigned long length;
unsigned long time_stamp;
- unsigned int next_to_watch;
+ uint16_t length;
+ uint16_t next_to_watch;
};
struct e1000_desc_ring {
@@ -202,7 +204,7 @@ struct e1000_adapter {
spinlock_t stats_lock;
atomic_t irq_sem;
struct work_struct tx_timeout_task;
- uint8_t fc_autoneg;
+ uint8_t fc_autoneg;
struct timer_list blink_timer;
unsigned long led_status;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 9ca716eab3e97f..2e4544eca333c4 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -88,9 +88,9 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+ { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
- { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
- { "rx_long_byte_count", E1000_STAT(stats.gorcl) }
+ { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }
};
#define E1000_STATS_LEN \
sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
@@ -170,7 +170,8 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->duplex = -1;
}
- ecmd->autoneg = (hw->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+ ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+ hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
@@ -192,6 +193,7 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if(netif_running(adapter->netdev)) {
e1000_down(adapter);
+ e1000_reset(adapter);
e1000_up(adapter);
} else
e1000_reset(adapter);
@@ -199,12 +201,13 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
-static void
+static void
e1000_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct e1000_adapter *adapter = netdev->priv;
struct e1000_hw *hw = &adapter->hw;
+
pause->autoneg =
(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -218,9 +221,9 @@ e1000_get_pauseparam(struct net_device *netdev,
}
}
-static int
+static int
e1000_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct e1000_adapter *adapter = netdev->priv;
struct e1000_hw *hw = &adapter->hw;
@@ -271,7 +274,7 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
e1000_reset(adapter);
return 0;
}
-
+
static uint32_t
e1000_get_tx_csum(struct net_device *netdev)
{
@@ -337,7 +340,7 @@ e1000_get_regs_len(struct net_device *netdev)
static void
e1000_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+ struct ethtool_regs *regs, void *p)
{
struct e1000_adapter *adapter = netdev->priv;
struct e1000_hw *hw = &adapter->hw;
@@ -418,6 +421,10 @@ e1000_get_regs(struct net_device *netdev,
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+ if(hw->mac_type >= e1000_82540 &&
+ hw->media_type == e1000_media_type_copper) {
+ regs_buff[26] = E1000_READ_REG(hw, MANC);
+ }
}
static int
@@ -438,7 +445,7 @@ e1000_get_eeprom(struct net_device *netdev,
int ret_val = 0;
uint16_t i;
- if(eeprom->len == 0)
+ if(eeprom->len == 0)
return -EINVAL;
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -446,9 +453,9 @@ e1000_get_eeprom(struct net_device *netdev,
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(uint16_t) *
+ eeprom_buff = kmalloc(sizeof(uint16_t) *
(last_word - first_word + 1), GFP_KERNEL);
- if (!eeprom_buff)
+ if(!eeprom_buff)
return -ENOMEM;
if(hw->eeprom.type == e1000_eeprom_spi)
@@ -466,9 +473,8 @@ e1000_get_eeprom(struct net_device *netdev,
for (i = 0; i < last_word - first_word + 1; i++)
le16_to_cpus(&eeprom_buff[i]);
-
- memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset%2),
- eeprom->len);
+ memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
kfree(eeprom_buff);
return ret_val;
@@ -520,6 +526,7 @@ e1000_set_eeprom(struct net_device *netdev,
le16_to_cpus(&eeprom_buff[i]);
memcpy(ptr, bytes, eeprom->len);
+
for (i = 0; i < last_word - first_word + 1; i++)
eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
@@ -575,17 +582,16 @@ static int
e1000_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
- int err;
struct e1000_adapter *adapter = netdev->priv;
e1000_mac_type mac_type = adapter->hw.mac_type;
struct e1000_desc_ring *txdr = &adapter->tx_ring;
struct e1000_desc_ring *rxdr = &adapter->rx_ring;
- struct e1000_desc_ring tx_old, tx_new;
- struct e1000_desc_ring rx_old, rx_new;
+ struct e1000_desc_ring tx_old, tx_new, rx_old, rx_new;
+ int err;
tx_old = adapter->tx_ring;
rx_old = adapter->rx_ring;
-
+
if(netif_running(adapter->netdev))
e1000_down(adapter);
@@ -600,15 +606,15 @@ e1000_set_ringparam(struct net_device *netdev,
E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
if(netif_running(adapter->netdev)) {
- /* try to get new resources before deleting old */
+ /* Try to get new resources before deleting old */
if((err = e1000_setup_rx_resources(adapter)))
goto err_setup_rx;
if((err = e1000_setup_tx_resources(adapter)))
goto err_setup_tx;
/* save the new, restore the old in order to free it,
- * then restore the new back again */
-
+ * then restore the new back again */
+
rx_new = adapter->rx_ring;
tx_new = adapter->tx_ring;
adapter->rx_ring = rx_old;
@@ -620,6 +626,7 @@ e1000_set_ringparam(struct net_device *netdev,
if((err = e1000_up(adapter)))
return err;
}
+
return 0;
err_setup_tx:
e1000_free_rx_resources(adapter);
@@ -766,13 +773,15 @@ static int
e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
{
struct net_device *netdev = adapter->netdev;
- uint32_t icr, mask, i=0;
+ uint32_t icr, mask, i=0, shared_int = TRUE;
+ uint32_t irq = adapter->pdev->irq;
*data = 0;
/* Hook up test interrupt handler just for this test */
- if(request_irq(adapter->pdev->irq, &e1000_test_intr, SA_SHIRQ,
- netdev->name, netdev)) {
+ if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+ shared_int = FALSE;
+ } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ, netdev->name, netdev)){
*data = 1;
return -1;
}
@@ -802,20 +811,22 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* Interrupt to test */
mask = 1 << i;
- /* Disable the interrupt to be reported in
- * the cause register and then force the same
- * interrupt and see if one gets posted. If
- * an interrupt was posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
- E1000_WRITE_REG(&adapter->hw, IMC, mask);
- E1000_WRITE_REG(&adapter->hw, ICS, mask);
- msec_delay(10);
-
- if(adapter->test_icr & mask) {
- *data = 3;
- break;
+ if(!shared_int) {
+ /* Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ E1000_WRITE_REG(&adapter->hw, IMC, mask);
+ E1000_WRITE_REG(&adapter->hw, ICS, mask);
+ msec_delay(10);
+
+ if(adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
}
/* Enable the interrupt to be reported in
@@ -834,20 +845,22 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
break;
}
- /* Disable the other interrupts to be reported in
- * the cause register and then force the other
- * interrupts and see if any get posted. If
- * an interrupt was posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
- E1000_WRITE_REG(&adapter->hw, IMC, ~mask);
- E1000_WRITE_REG(&adapter->hw, ICS, ~mask);
- msec_delay(10);
+ if(!shared_int) {
+ /* Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ E1000_WRITE_REG(&adapter->hw, IMC, ~mask);
+ E1000_WRITE_REG(&adapter->hw, ICS, ~mask);
+ msec_delay(10);
- if(adapter->test_icr) {
- *data = 5;
- break;
+ if(adapter->test_icr) {
+ *data = 5;
+ break;
+ }
}
}
@@ -856,7 +869,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
msec_delay(10);
/* Unhook test interrupt handler */
- free_irq(adapter->pdev->irq, netdev);
+ free_irq(irq, netdev);
return *data;
}
@@ -1021,7 +1034,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
return 0;
- err_nomem:
+err_nomem:
e1000_free_desc_rings(adapter);
return ret_val;
}
@@ -1312,15 +1325,15 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
for(i = 0; i < 64; i++) {
e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
- pci_dma_sync_single(pdev, txdr->buffer_info[i].dma,
- txdr->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma,
+ txdr->buffer_info[i].length,
+ PCI_DMA_TODEVICE);
}
E1000_WRITE_REG(&adapter->hw, TDT, i);
msec_delay(200);
- pci_dma_sync_single(pdev, rxdr->buffer_info[0].dma,
+ pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[0].dma,
rxdr->buffer_info[0].length, PCI_DMA_FROMDEVICE);
return e1000_check_lbtest_frame(rxdr->buffer_info[0].skb, 1024);
@@ -1357,7 +1370,7 @@ e1000_diag_test_count(struct net_device *netdev)
}
static void
-e1000_diag_test(struct net_device *netdev,
+e1000_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, uint64_t *data)
{
struct e1000_adapter *adapter = netdev->priv;
@@ -1368,7 +1381,7 @@ e1000_diag_test(struct net_device *netdev,
/* save speed, duplex, autoneg settings */
uint16_t autoneg_advertised = adapter->hw.autoneg_advertised;
- uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
+ uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
uint8_t autoneg = adapter->hw.autoneg;
/* Link test performed before hardware reset so autoneg doesn't
@@ -1396,10 +1409,11 @@ e1000_diag_test(struct net_device *netdev,
if(e1000_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- /* restore Autoneg/speed/duplex settings */
+ /* restore speed, duplex, autoneg settings */
adapter->hw.autoneg_advertised = autoneg_advertised;
- adapter->hw.forced_speed_duplex = forced_speed_duplex;
- adapter->hw.autoneg = autoneg;
+ adapter->hw.forced_speed_duplex = forced_speed_duplex;
+ adapter->hw.autoneg = autoneg;
+
e1000_reset(adapter);
if(if_running)
e1000_up(adapter);
@@ -1427,6 +1441,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
case E1000_DEV_ID_82543GC_FIBER:
case E1000_DEV_ID_82543GC_COPPER:
case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
wol->supported = 0;
wol->wolopts = 0;
return;
@@ -1469,6 +1484,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
case E1000_DEV_ID_82543GC_FIBER:
case E1000_DEV_ID_82543GC_COPPER:
case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
return wol->wolopts ? -EOPNOTSUPP : 0;
case E1000_DEV_ID_82546EB_FIBER:
@@ -1571,8 +1587,8 @@ e1000_get_ethtool_stats(struct net_device *netdev,
e1000_update_stats(adapter);
for(i = 0; i < E1000_STATS_LEN; i++) {
char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
- data[i] = (e1000_gstrings_stats[i].sizeof_stat == sizeof(uint64_t))
- ? *(uint64_t *)p : *(uint32_t *)p;
+ data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+ sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
}
}
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 41c05e5c37cb3c..7c8cd3990c1b86 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -251,6 +251,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
break;
case E1000_DEV_ID_82541ER:
case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
case E1000_DEV_ID_82541GI_MOBILE:
hw->mac_type = e1000_82541_rev_2;
break;
@@ -920,7 +921,8 @@ e1000_setup_copper_link(struct e1000_hw *hw)
if(ret_val)
return ret_val;
- if(hw->mac_type == e1000_82545_rev_3) {
+ if((hw->mac_type == e1000_82545_rev_3) ||
+ (hw->mac_type == e1000_82546_rev_3)) {
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
phy_data |= 0x00000008;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
@@ -3057,16 +3059,6 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
}
break;
default:
- eeprom->type = e1000_eeprom_spi;
- eeprom->opcode_bits = 8;
- eeprom->delay_usec = 1;
- if (eecd & E1000_EECD_ADDR_BITS) {
- eeprom->page_size = 32;
- eeprom->address_bits = 16;
- } else {
- eeprom->page_size = 8;
- eeprom->address_bits = 8;
- }
break;
}
@@ -3453,7 +3445,6 @@ e1000_read_eeprom(struct e1000_hw *hw,
uint32_t i = 0;
DEBUGFUNC("e1000_read_eeprom");
-
/* A check for invalid values: offset too large, too many words, and not
* enough words.
*/
@@ -5224,3 +5215,4 @@ e1000_enable_mng_pass_thru(struct e1000_hw *hw)
}
return FALSE;
}
+
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index c9ee93a0d2de8d..9fb134ded97d31 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -357,11 +357,11 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
#define E1000_DEV_ID_82547GI 0x1075
#define E1000_DEV_ID_82541GI 0x1076
#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82541GI_LF 0x107C
#define E1000_DEV_ID_82546GB_COPPER 0x1079
#define E1000_DEV_ID_82546GB_FIBER 0x107A
#define E1000_DEV_ID_82546GB_SERDES 0x107B
#define E1000_DEV_ID_82547EI 0x1019
-
#define NODE_ADDRESS_SIZE 6
#define ETH_LENGTH_OF_ADDRESS 6
@@ -1043,7 +1043,6 @@ struct e1000_hw {
#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
-
/* Register Bit Masks */
/* Device Control */
#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4b9b775677c5ec..4e4b3db4e00e00 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -27,73 +27,69 @@
*******************************************************************************/
#include "e1000.h"
-#include <linux/rtnetlink.h>
/* Change Log
+ * 5.3.12 6/7/04
+ * - kcompat NETIF_MSG for older kernels (2.4.9) <sean.p.mcdermott@intel.com>
+ * - if_mii support and associated kcompat for older kernels
+ * - More errlogging support from Jon Mason <jonmason@us.ibm.com>
+ * - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com>
*
- * 5.2.51 5/14/04
- * o set default configuration to 'NAPI disabled'. NAPI enabled driver
- * causes kernel panic when the interface is shutdown while data is being
- * transferred.
- * 5.2.47 5/04/04
- * o fixed ethtool -t implementation
- * 5.2.45 4/29/04
- * o fixed ethtool -e implementation
- * o Support for ethtool ops [Stephen Hemminger (shemminger@osdl.org)]
- * 5.2.42 4/26/04
- * o Added support for the DPRINTK macro for enhanced error logging. Some
- * parts of the patch were supplied by Jon Mason.
- * o Move the register_netdevice() donw in the probe routine due to a
- * loading/unloading test issue.
- * o Added a long RX byte count the the extra ethtool data members for BER
- * testing purposes.
- * 5.2.39 3/12/04
+ * 5.3.11 6/4/04
+ * - ethtool register dump reads MANC register conditionally.
+ *
+ * 5.3.10 6/1/04
*/
char e1000_driver_name[] = "e1000";
char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-char e1000_driver_version[] = "5.2.52-k4";
+#ifndef CONFIG_E1000_NAPI
+#define DRIVERNAPI
+#else
+#define DRIVERNAPI "-NAPI"
+#endif
+char e1000_driver_version[] = "5.3.19-k2"DRIVERNAPI;
char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
*
- * Wildcard entries (PCI_ANY_ID) should come last
* Last entry must be all 0s
*
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
- * Class, Class Mask, private data (not used) }
+ * Macro expands to...
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
*/
static struct pci_device_id e1000_pci_tbl[] = {
- {0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ INTEL_E1000_ETHERNET_DEVICE(0x1000),
+ INTEL_E1000_ETHERNET_DEVICE(0x1001),
+ INTEL_E1000_ETHERNET_DEVICE(0x1004),
+ INTEL_E1000_ETHERNET_DEVICE(0x1008),
+ INTEL_E1000_ETHERNET_DEVICE(0x1009),
+ INTEL_E1000_ETHERNET_DEVICE(0x100C),
+ INTEL_E1000_ETHERNET_DEVICE(0x100D),
+ INTEL_E1000_ETHERNET_DEVICE(0x100E),
+ INTEL_E1000_ETHERNET_DEVICE(0x100F),
+ INTEL_E1000_ETHERNET_DEVICE(0x1010),
+ INTEL_E1000_ETHERNET_DEVICE(0x1011),
+ INTEL_E1000_ETHERNET_DEVICE(0x1012),
+ INTEL_E1000_ETHERNET_DEVICE(0x1013),
+ INTEL_E1000_ETHERNET_DEVICE(0x1015),
+ INTEL_E1000_ETHERNET_DEVICE(0x1016),
+ INTEL_E1000_ETHERNET_DEVICE(0x1017),
+ INTEL_E1000_ETHERNET_DEVICE(0x1018),
+ INTEL_E1000_ETHERNET_DEVICE(0x1019),
+ INTEL_E1000_ETHERNET_DEVICE(0x101D),
+ INTEL_E1000_ETHERNET_DEVICE(0x101E),
+ INTEL_E1000_ETHERNET_DEVICE(0x1026),
+ INTEL_E1000_ETHERNET_DEVICE(0x1027),
+ INTEL_E1000_ETHERNET_DEVICE(0x1028),
+ INTEL_E1000_ETHERNET_DEVICE(0x1075),
+ INTEL_E1000_ETHERNET_DEVICE(0x1076),
+ INTEL_E1000_ETHERNET_DEVICE(0x1077),
+ INTEL_E1000_ETHERNET_DEVICE(0x1078),
+ INTEL_E1000_ETHERNET_DEVICE(0x1079),
+ INTEL_E1000_ETHERNET_DEVICE(0x107A),
+ INTEL_E1000_ETHERNET_DEVICE(0x107B),
+ INTEL_E1000_ETHERNET_DEVICE(0x107C),
/* required last entry */
{0,}
};
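[Editor's note: each INTEL_E1000_ETHERNET_DEVICE(id) entry above wraps PCI_DEVICE(), which fills the vendor/device fields and wildcards the subsystem IDs, replacing the hand-written seven-field initializers that were removed. One entry expands roughly to (illustrative):

	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1000,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
]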
@@ -172,7 +168,7 @@ static int e1000_resume(struct pci_dev *pdev);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
-static void e1000_netpoll (struct net_device *dev);
+static void e1000_netpoll (struct net_device *netdev);
#endif
struct notifier_block e1000_notifier_reboot = {
@@ -185,7 +181,6 @@ struct notifier_block e1000_notifier_reboot = {
extern void e1000_check_options(struct e1000_adapter *adapter);
-
static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
.id_table = e1000_pci_tbl,
@@ -202,7 +197,7 @@ MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
-static int debug = 3;
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
@@ -256,6 +251,14 @@ e1000_up(struct e1000_adapter *adapter)
/* hardware has been reset, we need to reload some things */
+ /* Reset the PHY if it was previously powered down */
+ if(adapter->hw.media_type == e1000_media_type_copper) {
+ uint16_t mii_reg;
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ if(mii_reg & MII_CR_POWER_DOWN)
+ e1000_phy_reset(&adapter->hw);
+ }
+
e1000_set_multi(netdev);
e1000_restore_vlan(adapter);
@@ -294,6 +297,15 @@ e1000_down(struct e1000_adapter *adapter)
e1000_reset(adapter);
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);
+
+ /* If WoL is not enabled,
+ * power down the PHY so no link is implied when the interface is down */
+ if(!adapter->wol && adapter->hw.media_type == e1000_media_type_copper) {
+ uint16_t mii_reg;
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+ }
}
void
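Taken together, the two hunks above mean: when the interface goes down without WoL, the PHY control register gets its power-down bit set, and the next e1000_up() notices that bit and resets the PHY before use. The power-down bit is bit 11 of the standard MII control (BMCR) register; a small illustrative sketch of the bit manipulation follows (the 0x1140 starting value is made up):

    #include <stdio.h>

    #define MII_CR_POWER_DOWN 0x0800    /* bit 11 of the MII control register */

    int main(void)
    {
            unsigned short mii_reg = 0x1140;    /* hypothetical BMCR: autoneg enable + full duplex */

            mii_reg |= MII_CR_POWER_DOWN;       /* what e1000_down() does when WoL is off */
            printf("BMCR after power-down: 0x%04x\n", mii_reg);

            if (mii_reg & MII_CR_POWER_DOWN)    /* the check e1000_up() performs */
                    printf("PHY reset required on next up\n");
            return 0;
    }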
@@ -323,10 +335,10 @@ e1000_reset(struct e1000_adapter *adapter)
E1000_WRITE_REG(&adapter->hw, PBA, pba);
/* flow control settings */
- adapter->hw.fc_high_water =
- (pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_HIGH_DIFF;
- adapter->hw.fc_low_water =
- (pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_LOW_DIFF;
+ adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
+ E1000_FC_HIGH_DIFF;
+ adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
+ E1000_FC_LOW_DIFF;
adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
adapter->hw.fc_send_xon = 1;
adapter->hw.fc = adapter->hw.original_fc;
@@ -334,7 +346,8 @@ e1000_reset(struct e1000_adapter *adapter)
e1000_reset_hw(&adapter->hw);
if(adapter->hw.mac_type >= e1000_82544)
E1000_WRITE_REG(&adapter->hw, WUC, 0);
- e1000_init_hw(&adapter->hw);
+ if(e1000_init_hw(&adapter->hw))
+ DPRINTK(PROBE, ERR, "Hardware Error\n");
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
@@ -410,8 +423,8 @@ e1000_probe(struct pci_dev *pdev,
adapter->msg_enable = (1 << debug) - 1;
rtnl_lock();
- /* we need to set the name early since the DPRINTK macro needs it set */
- if (dev_alloc_name(netdev, netdev->name) < 0)
+ /* we need to set the name early for the DPRINTK macro */
+ if(dev_alloc_name(netdev, netdev->name) < 0)
goto err_free_unlock;
mmio_start = pci_resource_start(pdev, BAR_0);
@@ -476,7 +489,6 @@ e1000_probe(struct pci_dev *pdev,
}
#ifdef NETIF_F_TSO
-#ifdef BROKEN_ON_NON_IA_ARCHS
 /* Disabled for now until the root cause is found for
* hangs reported against non-IA archs. TSO can be
* enabled using ethtool -K eth<x> tso on */
@@ -484,11 +496,10 @@ e1000_probe(struct pci_dev *pdev,
(adapter->hw.mac_type != e1000_82547))
netdev->features |= NETIF_F_TSO;
#endif
-#endif
-
if(pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+
adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
/* before reading the EEPROM, reset the controller to
@@ -506,10 +517,12 @@ e1000_probe(struct pci_dev *pdev,
/* copy the MAC address out of the EEPROM */
- e1000_read_mac_addr(&adapter->hw);
+ if (e1000_read_mac_addr(&adapter->hw))
+ DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
if(!is_valid_ether_addr(netdev->dev_addr)) {
+ DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
}
@@ -569,10 +582,9 @@ e1000_probe(struct pci_dev *pdev,
adapter->wol |= E1000_WUFC_MAG;
/* reset the hardware with the new settings */
-
e1000_reset(adapter);
- /* since we are holding the rtnl lock already, call the no-lock version */
+ /* We're already holding the rtnl lock; call the no-lock version */
if((err = register_netdevice(netdev)))
goto err_register;
@@ -663,7 +675,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
/* identify the MAC */
- if (e1000_set_mac_type(hw)) {
+ if(e1000_set_mac_type(hw)) {
DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
return -EIO;
}
@@ -672,19 +684,19 @@ e1000_sw_init(struct e1000_adapter *adapter)
e1000_init_eeprom_params(hw);
- if((hw->mac_type == e1000_82541) ||
- (hw->mac_type == e1000_82547) ||
- (hw->mac_type == e1000_82541_rev_2) ||
- (hw->mac_type == e1000_82547_rev_2))
+ switch(hw->mac_type) {
+ default:
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
hw->phy_init_script = 1;
+ break;
+ }
e1000_set_media_type(hw);
- if(hw->mac_type < e1000_82543)
- hw->report_tx_early = 0;
- else
- hw->report_tx_early = 1;
-
hw->wait_autoneg_complete = FALSE;
hw->tbi_compatibility_en = TRUE;
hw->adaptive_ifs = TRUE;
@@ -736,7 +748,7 @@ e1000_open(struct net_device *netdev)
if((err = e1000_up(adapter)))
goto err_up;
- return 0;
+ return E1000_SUCCESS;
err_up:
e1000_free_rx_resources(adapter);
@@ -788,8 +800,10 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
int size;
size = sizeof(struct e1000_buffer) * txdr->count;
- txdr->buffer_info = kmalloc(size, GFP_KERNEL);
+ txdr->buffer_info = vmalloc(size);
if(!txdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unble to Allocate Memory for the Transmit descriptor ring\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -801,7 +815,9 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
if(!txdr->desc) {
- kfree(txdr->buffer_info);
+ DPRINTK(PROBE, ERR,
+ "Unble to Allocate Memory for the Transmit descriptor ring\n");
+ vfree(txdr->buffer_info);
return -ENOMEM;
}
memset(txdr->desc, 0, txdr->size);
@@ -878,10 +894,10 @@ e1000_configure_tx(struct e1000_adapter *adapter)
adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
E1000_TXD_CMD_IFCS;
- if(adapter->hw.report_tx_early == 1)
- adapter->txd_cmd |= E1000_TXD_CMD_RS;
- else
+ if(adapter->hw.mac_type < e1000_82543)
adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+ else
+ adapter->txd_cmd |= E1000_TXD_CMD_RS;
/* Cache if we're 82544 running in PCI-X because we'll
* need this to apply a workaround later in the send path. */
@@ -905,8 +921,10 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
int size;
size = sizeof(struct e1000_buffer) * rxdr->count;
- rxdr->buffer_info = kmalloc(size, GFP_KERNEL);
+ rxdr->buffer_info = vmalloc(size);
if(!rxdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unble to Allocate Memory for the Recieve descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -919,7 +937,9 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
if(!rxdr->desc) {
- kfree(rxdr->buffer_info);
+ DPRINTK(PROBE, ERR,
+ "Unble to Allocate Memory for the Recieve descriptor ring\n");
+ vfree(rxdr->buffer_info);
return -ENOMEM;
}
memset(rxdr->desc, 0, rxdr->size);
@@ -953,7 +973,9 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
else
rctl &= ~E1000_RCTL_SBP;
+ /* Setup buffer sizes */
rctl &= ~(E1000_RCTL_SZ_4096);
+ rctl |= (E1000_RCTL_BSEX | E1000_RCTL_LPE);
switch (adapter->rx_buffer_len) {
case E1000_RXBUFFER_2048:
default:
@@ -961,13 +983,13 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
break;
case E1000_RXBUFFER_4096:
- rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ rctl |= E1000_RCTL_SZ_4096;
break;
case E1000_RXBUFFER_8192:
- rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ rctl |= E1000_RCTL_SZ_8192;
break;
case E1000_RXBUFFER_16384:
- rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ rctl |= E1000_RCTL_SZ_16384;
break;
}
@@ -989,13 +1011,11 @@ e1000_configure_rx(struct e1000_adapter *adapter)
uint32_t rctl;
uint32_t rxcsum;
- /* make sure receives are disabled while setting up the descriptors */
-
+ /* disable receives while setting up the descriptors */
rctl = E1000_READ_REG(&adapter->hw, RCTL);
E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
/* set the Receive Delay Timer Register */
-
E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
if(adapter->hw.mac_type >= e1000_82540) {
@@ -1006,7 +1026,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
}
/* Setup the Base and Length of the Rx Descriptor Ring */
-
E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
@@ -1025,7 +1044,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
}
/* Enable Receives */
-
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
@@ -1043,7 +1061,7 @@ e1000_free_tx_resources(struct e1000_adapter *adapter)
e1000_clean_tx_ring(adapter);
- kfree(adapter->tx_ring.buffer_info);
+ vfree(adapter->tx_ring.buffer_info);
adapter->tx_ring.buffer_info = NULL;
pci_free_consistent(pdev, adapter->tx_ring.size,
@@ -1073,9 +1091,9 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
if(buffer_info->skb) {
pci_unmap_page(pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_TODEVICE);
dev_kfree_skb(buffer_info->skb);
@@ -1112,7 +1130,7 @@ e1000_free_rx_resources(struct e1000_adapter *adapter)
e1000_clean_rx_ring(adapter);
- kfree(rx_ring->buffer_info);
+ vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
@@ -1141,12 +1159,11 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
if(buffer_info->skb) {
pci_unmap_single(pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(buffer_info->skb);
-
buffer_info->skb = NULL;
}
}
@@ -1312,7 +1329,8 @@ e1000_set_multi(struct net_device *netdev)
e1000_leave_82542_rst(adapter);
}
-/* need to wait a few seconds after link up to get diagnostic information from the phy */
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
static void
e1000_update_phy_info(unsigned long data)
@@ -1420,7 +1438,7 @@ e1000_watchdog(unsigned long data)
adapter->tpt_old = adapter->stats.tpt;
adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
adapter->colc_old = adapter->stats.colc;
-
+
adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
adapter->gorcl_old = adapter->stats.gorcl;
adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
@@ -1477,8 +1495,9 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
#ifdef NETIF_F_TSO
struct e1000_context_desc *context_desc;
unsigned int i;
- uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
+ uint32_t cmd_length = 0;
uint16_t ipcse, tucse, mss;
+ uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
if(skb_shinfo(skb)->tso_size) {
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
@@ -1497,6 +1516,10 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
tucse = 0;
+ cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
+ (skb->len - (hdr_len)));
+
i = adapter->tx_ring.next_to_use;
context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
@@ -1508,10 +1531,7 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
- context_desc->cmd_and_length = cpu_to_le32(
- E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
- E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
- (skb->len - (hdr_len)));
+ context_desc->cmd_and_length = cpu_to_le32(cmd_length);
if(++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
@@ -1528,22 +1548,21 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
{
struct e1000_context_desc *context_desc;
unsigned int i;
- uint8_t css, cso;
+ uint8_t css;
- if(skb->ip_summed == CHECKSUM_HW) {
+ if(likely(skb->ip_summed == CHECKSUM_HW)) {
css = skb->h.raw - skb->data;
- cso = (skb->h.raw + skb->csum) - skb->data;
i = adapter->tx_ring.next_to_use;
context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
context_desc->upper_setup.tcp_fields.tucss = css;
- context_desc->upper_setup.tcp_fields.tucso = cso;
+ context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
- if(++i == adapter->tx_ring.count) i = 0;
+ if(unlikely(++i == adapter->tx_ring.count)) i = 0;
adapter->tx_ring.next_to_use = i;
return TRUE;
@@ -1567,7 +1586,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
unsigned int f;
len -= skb->data_len;
-
i = tx_ring->next_to_use;
while(len) {
@@ -1576,14 +1594,14 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
#ifdef NETIF_F_TSO
/* Workaround for premature desc write-backs
* in TSO mode. Append 4-byte sentinel desc */
- if(mss && !nr_frags && size == len && size > 8)
+ if(unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4;
#endif
/* Workaround for potential 82544 hang in PCI-X. Avoid
* terminating buffers within evenly-aligned dwords. */
- if(adapter->pcix_82544 &&
+ if(unlikely(adapter->pcix_82544 &&
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
- size > 4)
+ size > 4))
size -= 4;
buffer_info->length = size;
@@ -1597,7 +1615,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
len -= size;
offset += size;
count++;
- if(++i == tx_ring->count) i = 0;
+ if(unlikely(++i == tx_ring->count)) i = 0;
}
for(f = 0; f < nr_frags; f++) {
@@ -1613,15 +1631,15 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
#ifdef NETIF_F_TSO
/* Workaround for premature desc write-backs
* in TSO mode. Append 4-byte sentinel desc */
- if(mss && f == (nr_frags-1) && size == len && size > 8)
+ if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
size -= 4;
#endif
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
* dwords. */
- if(adapter->pcix_82544 &&
+ if(unlikely(adapter->pcix_82544 &&
!((unsigned long)(frag->page+offset+size-1) & 4) &&
- size > 4)
+ size > 4))
size -= 4;
buffer_info->length = size;
@@ -1636,13 +1654,14 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
len -= size;
offset += size;
count++;
- if(++i == tx_ring->count) i = 0;
+ if(unlikely(++i == tx_ring->count)) i = 0;
}
}
+
i = (i == 0) ? tx_ring->count - 1 : i - 1;
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
-
+
return count;
}
@@ -1655,18 +1674,18 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
unsigned int i;
- if(tx_flags & E1000_TX_FLAGS_TSO) {
+ if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
E1000_TXD_CMD_TSE;
txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
}
- if(tx_flags & E1000_TX_FLAGS_CSUM) {
+ if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
}
- if(tx_flags & E1000_TX_FLAGS_VLAN) {
+ if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
txd_lower |= E1000_TXD_CMD_VLE;
txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
}
@@ -1680,7 +1699,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
tx_desc->lower.data =
cpu_to_le32(txd_lower | buffer_info->length);
tx_desc->upper.data = cpu_to_le32(txd_upper);
- if(++i == tx_ring->count) i = 0;
+ if(unlikely(++i == tx_ring->count)) i = 0;
}
tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -1733,7 +1752,7 @@ no_fifo_stall_required:
return 0;
}
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
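
The macro over-estimates on purpose: a buffer of S bytes split into 2^X-byte chunks needs at most (S >> X) + 1 descriptors. A standalone sketch of the arithmetic (the shift of 12, i.e. 4096-byte chunks, is an assumption about E1000_MAX_TXD_PWR in this version):

    #include <stdio.h>

    #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)

    int main(void)
    {
            unsigned int max_txd_pwr = 12;               /* assumed: 4096 bytes per descriptor */
            unsigned int lens[] = { 60, 1514, 9000 };    /* sample buffer lengths */

            for (int i = 0; i < 3; i++)
                    printf("len=%u -> %u descriptor(s)\n",
                           lens[i], TXD_USE_COUNT(lens[i], max_txd_pwr));
            return 0;    /* prints 1, 1 and 3 */
    }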
static int
e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
@@ -1741,22 +1760,23 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned long flags;
unsigned int len = skb->len;
- int count = 0;
- unsigned int mss = 0;
+ unsigned long flags;
unsigned int nr_frags = 0;
+ unsigned int mss = 0;
+ int count = 0;
unsigned int f;
nr_frags = skb_shinfo(skb)->nr_frags;
len -= skb->data_len;
- if(skb->len <= 0) {
+
+ if(unlikely(skb->len <= 0)) {
dev_kfree_skb_any(skb);
return 0;
}
#ifdef NETIF_F_TSO
mss = skb_shinfo(skb)->tso_size;
- /* The controller does a simple calculation to
+ /* The controller does a simple calculation to
* make sure there is enough room in the FIFO before
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
@@ -1766,57 +1786,60 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
}
+
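In numbers: with an assumed per-descriptor limit of 4096 bytes, a small MSS such as 536 lowers max_per_txd to 4*536 = 2144, and fls() rounds the effective chunk size down to the nearest power of two (2048), so each descriptor still covers at most four segments worth of data, matching the controller's "4 = ceil(buffer len/mss)" check. A userspace sketch of that calculation (fls_ stands in for the kernel's fls(); the 4096 limit is an assumption):

    #include <stdio.h>

    static int fls_(unsigned int x)          /* like the kernel's fls(): 1-based MSB position */
    {
            int r = 0;
            while (x) { r++; x >>= 1; }
            return r;
    }

    int main(void)
    {
            unsigned int mss = 536;              /* hypothetical small MSS */
            unsigned int max_per_txd = 4096;     /* assumed E1000_MAX_DATA_PER_TXD */
            unsigned int max_txd_pwr;

            if ((mss << 2) < max_per_txd)
                    max_per_txd = mss << 2;      /* cap each descriptor at 4*mss */
            max_txd_pwr = fls_(max_per_txd) - 1;

            printf("max_per_txd=%u chunk=%u ceil(chunk/mss)=%u\n",
                   max_per_txd, 1u << max_txd_pwr,
                   ((1u << max_txd_pwr) + mss - 1) / mss);   /* 2144, 2048, 4 */
            return 0;
    }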
if((mss) || (skb->ip_summed == CHECKSUM_HW))
count++;
- count++; /*for sentinel desc*/
+ count++; /* for sentinel desc */
#else
if(skb->ip_summed == CHECKSUM_HW)
count++;
#endif
-
count += TXD_USE_COUNT(len, max_txd_pwr);
+
if(adapter->pcix_82544)
count++;
nr_frags = skb_shinfo(skb)->nr_frags;
for(f = 0; f < nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
- max_txd_pwr);
+ max_txd_pwr);
if(adapter->pcix_82544)
count += nr_frags;
-
+
spin_lock_irqsave(&adapter->tx_lock, flags);
- /* need: count + 2 desc gap to keep tail from touching
+
+ /* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time */
- if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2 ) {
+ if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return 1;
}
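E1000_DESC_UNUSED() reports how many ring slots are still free between the software producer index and the hardware consumer index; the "+ 2" headroom keeps the tail from catching the head, as the comment says. A generic sketch of the free-slot computation for such a ring (the driver's macro is defined elsewhere in the driver and follows the same idea of reserving one slot so that full and empty remain distinguishable):

    #include <stdio.h>

    static unsigned int desc_unused(unsigned int count,
                                    unsigned int next_to_use,
                                    unsigned int next_to_clean)
    {
            unsigned int used = (next_to_use - next_to_clean + count) % count;
            return count - used - 1;    /* one slot kept back on purpose */
    }

    int main(void)
    {
            printf("%u\n", desc_unused(256, 10, 10));   /* empty ring: 255 free */
            printf("%u\n", desc_unused(256, 9, 10));    /* full ring: 0 free */
            return 0;
    }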
+
spin_unlock_irqrestore(&adapter->tx_lock, flags);
- if(adapter->hw.mac_type == e1000_82547) {
- if(e1000_82547_fifo_workaround(adapter, skb)) {
+ if(unlikely(adapter->hw.mac_type == e1000_82547)) {
+ if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
netif_stop_queue(netdev);
mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
return 1;
}
}
- if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
tx_flags |= E1000_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
}
first = adapter->tx_ring.next_to_use;
- if(e1000_tso(adapter, skb))
+ if(likely(e1000_tso(adapter, skb)))
tx_flags |= E1000_TX_FLAGS_TSO;
- else if(e1000_tx_csum(adapter, skb))
+ else if(likely(e1000_tx_csum(adapter, skb)))
tx_flags |= E1000_TX_FLAGS_CSUM;
- e1000_tx_queue(adapter,
- e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
+ e1000_tx_queue(adapter,
+ e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
tx_flags);
netdev->trans_start = jiffies;
@@ -1843,10 +1866,8 @@ e1000_tx_timeout_task(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev->priv;
- netif_device_detach(netdev);
e1000_down(adapter);
e1000_up(adapter);
- netif_device_attach(netdev);
}
/**
@@ -1905,7 +1926,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
}
if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
-
e1000_down(adapter);
e1000_up(adapter);
}
@@ -1951,8 +1971,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
- /* the rest of the counters are only modified here */
-
adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
adapter->stats.mpc += E1000_READ_REG(hw, MPC);
adapter->stats.scc += E1000_READ_REG(hw, SCC);
@@ -2076,7 +2094,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
static inline void
e1000_irq_enable(struct e1000_adapter *adapter)
{
- if(atomic_dec_and_test(&adapter->irq_sem)) {
+ if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
E1000_WRITE_FLUSH(&adapter->hw);
}
@@ -2095,21 +2113,21 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev->priv;
struct e1000_hw *hw = &adapter->hw;
- uint32_t icr = E1000_READ_REG(&adapter->hw, ICR);
+ uint32_t icr = E1000_READ_REG(hw, ICR);
#ifndef CONFIG_E1000_NAPI
unsigned int i;
#endif
- if(!icr)
+ if(unlikely(!icr))
return IRQ_NONE; /* Not our interrupt */
- if(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
mod_timer(&adapter->watchdog_timer, jiffies);
}
#ifdef CONFIG_E1000_NAPI
- if(netif_rx_schedule_prep(netdev)) {
+ if(likely(netif_rx_schedule_prep(netdev))) {
/* Disable interrupts and register for poll. The flush
of the posted write is intentionally left out.
@@ -2121,8 +2139,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
}
#else
for(i = 0; i < E1000_MAX_INTR; i++)
- if(!e1000_clean_rx_irq(adapter) &
- !e1000_clean_tx_irq(adapter))
+ if(unlikely(!e1000_clean_rx_irq(adapter) &
+ !e1000_clean_tx_irq(adapter)))
break;
#endif
@@ -2140,15 +2158,18 @@ e1000_clean(struct net_device *netdev, int *budget)
{
struct e1000_adapter *adapter = netdev->priv;
int work_to_do = min(*budget, netdev->quota);
+ int tx_cleaned;
int work_done = 0;
- e1000_clean_tx_irq(adapter);
+ tx_cleaned = e1000_clean_tx_irq(adapter);
e1000_clean_rx_irq(adapter, &work_done, work_to_do);
*budget -= work_done;
netdev->quota -= work_done;
- if(work_done < work_to_do || !netif_running(netdev)) {
+ /* Exit polling mode when Tx is idle, Rx work is below budget, or the device is down */
+ if(!tx_cleaned || (work_done < work_to_do) ||
+ !netif_running(netdev)) {
netif_rx_complete(netdev);
e1000_irq_enable(adapter);
return 0;
@@ -2156,8 +2177,8 @@ e1000_clean(struct net_device *netdev, int *budget)
return (work_done >= work_to_do);
}
-#endif
+#endif
/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
@@ -2174,31 +2195,25 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
unsigned int i, eop;
boolean_t cleaned = FALSE;
-
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
-
for(cleaned = FALSE; !cleaned; ) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
- if(buffer_info->dma) {
-
+ if(likely(buffer_info->dma)) {
pci_unmap_page(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
-
buffer_info->dma = 0;
}
if(buffer_info->skb) {
-
dev_kfree_skb_any(buffer_info->skb);
-
buffer_info->skb = NULL;
}
@@ -2207,7 +2222,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
tx_desc->upper.data = 0;
cleaned = (i == eop);
- if(++i == tx_ring->count) i = 0;
+ if(unlikely(++i == tx_ring->count)) i = 0;
}
eop = tx_ring->buffer_info[i].next_to_watch;
@@ -2218,7 +2233,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
spin_lock(&adapter->tx_lock);
- if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
+ if(unlikely(cleaned && netif_queue_stopped(netdev) &&
+ netif_carrier_ok(netdev)))
netif_wake_queue(netdev);
spin_unlock(&adapter->tx_lock);
@@ -2227,7 +2243,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
/**
- * e1000_clean_rx_irq - Send received data up the network stack,
+ * e1000_clean_rx_irq - Send received data up the network stack
* @adapter: board private structure
**/
@@ -2256,14 +2272,11 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
while(rx_desc->status & E1000_RXD_STAT_DD) {
buffer_info = &rx_ring->buffer_info[i];
-
#ifdef CONFIG_E1000_NAPI
if(*work_done >= work_to_do)
break;
-
(*work_done)++;
#endif
-
cleaned = TRUE;
pci_unmap_single(pdev,
@@ -2274,49 +2287,28 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
skb = buffer_info->skb;
length = le16_to_cpu(rx_desc->length);
- if(!(rx_desc->status & E1000_RXD_STAT_EOP)) {
-
+ if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
/* All receives must fit into a single buffer */
-
- E1000_DBG("%s: Receive packet consumed multiple buffers\n",
- netdev->name);
-
+ E1000_DBG("%s: Receive packet consumed multiple"
+ " buffers\n", netdev->name);
dev_kfree_skb_irq(skb);
- rx_desc->status = 0;
- buffer_info->skb = NULL;
-
- if(++i == rx_ring->count) i = 0;
-
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- continue;
+ goto next_desc;
}
- if(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
-
+ if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
last_byte = *(skb->data + length - 1);
-
if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
rx_desc->errors, length, last_byte)) {
-
spin_lock_irqsave(&adapter->stats_lock, flags);
-
e1000_tbi_adjust_stats(&adapter->hw,
&adapter->stats,
length, skb->data);
-
spin_unlock_irqrestore(&adapter->stats_lock,
flags);
length--;
} else {
-
dev_kfree_skb_irq(skb);
- rx_desc->status = 0;
- buffer_info->skb = NULL;
-
- if(++i == rx_ring->count) i = 0;
-
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- continue;
+ goto next_desc;
}
}
@@ -2328,17 +2320,19 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_E1000_NAPI
- if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
+ if(unlikely(adapter->vlgrp &&
+ (rx_desc->status & E1000_RXD_STAT_VP))) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->special &
- E1000_RXD_SPC_VLAN_MASK));
+ le16_to_cpu(rx_desc->special &
+ E1000_RXD_SPC_VLAN_MASK));
} else {
netif_receive_skb(skb);
}
#else /* CONFIG_E1000_NAPI */
- if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
+ if(unlikely(adapter->vlgrp &&
+ (rx_desc->status & E1000_RXD_STAT_VP))) {
vlan_hwaccel_rx(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->special &
+ le16_to_cpu(rx_desc->special &
E1000_RXD_SPC_VLAN_MASK));
} else {
netif_rx(skb);
@@ -2346,10 +2340,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
#endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies;
+next_desc:
rx_desc->status = 0;
buffer_info->skb = NULL;
-
- if(++i == rx_ring->count) i = 0;
+ if(unlikely(++i == rx_ring->count)) i = 0;
rx_desc = E1000_RX_DESC(*rx_ring, i);
}
@@ -2381,11 +2375,9 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
buffer_info = &rx_ring->buffer_info[i];
while(!buffer_info->skb) {
- rx_desc = E1000_RX_DESC(*rx_ring, i);
skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
-
- if(!skb) {
+ if(unlikely(!skb)) {
/* Better luck next round */
break;
}
@@ -2400,15 +2392,15 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
- buffer_info->dma =
- pci_map_single(pdev,
- skb->data,
- adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ buffer_info->dma = pci_map_single(pdev,
+ skb->data,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
- if((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i) {
+ if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2418,7 +2410,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
E1000_WRITE_REG(&adapter->hw, RDT, i);
}
- if(++i == rx_ring->count) i = 0;
+ if(unlikely(++i == rx_ring->count)) i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
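The comment above describes the usual producer-side publish pattern: every descriptor write must be globally visible before the tail register (RDT) tells the NIC there is something new to fetch; in the kernel that ordering comes from a write memory barrier ahead of the tail update. A C11 sketch of the same ordering, with an atomic release store standing in for the barrier-plus-register-write (all names here are made up for illustration):

    #include <stdatomic.h>
    #include <stdint.h>

    struct rx_desc { uint64_t buffer_addr; };       /* illustrative descriptor */

    static struct rx_desc ring[256];
    static _Atomic unsigned int tail;               /* stands in for the RDT register */

    static void publish_descriptor(unsigned int i, uint64_t dma_addr)
    {
            ring[i].buffer_addr = dma_addr;                         /* plain descriptor write */
            atomic_store_explicit(&tail, i, memory_order_release);  /* ordered after the write above */
    }

    int main(void)
    {
            publish_descriptor(0, 0x1000);
            return 0;
    }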
@@ -2537,22 +2529,24 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -EFAULT;
mii_reg = data->val_in;
if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
- data->val_in))
+ mii_reg))
return -EIO;
if (adapter->hw.phy_type == e1000_phy_m88) {
switch (data->reg_num) {
case PHY_CTRL:
- if(data->val_in & MII_CR_AUTO_NEG_EN) {
+ if(mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if(mii_reg & MII_CR_AUTO_NEG_EN) {
adapter->hw.autoneg = 1;
adapter->hw.autoneg_advertised = 0x2F;
} else {
- if (data->val_in & 0x40)
+ if (mii_reg & 0x40)
spddplx = SPEED_1000;
- else if (data->val_in & 0x2000)
+ else if (mii_reg & 0x2000)
spddplx = SPEED_100;
else
spddplx = SPEED_10;
- spddplx += (data->val_in & 0x100)
+ spddplx += (mii_reg & 0x100)
? FULL_DUPLEX :
HALF_DUPLEX;
retval = e1000_set_spd_dplx(adapter,
@@ -2572,6 +2566,18 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -EIO;
break;
}
+ } else {
+ switch (data->reg_num) {
+ case PHY_CTRL:
+ if(mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if(netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+ break;
+ }
}
break;
default:
@@ -2593,11 +2599,11 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
struct sk_buff *skb)
{
/* 82543 or newer only */
- if((adapter->hw.mac_type < e1000_82543) ||
+ if(unlikely((adapter->hw.mac_type < e1000_82543) ||
/* Ignore Checksum bit is set */
(rx_desc->status & E1000_RXD_STAT_IXSM) ||
/* TCP Checksum has not been calculated */
- (!(rx_desc->status & E1000_RXD_STAT_TCPCS))) {
+ (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) {
skb->ip_summed = CHECKSUM_NONE;
return;
}
@@ -2609,7 +2615,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
skb->ip_summed = CHECKSUM_NONE;
adapter->hw_csum_err++;
} else {
- /* TCP checksum is good */
+ /* TCP checksum is good */
skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
@@ -2620,7 +2626,8 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
{
struct e1000_adapter *adapter = hw->back;
- pci_set_mwi(adapter->pdev);
+ int ret;
+ ret = pci_set_mwi(adapter->pdev);
}
void
@@ -2670,26 +2677,22 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
if(grp) {
/* enable VLAN tag insert/strip */
-
ctrl = E1000_READ_REG(&adapter->hw, CTRL);
ctrl |= E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
/* enable VLAN receive filtering */
-
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl |= E1000_RCTL_VFE;
rctl &= ~E1000_RCTL_CFIEN;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
} else {
/* disable VLAN tag insert/strip */
-
ctrl = E1000_READ_REG(&adapter->hw, CTRL);
ctrl &= ~E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
/* disable VLAN filtering */
-
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl &= ~E1000_RCTL_VFE;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
@@ -2705,7 +2708,6 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
uint32_t vfta, index;
/* add VID to filter table */
-
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
vfta |= (1 << (vid & 0x1F));
@@ -2725,8 +2727,7 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
e1000_irq_enable(adapter);
- /* remove VID from filter table*/
-
+ /* remove VID from filter table */
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
vfta &= ~(1 << (vid & 0x1F));
@@ -2772,6 +2773,8 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
+ DPRINTK(PROBE, ERR,
+ "Unsupported Speed/Duplexity configuration\n");
return -EINVAL;
}
return 0;
@@ -2865,6 +2868,8 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
}
}
+ pci_disable_device(pdev);
+
state = (state > 0) ? 3 : 0;
pci_set_power_state(pdev, state);
@@ -2879,6 +2884,7 @@ e1000_resume(struct pci_dev *pdev)
struct e1000_adapter *adapter = netdev->priv;
uint32_t manc;
+ pci_enable_device(pdev);
pci_set_power_state(pdev, 0);
pci_restore_state(pdev, adapter->pci_state);
@@ -2910,12 +2916,12 @@ e1000_resume(struct pci_dev *pdev)
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
-
-static void e1000_netpoll (struct net_device *dev)
+static void
+e1000_netpoll (struct net_device *netdev)
{
- struct e1000_adapter *adapter = dev->priv;
+ struct e1000_adapter *adapter = netdev->priv;
disable_irq(adapter->pdev->irq);
- e1000_intr (adapter->pdev->irq, dev, NULL);
+ e1000_intr(adapter->pdev->irq, netdev, NULL);
enable_irq(adapter->pdev->irq);
}
#endif
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 3e79a45de447e0..725a2328ce60bf 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -235,7 +235,7 @@ struct e1000_option {
static int __devinit
e1000_validate_option(int *value, struct e1000_option *opt,
- struct e1000_adapter *adapter)
+ struct e1000_adapter *adapter)
{
if(*value == OPTION_UNSET) {
*value = opt->def;
@@ -256,7 +256,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
case range_option:
if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
DPRINTK(PROBE, INFO,
- "%s set to %i\n", opt->name, *value);
+ "%s set to %i\n", opt->name, *value);
return 0;
}
break;
@@ -449,8 +449,7 @@ e1000_check_options(struct e1000_adapter *adapter)
DPRINTK(PROBE, INFO, "%s turned off\n", opt.name);
break;
case 1:
- DPRINTK(PROBE, INFO,
- "%s set to dynamic mode\n", opt.name);
+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", opt.name);
break;
default:
e1000_validate_option(&adapter->itr, &opt, adapter);
@@ -493,8 +492,9 @@ e1000_check_fiber_options(struct e1000_adapter *adapter)
"parameter ignored\n");
}
if((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
- DPRINTK(PROBE, INFO, "AutoNeg other than Full/1000 is "
- "not valid for fiber adapters, parameter ignored\n");
+ DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+ "not valid for fiber adapters, "
+ "parameter ignored\n");
}
}
@@ -611,24 +611,24 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
break;
case HALF_DUPLEX:
DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
- DPRINTK(PROBE, INFO,
- "Using Autonegotiation at Half Duplex only\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
- DPRINTK(PROBE, INFO,
- "Using Autonegotiation at Full Duplex only\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
ADVERTISE_100_FULL |
ADVERTISE_1000_FULL;
break;
case SPEED_10:
- DPRINTK(PROBE, INFO,
- "10 Mbps Speed specified without Duplex\n");
+ DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+ "without Duplex\n");
DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
@@ -647,10 +647,10 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100:
- DPRINTK(PROBE, INFO,
- "100 Mbps Speed specified without Duplex\n");
- DPRINTK(PROBE, INFO,
- "Using Autonegotiation at 100 Mbps only\n");
+ DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+ "without Duplex\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
@@ -668,10 +668,11 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_1000:
+ DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+ "Duplex\n");
DPRINTK(PROBE, INFO,
- "1000 Mbps Speed specified without Duplex\n");
- DPRINTK(PROBE, INFO,
- "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ "Using Autonegotiation at 1000 Mbps "
+ "Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
break;
@@ -679,7 +680,8 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
DPRINTK(PROBE, INFO,
"Half Duplex is not supported at 1000 Mbps\n");
DPRINTK(PROBE, INFO,
- "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ "Using Autonegotiation at 1000 Mbps "
+ "Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
break;
@@ -696,8 +698,8 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
/* Speed, AutoNeg and MDI/MDI-X must all play nice */
if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
DPRINTK(PROBE, INFO,
- "Speed, AutoNeg and MDI-X specifications are "
- "incompatible. Setting MDI-X to a compatible value.\n");
+ "Speed, AutoNeg and MDI-X specifications are "
+ "incompatible. Setting MDI-X to a compatible value.\n");
}
}
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 36b5508f45826c..1132101ec73c0e 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -88,7 +88,6 @@ static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
#define PKT_BUF_SZ 1536
#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -2447,22 +2446,6 @@ static struct pci_driver eepro100_driver = {
#endif /* CONFIG_PM */
};
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48)
-static int pci_module_init(struct pci_driver *pdev)
-{
- int rc;
-
- rc = pci_register_driver(pdev);
- if (rc <= 0) {
- printk(KERN_INFO "%s: No cards found, driver not installed.\n",
- pdev->name);
- pci_unregister_driver(pdev);
- return -ENODEV;
- }
- return 0;
-}
-#endif
-
static int __init eepro100_init_module(void)
{
#ifdef MODULE
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 771bfc6721d7ec..ec327ad3a33f41 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -80,8 +80,6 @@
These may be modified when a driver module is loaded.*/
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static int max_interrupt_work = 32;
/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8 /* More are supported, limit only on options */
@@ -99,9 +97,9 @@ static int rx_copybreak;
Making the Tx ring too large decreases the effectiveness of channel
bonding and packet priority.
There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE 16
-#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
-#define RX_RING_SIZE 32
+#define TX_RING_SIZE 256
+#define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 256
#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
@@ -152,12 +150,10 @@ MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(debug, "i");
-MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
-MODULE_PARM_DESC(max_interrupt_work, "EPIC/100 maximum events handled per interrupt");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
@@ -289,6 +285,12 @@ enum CommandBits {
StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};
+#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
+
+#define EpicNapiEvent (TxEmpty | TxDone | \
+ RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
+#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
+
static u16 media2miictl[16] = {
0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 };
@@ -327,9 +329,12 @@ struct epic_private {
/* Ring pointers. */
spinlock_t lock; /* Group with Tx control cache line. */
+ spinlock_t napi_lock;
+ unsigned int reschedule_in_poll;
unsigned int cur_tx, dirty_tx;
unsigned int cur_rx, dirty_rx;
+ u32 irq_mask;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
struct pci_dev *pci_dev; /* PCI bus location. */
@@ -356,7 +361,8 @@ static void epic_timer(unsigned long data);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static int epic_rx(struct net_device *dev);
+static int epic_rx(struct net_device *dev, int budget);
+static int epic_poll(struct net_device *dev, int *budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
@@ -375,7 +381,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
int irq;
struct net_device *dev;
struct epic_private *ep;
- int i, option = 0, duplex = 0;
+ int i, ret, option = 0, duplex = 0;
void *ring_space;
dma_addr_t ring_dma;
@@ -389,29 +395,33 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
card_idx++;
- i = pci_enable_device(pdev);
- if (i)
- return i;
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out;
irq = pdev->irq;
if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_out_disable;
}
pci_set_master(pdev);
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret < 0)
+ goto err_out_disable;
+
+ ret = -ENOMEM;
+
dev = alloc_etherdev(sizeof (*ep));
if (!dev) {
printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
- return -ENOMEM;
+ goto err_out_free_res;
}
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_request_regions(pdev, DRV_NAME))
- goto err_out_free_netdev;
-
#ifdef USE_IO_OPS
ioaddr = pci_resource_start (pdev, 0);
#else
@@ -419,7 +429,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
if (!ioaddr) {
printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
- goto err_out_free_res;
+ goto err_out_free_netdev;
}
#endif
@@ -456,7 +466,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
dev->base_addr = ioaddr;
dev->irq = irq;
- spin_lock_init (&ep->lock);
+ spin_lock_init(&ep->lock);
+ spin_lock_init(&ep->napi_lock);
+ ep->reschedule_in_poll = 0;
/* Bring the chip out of low-power mode. */
outl(0x4200, ioaddr + GENCTL);
@@ -486,6 +498,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ep->pci_dev = pdev;
ep->chip_id = chip_idx;
ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
+ ep->irq_mask =
+ (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
+ | CntFull | TxUnderrun | EpicNapiEvent;
/* Find the connected MII xcvrs.
Doing this in open() would allow detecting external xcvrs later, but
@@ -540,10 +555,12 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->tx_timeout = &epic_tx_timeout;
+ dev->poll = epic_poll;
+ dev->weight = 64;
- i = register_netdev(dev);
- if (i)
- goto err_out_unmap_tx;
+ ret = register_netdev(dev);
+ if (ret < 0)
+ goto err_out_unmap_rx;
printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
@@ -551,19 +568,24 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
printk("%2.2x:", dev->dev_addr[i]);
printk("%2.2x.\n", dev->dev_addr[i]);
- return 0;
+out:
+ return ret;
+err_out_unmap_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
#ifndef USE_IO_OPS
iounmap(ioaddr);
-err_out_free_res:
-#endif
- pci_release_regions(pdev);
err_out_free_netdev:
+#endif
free_netdev(dev);
- return -ENODEV;
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+ goto out;
}
/* Serial EEPROM section. */
@@ -589,6 +611,38 @@ err_out_free_netdev:
#define EE_READ256_CMD (6 << 8)
#define EE_ERASE_CMD (7 << 6)
+static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+
+ outl(0x00000000, ioaddr + INTMASK);
+}
+
+static inline void __epic_pci_commit(long ioaddr)
+{
+#ifndef USE_IO_OPS
+ inl(ioaddr + INTMASK);
+#endif
+}
+
+static inline void epic_napi_irq_off(struct net_device *dev,
+ struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+
+ outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
+ __epic_pci_commit(ioaddr);
+}
+
+static inline void epic_napi_irq_on(struct net_device *dev,
+ struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+
+ /* No need to commit possible posted write */
+ outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
+}
+
static int __devinit read_eeprom(long ioaddr, int location)
{
int i;
@@ -749,9 +803,8 @@ static int epic_open(struct net_device *dev)
/* Enable interrupts by setting the interrupt mask. */
outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
- | CntFull | TxUnderrun | TxDone | TxEmpty
- | RxError | RxOverflow | RxFull | RxHeader | RxDone,
- ioaddr + INTMASK);
+ | CntFull | TxUnderrun
+ | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
if (debug > 1)
printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
@@ -792,7 +845,7 @@ static void epic_pause(struct net_device *dev)
}
/* Remove the packets on the Rx queue. */
- epic_rx(dev);
+ epic_rx(dev, RX_RING_SIZE);
}
static void epic_restart(struct net_device *dev)
@@ -838,9 +891,9 @@ static void epic_restart(struct net_device *dev)
/* Enable interrupts by setting the interrupt mask. */
outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
- | CntFull | TxUnderrun | TxDone | TxEmpty
- | RxError | RxOverflow | RxFull | RxHeader | RxDone,
- ioaddr + INTMASK);
+ | CntFull | TxUnderrun
+ | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+
printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
" interrupt %4.4x.\n",
dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
@@ -926,7 +979,6 @@ static void epic_init_ring(struct net_device *dev)
int i;
ep->tx_full = 0;
- ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
ep->dirty_tx = ep->cur_tx = 0;
ep->cur_rx = ep->dirty_rx = 0;
ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
@@ -1026,6 +1078,76 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
+ int status)
+{
+ struct net_device_stats *stats = &ep->stats;
+
+#ifndef final_version
+ /* There was a major error, log it. */
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, status);
+#endif
+ stats->tx_errors++;
+ if (status & 0x1050)
+ stats->tx_aborted_errors++;
+ if (status & 0x0008)
+ stats->tx_carrier_errors++;
+ if (status & 0x0040)
+ stats->tx_window_errors++;
+ if (status & 0x0010)
+ stats->tx_fifo_errors++;
+}
+
+static void epic_tx(struct net_device *dev, struct epic_private *ep)
+{
+ unsigned int dirty_tx, cur_tx;
+
+ /*
+ * Note: if this lock becomes a problem we can narrow the locked
+ * region at the cost of occasionally grabbing the lock more times.
+ */
+ cur_tx = ep->cur_tx;
+ for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
+ struct sk_buff *skb;
+ int entry = dirty_tx % TX_RING_SIZE;
+ int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
+
+ if (txstatus & DescOwn)
+ break; /* It still hasn't been Txed */
+
+ if (likely(txstatus & 0x0001)) {
+ ep->stats.collisions += (txstatus >> 8) & 15;
+ ep->stats.tx_packets++;
+ ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
+ } else
+ epic_tx_error(dev, ep, txstatus);
+
+ /* Free the original skb. */
+ skb = ep->tx_skbuff[entry];
+ pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ ep->tx_skbuff[entry] = NULL;
+ }
+
+#ifndef final_version
+ if (cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_WARNING
+ "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, cur_tx, ep->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+ ep->dirty_tx = dirty_tx;
+ if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ ep->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+}
+
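cur_tx and dirty_tx are free-running unsigned counters, so cur_tx - dirty_tx gives the number of in-flight descriptors even across integer wrap, and the ring slot is recovered with a modulo. A quick standalone check of that arithmetic (values chosen near the wrap point; TX_RING_SIZE matches the new value in this patch):

    #include <stdio.h>

    #define TX_RING_SIZE 256

    int main(void)
    {
            unsigned int dirty_tx = 0xfffffff0u;    /* hypothetical counter values near wrap */
            unsigned int cur_tx   = 0x00000005u;

            printf("in flight = %u\n", cur_tx - dirty_tx);                    /* 21 */
            printf("ring slot of dirty_tx = %u\n", dirty_tx % TX_RING_SIZE);  /* 240 */
            return 0;
    }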
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -1033,135 +1155,71 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *r
struct net_device *dev = dev_instance;
struct epic_private *ep = dev->priv;
long ioaddr = dev->base_addr;
- int status, boguscnt = max_interrupt_work;
unsigned int handled = 0;
+ int status;
- do {
- status = inl(ioaddr + INTSTAT);
- /* Acknowledge all of the current interrupt sources ASAP. */
- outl(status & 0x00007fff, ioaddr + INTSTAT);
+ status = inl(ioaddr + INTSTAT);
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outl(status & EpicNormalEvent, ioaddr + INTSTAT);
- if (debug > 4)
- printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
- "intstat=%#8.8x.\n",
- dev->name, status, (int)inl(ioaddr + INTSTAT));
+ if (debug > 4) {
+ printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
+ "intstat=%#8.8x.\n", dev->name, status,
+ (int)inl(ioaddr + INTSTAT));
+ }
- if ((status & IntrSummary) == 0)
- break;
- handled = 1;
-
- if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
- epic_rx(dev);
-
- if (status & (TxEmpty | TxDone)) {
- unsigned int dirty_tx, cur_tx;
-
- /* Note: if this lock becomes a problem we can narrow the locked
- region at the cost of occasionally grabbing the lock more
- times. */
- spin_lock(&ep->lock);
- cur_tx = ep->cur_tx;
- dirty_tx = ep->dirty_tx;
- for (; cur_tx - dirty_tx > 0; dirty_tx++) {
- struct sk_buff *skb;
- int entry = dirty_tx % TX_RING_SIZE;
- int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
-
- if (txstatus & DescOwn)
- break; /* It still hasn't been Txed */
-
- if ( ! (txstatus & 0x0001)) {
- /* There was an major error, log it. */
-#ifndef final_version
- if (debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, txstatus);
-#endif
- ep->stats.tx_errors++;
- if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
- if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
- if (txstatus & 0x0040) ep->stats.tx_window_errors++;
- if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
- } else {
- ep->stats.collisions += (txstatus >> 8) & 15;
- ep->stats.tx_packets++;
- ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
- }
-
- /* Free the original skb. */
- skb = ep->tx_skbuff[entry];
- pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(skb);
- ep->tx_skbuff[entry] = NULL;
- }
+ if ((status & IntrSummary) == 0)
+ goto out;
-#ifndef final_version
- if (cur_tx - dirty_tx > TX_RING_SIZE) {
- printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, cur_tx, ep->tx_full);
- dirty_tx += TX_RING_SIZE;
- }
-#endif
- ep->dirty_tx = dirty_tx;
- if (ep->tx_full
- && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
- /* The ring is no longer full, allow new TX entries. */
- ep->tx_full = 0;
- spin_unlock(&ep->lock);
- netif_wake_queue(dev);
- } else
- spin_unlock(&ep->lock);
- }
+ handled = 1;
- /* Check uncommon events all at once. */
- if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
- PCIBusErr170 | PCIBusErr175)) {
- if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
- break;
- /* Always update the error counts to avoid overhead later. */
- ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
- ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
- ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
-
- if (status & TxUnderrun) { /* Tx FIFO underflow. */
- ep->stats.tx_fifo_errors++;
- outl(ep->tx_threshold += 128, ioaddr + TxThresh);
- /* Restart the transmit process. */
- outl(RestartTx, ioaddr + COMMAND);
- }
- if (status & RxOverflow) { /* Missed a Rx frame. */
- ep->stats.rx_errors++;
- }
- if (status & (RxOverflow | RxFull))
- outw(RxQueued, ioaddr + COMMAND);
- if (status & PCIBusErr170) {
- printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
- dev->name, status);
- epic_pause(dev);
- epic_restart(dev);
- }
- /* Clear all error sources. */
- outl(status & 0x7f18, ioaddr + INTSTAT);
+ if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
+ spin_lock(&ep->napi_lock);
+ if (netif_rx_schedule_prep(dev)) {
+ epic_napi_irq_off(dev, ep);
+ __netif_rx_schedule(dev);
+ } else
+ ep->reschedule_in_poll++;
+ spin_unlock(&ep->napi_lock);
+ }
+ status &= ~EpicNapiEvent;
+
+ /* Check uncommon events all at once. */
+ if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
+ if (status == EpicRemoved)
+ goto out;
+
+ /* Always update the error counts to avoid overhead later. */
+ ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+ ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+
+ if (status & TxUnderrun) { /* Tx FIFO underflow. */
+ ep->stats.tx_fifo_errors++;
+ outl(ep->tx_threshold += 128, ioaddr + TxThresh);
+ /* Restart the transmit process. */
+ outl(RestartTx, ioaddr + COMMAND);
}
- if (--boguscnt < 0) {
- printk(KERN_ERR "%s: Too much work at interrupt, "
- "IntrStatus=0x%8.8x.\n",
- dev->name, status);
- /* Clear all interrupt sources. */
- outl(0x0001ffff, ioaddr + INTSTAT);
- break;
+ if (status & PCIBusErr170) {
+ printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
+ dev->name, status);
+ epic_pause(dev);
+ epic_restart(dev);
}
- } while (1);
+ /* Clear all error sources. */
+ outl(status & 0x7f18, ioaddr + INTSTAT);
+ }
- if (debug > 3)
- printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
- dev->name, status);
+out:
+ if (debug > 3) {
+ printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
+ dev->name, status);
+ }
return IRQ_RETVAL(handled);
}
-static int epic_rx(struct net_device *dev)
+static int epic_rx(struct net_device *dev, int budget)
{
struct epic_private *ep = dev->priv;
int entry = ep->cur_rx % RX_RING_SIZE;
@@ -1171,6 +1229,10 @@ static int epic_rx(struct net_device *dev)
if (debug > 4)
printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
ep->rx_ring[entry].rxstatus);
+
+ if (rx_work_limit > budget)
+ rx_work_limit = budget;
+
/* If we own the next entry, it's a new packet. Send it up. */
while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
@@ -1226,7 +1288,7 @@ static int epic_rx(struct net_device *dev)
ep->rx_skbuff[entry] = NULL;
}
skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
+ netif_receive_skb(skb);
dev->last_rx = jiffies;
ep->stats.rx_packets++;
ep->stats.rx_bytes += pkt_len;
@@ -1254,6 +1316,65 @@ static int epic_rx(struct net_device *dev)
return work_done;
}
+static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+ int status;
+
+ status = inl(ioaddr + INTSTAT);
+
+ if (status == EpicRemoved)
+ return;
+ if (status & RxOverflow) /* Missed a Rx frame. */
+ ep->stats.rx_errors++;
+ if (status & (RxOverflow | RxFull))
+ outw(RxQueued, ioaddr + COMMAND);
+}
+
+static int epic_poll(struct net_device *dev, int *budget)
+{
+ struct epic_private *ep = dev->priv;
+ int work_done, orig_budget;
+ long ioaddr = dev->base_addr;
+
+ orig_budget = (*budget > dev->quota) ? dev->quota : *budget;
+
+rx_action:
+
+ epic_tx(dev, ep);
+
+ work_done = epic_rx(dev, *budget);
+
+ epic_rx_err(dev, ep);
+
+ *budget -= work_done;
+ dev->quota -= work_done;
+
+ if (netif_running(dev) && (work_done < orig_budget)) {
+ unsigned long flags;
+ int more;
+
+ /* A bit baroque but it avoids a (space hungry) spin_unlock */
+
+ spin_lock_irqsave(&ep->napi_lock, flags);
+
+ more = ep->reschedule_in_poll;
+ if (!more) {
+ __netif_rx_complete(dev);
+ outl(EpicNapiEvent, ioaddr + INTSTAT);
+ epic_napi_irq_on(dev, ep);
+ } else
+ ep->reschedule_in_poll--;
+
+ spin_unlock_irqrestore(&ep->napi_lock, flags);
+
+ if (more)
+ goto rx_action;
+ }
+
+ return (work_done >= orig_budget);
+}
+
static int epic_close(struct net_device *dev)
{
long ioaddr = dev->base_addr;
@@ -1268,9 +1389,13 @@ static int epic_close(struct net_device *dev)
dev->name, (int)inl(ioaddr + INTSTAT));
del_timer_sync(&ep->timer);
- epic_pause(dev);
+
+ epic_disable_int(dev, ep);
+
free_irq(dev->irq, dev);
+ epic_pause(dev);
+
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
skb = ep->rx_skbuff[i];
@@ -1491,6 +1616,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
#endif
pci_release_regions(pdev);
free_netdev(dev);
+ pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
/* pci_power_off(pdev, -1); */
}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
new file mode 100644
index 00000000000000..36ff688cbd63c5
--- /dev/null
+++ b/drivers/net/gianfar.c
@@ -0,0 +1,1921 @@
+/*
+ * drivers/net/gianfar.c
+ *
+ * Gianfar Ethernet Driver
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Gianfar: AKA Lambda Draconis, "Dragon"
+ * RA 11 31 24.2
+ * Dec +69 19 52
+ * V 3.84
+ * B-V +1.62
+ *
+ * Theory of operation
+ * This driver is designed for the Triple-speed Ethernet
+ * controllers on the Freescale 8540/8560 integrated processors,
+ * as well as the Fast Ethernet Controller on the 8540.
+ *
+ * The driver is initialized through OCP. Structures which
+ * define the configuration needed by the board are defined in a
+ * board structure in arch/ppc/platforms (though I do not
+ * discount the possibility that other architectures could one
+ * day be supported).  One assumption the driver currently makes
+ * is that the PHY is configured in such a way as to advertise all
+ * capabilities.  This is a sensible default, and on certain
+ * PHYs, changing this default runs into substantial errata
+ * issues. Future versions may remove this requirement, but for
+ * now, it is best for the firmware to ensure this is the case.
+ *
+ * The Gianfar Ethernet Controller uses a ring of buffer
+ * descriptors. The beginning is indicated by a register
+ * pointing to the physical address of the start of the ring.
+ * The end is determined by a "wrap" bit being set in the
+ * last descriptor of the ring.
+ *
+ * When a packet is received, the RXF bit in the
+ * IEVENT register is set, triggering an interrupt when the
+ * corresponding bit in the IMASK register is also set (if
+ * interrupt coalescing is active, then the interrupt may not
+ * happen immediately, but will wait until either a set number
+ * of frames or amount of time has passed).  In NAPI, the
+ * interrupt handler will signal there is work to be done, and
+ * exit. Without NAPI, the packet(s) will be handled
+ * immediately. Both methods will start at the last known empty
+ * descriptor, and process every subsequent descriptor until there
+ * are none left with data (NAPI will stop after a set number of
+ * packets to give time to other tasks, but will eventually
+ * process all the packets). The data arrives inside a
+ * pre-allocated skb, and so after the skb is passed up to the
+ * stack, a new skb must be allocated, and the address field in
+ * the buffer descriptor must be updated to indicate this new
+ * skb.
+ *
+ * When the kernel requests that a packet be transmitted, the
+ * driver starts where it left off last time, and points the
+ * descriptor at the buffer which was passed in. The driver
+ * then informs the DMA engine that there are packets ready to
+ * be transmitted. Once the controller is finished transmitting
+ * the packet, an interrupt may be triggered (under the same
+ * conditions as for reception, but depending on the TXF bit).
+ * The driver then cleans up the buffer.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+
+#include "gianfar.h"
+#include "gianfar_phy.h"
+#ifdef CONFIG_NET_FASTROUTE
+#include <linux/if_arp.h>
+#include <net/ip.h>
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41)
+#define irqreturn_t void
+#define IRQ_HANDLED
+#endif
+
+#define TX_TIMEOUT (1*HZ)
+#define SKB_ALLOC_TIMEOUT 1000000
+#undef BRIEF_GFAR_ERRORS
+#undef VERBOSE_GFAR_ERRORS
+
+#ifdef CONFIG_GFAR_NAPI
+#define RECEIVE(x) netif_receive_skb(x)
+#else
+#define RECEIVE(x) netif_rx(x)
+#endif
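+/* With NAPI enabled, frames are handed to the stack from the poll
+ * routine via netif_receive_skb(); otherwise they are queued from
+ * interrupt context with netif_rx(). */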
+
+#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.0, "
+char gfar_driver_name[] = "Gianfar Ethernet";
+char gfar_driver_version[] = "1.0";
+
+int startup_gfar(struct net_device *dev);
+static int gfar_enet_open(struct net_device *dev);
+static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void gfar_timeout(struct net_device *dev);
+static int gfar_close(struct net_device *dev);
+struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
+static struct net_device_stats *gfar_get_stats(struct net_device *dev);
+static int gfar_set_mac_address(struct net_device *dev);
+static int gfar_change_mtu(struct net_device *dev, int new_mtu);
+static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
+irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void gfar_phy_change(void *data);
+static void gfar_phy_timer(unsigned long data);
+static void adjust_link(struct net_device *dev);
+static void init_registers(struct net_device *dev);
+static int init_phy(struct net_device *dev);
+static int gfar_probe(struct ocp_device *ocpdev);
+static void gfar_remove(struct ocp_device *ocpdev);
+void free_skb_resources(struct gfar_private *priv);
+static void gfar_set_multi(struct net_device *dev);
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
+#ifdef CONFIG_GFAR_NAPI
+static int gfar_poll(struct net_device *dev, int *budget);
+#endif
+#ifdef CONFIG_NET_FASTROUTE
+static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst);
+#endif
+static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length);
+#ifdef CONFIG_GFAR_NAPI
+static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+#else
+static int gfar_clean_rx_ring(struct net_device *dev);
+#endif
+static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
+
+extern struct ethtool_ops gfar_ethtool_ops;
+extern void gfar_gstrings_normon(struct net_device *dev, u32 stringset,
+ u8 * buf);
+extern void gfar_fill_stats_normon(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 * buf);
+extern int gfar_stats_count_normon(struct net_device *dev);
+
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Gianfar Ethernet Driver");
+MODULE_LICENSE("GPL");
+
+/* Called by the ocp code to initialize device data structures
+ * required for bringing up the device
+ * returns 0 on success */
+static int gfar_probe(struct ocp_device *ocpdev)
+{
+ u32 tempval;
+ struct ocp_device *mdiodev;
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ struct ocp_gfar_data *einfo;
+ int idx;
+ int err = 0;
+ struct ethtool_ops *dev_ethtool_ops;
+
+ einfo = (struct ocp_gfar_data *) ocpdev->def->additions;
+
+ if (einfo == NULL) {
+ printk(KERN_ERR "gfar %d: Missing additional data!\n",
+ ocpdev->def->index);
+
+ return -ENODEV;
+ }
+
+ /* get a pointer to the register memory which can
+ * configure the PHYs. If it's different from this set,
+ * get the device which has those regs */
+ if ((einfo->phyregidx >= 0) && (einfo->phyregidx != ocpdev->def->index)) {
+ mdiodev = ocp_find_device(OCP_ANY_ID,
+ OCP_FUNC_GFAR, einfo->phyregidx);
+
+ /* If the device which holds the MDIO regs isn't
+ * up, wait for it to come up */
+ if (mdiodev == NULL)
+ return -EAGAIN;
+ } else {
+ mdiodev = ocpdev;
+ }
+
+ /* Create an ethernet device instance */
+ dev = alloc_etherdev(sizeof (*priv));
+
+ if (dev == NULL)
+ return -ENOMEM;
+
+ priv = netdev_priv(dev);
+
+ /* Set the info in the priv to the current info */
+ priv->einfo = einfo;
+
+ /* get a pointer to the register memory */
+ priv->regs = (struct gfar *)
+ ioremap(ocpdev->def->paddr, sizeof (struct gfar));
+
+ if (priv->regs == NULL) {
+ err = -ENOMEM;
+ goto regs_fail;
+ }
+
+ /* Set the PHY base address */
+ priv->phyregs = (struct gfar *)
+ ioremap(mdiodev->def->paddr, sizeof (struct gfar));
+
+ if (priv->phyregs == NULL) {
+ err = -ENOMEM;
+ goto phy_regs_fail;
+ }
+
+ ocp_set_drvdata(ocpdev, dev);
+
+ /* Stop the DMA engine now, in case it was running before */
+ /* (The firmware could have used it, and left it running). */
+ /* To do this, we write Graceful Receive Stop and Graceful */
+ /* Transmit Stop, and then wait until the corresponding bits */
+ /* in IEVENT indicate the stops have completed. */
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
+ cpu_relax();
+
+ /* Reset MAC layer */
+ gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
+
+ tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ gfar_write(&priv->regs->maccfg1, tempval);
+
+ /* Initialize MACCFG2. */
+ gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
+
+ /* Initialize ECNTRL */
+ gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
+
+ /* Copy the station address into the dev structure, */
+ /* and into the address registers MAC_STNADDR1,2. */
+ /* Backwards, because little endian MACs are dumb. */
+ /* Don't set the regs if the firmware already did */
+ memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long) (priv->regs);
+
+ SET_MODULE_OWNER(dev);
+
+ /* Fill in the dev structure */
+ dev->open = gfar_enet_open;
+ dev->hard_start_xmit = gfar_start_xmit;
+ dev->tx_timeout = gfar_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_GFAR_NAPI
+ dev->poll = gfar_poll;
+ dev->weight = GFAR_DEV_WEIGHT;
+#endif
+ dev->stop = gfar_close;
+ dev->get_stats = gfar_get_stats;
+ dev->change_mtu = gfar_change_mtu;
+ dev->mtu = 1500;
+ dev->set_multicast_list = gfar_set_multi;
+ dev->flags |= IFF_MULTICAST;
+
+ dev_ethtool_ops =
+ (struct ethtool_ops *)kmalloc(sizeof(struct ethtool_ops),
+ GFP_KERNEL);
+
+ if(dev_ethtool_ops == NULL) {
+ err = -ENOMEM;
+ goto ethtool_fail;
+ }
+
+ memcpy(dev_ethtool_ops, &gfar_ethtool_ops, sizeof(gfar_ethtool_ops));
+
+ /* If there is no RMON support in this device, we don't
+ * want to expose non-existent statistics */
+ if((priv->einfo->flags & GFAR_HAS_RMON) == 0) {
+ dev_ethtool_ops->get_strings = gfar_gstrings_normon;
+ dev_ethtool_ops->get_stats_count = gfar_stats_count_normon;
+ dev_ethtool_ops->get_ethtool_stats = gfar_fill_stats_normon;
+ }
+
+ if((priv->einfo->flags & GFAR_HAS_COALESCE) == 0) {
+ dev_ethtool_ops->set_coalesce = NULL;
+ dev_ethtool_ops->get_coalesce = NULL;
+ }
+
+ dev->ethtool_ops = dev_ethtool_ops;
+
+#ifdef CONFIG_NET_FASTROUTE
+ dev->accept_fastpath = gfar_accept_fastpath;
+#endif
+
+ priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
+#ifdef CONFIG_GFAR_BUFSTASH
+ priv->rx_stash_size = STASH_LENGTH;
+#endif
+ priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
+ priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
+
+ /* Initially, coalescing is disabled */
+ priv->txcoalescing = 0;
+ priv->txcount = 0;
+ priv->txtime = 0;
+ priv->rxcoalescing = 0;
+ priv->rxcount = 0;
+ priv->rxtime = 0;
+
+ err = register_netdev(dev);
+
+ if (err) {
+ printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
+ dev->name);
+ goto register_fail;
+ }
+
+ /* Print out the device info */
+ printk(DEVICE_NAME, dev->name);
+ for (idx = 0; idx < 6; idx++)
+ printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
+ printk("\n");
+
+ /* Even more device info helps when determining which kernel */
+ /* provided which set of benchmarks. Since this is global for all */
+ /* devices, we only print it once */
+#ifdef CONFIG_GFAR_NAPI
+ printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
+#else
+ printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
+#endif
+ printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
+ dev->name, priv->rx_ring_size, priv->tx_ring_size);
+
+ return 0;
+
+
+register_fail:
+ kfree(dev_ethtool_ops);
+ethtool_fail:
+ iounmap((void *) priv->phyregs);
+phy_regs_fail:
+ iounmap((void *) priv->regs);
+regs_fail:
+ free_netdev(dev);
+ return err;
+}
+
+static void gfar_remove(struct ocp_device *ocpdev)
+{
+ struct net_device *dev = ocp_get_drvdata(ocpdev);
+ struct gfar_private *priv = netdev_priv(dev);
+
+ ocp_set_drvdata(ocpdev, NULL);
+
+ kfree(dev->ethtool_ops);
+ iounmap((void *) priv->regs);
+ iounmap((void *) priv->phyregs);
+ free_netdev(dev);
+}
+
+/* Configure the PHY for dev.
+ * Returns 0 on success, -1 on failure.
+ */
+static int init_phy(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_info *curphy;
+
+ priv->link = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->olddplx = -1;
+
+ /* get info for this PHY */
+ curphy = get_phy_info(dev);
+
+ if (curphy == NULL) {
+ printk(KERN_ERR "%s: No PHY found\n", dev->name);
+ return -1;
+ }
+
+ priv->phyinfo = curphy;
+
+ /* Run the commands which configure the PHY */
+ phy_run_commands(dev, curphy->config);
+
+ return 0;
+}
+
+static void init_registers(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Clear IEVENT */
+ gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
+
+ /* Initialize IMASK */
+ gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
+
+ /* Init hash registers to zero */
+ gfar_write(&priv->regs->iaddr0, 0);
+ gfar_write(&priv->regs->iaddr1, 0);
+ gfar_write(&priv->regs->iaddr2, 0);
+ gfar_write(&priv->regs->iaddr3, 0);
+ gfar_write(&priv->regs->iaddr4, 0);
+ gfar_write(&priv->regs->iaddr5, 0);
+ gfar_write(&priv->regs->iaddr6, 0);
+ gfar_write(&priv->regs->iaddr7, 0);
+
+ gfar_write(&priv->regs->gaddr0, 0);
+ gfar_write(&priv->regs->gaddr1, 0);
+ gfar_write(&priv->regs->gaddr2, 0);
+ gfar_write(&priv->regs->gaddr3, 0);
+ gfar_write(&priv->regs->gaddr4, 0);
+ gfar_write(&priv->regs->gaddr5, 0);
+ gfar_write(&priv->regs->gaddr6, 0);
+ gfar_write(&priv->regs->gaddr7, 0);
+
+ /* Zero out rctrl */
+ gfar_write(&priv->regs->rctrl, 0x00000000);
+
+ /* Zero out the rmon mib registers if it has them */
+ if (priv->einfo->flags & GFAR_HAS_RMON) {
+ memset((void *) &(priv->regs->rmon), 0,
+ sizeof (struct rmon_mib));
+
+ /* Mask off the CAM interrupts */
+ gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
+ gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
+ }
+
+ /* Initialize the max receive buffer length */
+ gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
+
+#ifdef CONFIG_GFAR_BUFSTASH
+ /* If we are stashing buffers, we need to set the
+ * extraction length to the size of the buffer */
+ gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16);
+#endif
+
+ /* Initialize the Minimum Frame Length Register */
+ gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
+
+ /* Setup Attributes so that snooping is on for rx */
+ gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS);
+ gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS);
+
+ /* Assign the TBI an address which won't conflict with the PHYs */
+ gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
+}
+
+void stop_gfar(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regs = priv->regs;
+ unsigned long flags;
+ u32 tempval;
+
+ /* Lock it down */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Tell the kernel the link is down */
+ priv->link = 0;
+ adjust_link(dev);
+
+ /* Mask all interrupts */
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+
+ /* Clear all interrupts */
+ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+
+ /* Stop the DMA, and wait for it to stop */
+ tempval = gfar_read(&priv->regs->dmactrl);
+ if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
+ != (DMACTRL_GRS | DMACTRL_GTS)) {
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ while (!(gfar_read(&priv->regs->ievent) &
+ (IEVENT_GRSC | IEVENT_GTSC)))
+ cpu_relax();
+ }
+
+ /* Disable Rx and Tx */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+
+ if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
+ phy_run_commands(dev, priv->phyinfo->shutdown);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Free the IRQs */
+ if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) {
+ free_irq(priv->einfo->interruptError, dev);
+ free_irq(priv->einfo->interruptTransmit, dev);
+ free_irq(priv->einfo->interruptReceive, dev);
+ } else {
+ free_irq(priv->einfo->interruptTransmit, dev);
+ }
+
+ if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
+ free_irq(priv->einfo->interruptPHY, dev);
+ } else {
+ del_timer_sync(&priv->phy_info_timer);
+ }
+
+ free_skb_resources(priv);
+
+ dma_unmap_single(NULL, gfar_read(&regs->tbase),
+ sizeof(struct txbd)*priv->tx_ring_size,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(NULL, gfar_read(&regs->rbase),
+ sizeof(struct rxbd)*priv->rx_ring_size,
+ DMA_BIDIRECTIONAL);
+
+ /* Free the buffer descriptors */
+ kfree(priv->tx_bd_base);
+}
+
+/* If there are any tx skbs or rx skbs still around, free them.
+ * Then free tx_skbuff and rx_skbuff */
+void free_skb_resources(struct gfar_private *priv)
+{
+ struct rxbd8 *rxbdp;
+ struct txbd8 *txbdp;
+ int i;
+
+ /* Go through all the buffer descriptors and free their data buffers */
+ txbdp = priv->tx_bd_base;
+
+ for (i = 0; i < priv->tx_ring_size; i++) {
+
+ if (priv->tx_skbuff[i]) {
+ dma_unmap_single(NULL, txbdp->bufPtr,
+ txbdp->length,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
+ }
+ }
+
+ kfree(priv->tx_skbuff);
+
+ rxbdp = priv->rx_bd_base;
+
+ /* rx_skbuff is not guaranteed to be allocated, so only
+ * free it and its contents if it is allocated */
+ if(priv->rx_skbuff != NULL) {
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ if (priv->rx_skbuff[i]) {
+ dma_unmap_single(NULL, rxbdp->bufPtr,
+ priv->rx_buffer_size
+ + RXBUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ priv->rx_skbuff[i] = NULL;
+ }
+
+ rxbdp->status = 0;
+ rxbdp->length = 0;
+ rxbdp->bufPtr = 0;
+
+ rxbdp++;
+ }
+
+ kfree(priv->rx_skbuff);
+ }
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *dev)
+{
+ struct txbd8 *txbdp;
+ struct rxbd8 *rxbdp;
+ unsigned long addr;
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regs = priv->regs;
+ u32 tempval;
+ int err = 0;
+
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+
+ /* Allocate memory for the buffer descriptors */
+ addr =
+ (unsigned int) kmalloc(sizeof (struct txbd8) * priv->tx_ring_size +
+ sizeof (struct rxbd8) * priv->rx_ring_size,
+ GFP_KERNEL);
+
+ if (addr == 0) {
+ printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
+ dev->name);
+ return -ENOMEM;
+ }
+
+ priv->tx_bd_base = (struct txbd8 *) addr;
+
+ /* enet DMA only understands physical addresses */
+ gfar_write(&regs->tbase,
+ dma_map_single(NULL, (void *)addr,
+ sizeof(struct txbd8) * priv->tx_ring_size,
+ DMA_BIDIRECTIONAL));
+
+ /* Start the rx descriptor ring where the tx ring leaves off */
+ addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
+ priv->rx_bd_base = (struct rxbd8 *) addr;
+ gfar_write(&regs->rbase,
+ dma_map_single(NULL, (void *)addr,
+ sizeof(struct rxbd8) * priv->rx_ring_size,
+ DMA_BIDIRECTIONAL));
+
+ /* Setup the skbuff rings */
+ priv->tx_skbuff =
+ (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
+ priv->tx_ring_size, GFP_KERNEL);
+
+ if (priv->tx_skbuff == NULL) {
+ printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
+ dev->name);
+ err = -ENOMEM;
+ goto tx_skb_fail;
+ }
+
+ for (i = 0; i < priv->tx_ring_size; i++)
+ priv->tx_skbuff[i] = NULL;
+
+ priv->rx_skbuff =
+ (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
+ priv->rx_ring_size, GFP_KERNEL);
+
+ if (priv->rx_skbuff == NULL) {
+ printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
+ dev->name);
+ err = -ENOMEM;
+ goto rx_skb_fail;
+ }
+
+ for (i = 0; i < priv->rx_ring_size; i++)
+ priv->rx_skbuff[i] = NULL;
+
+ /* Initialize some variables in our dev structure */
+ priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
+ priv->cur_rx = priv->rx_bd_base;
+ priv->skb_curtx = priv->skb_dirtytx = 0;
+ priv->skb_currx = 0;
+
+ /* Initialize Transmit Descriptor Ring */
+ txbdp = priv->tx_bd_base;
+ for (i = 0; i < priv->tx_ring_size; i++) {
+ txbdp->status = 0;
+ txbdp->length = 0;
+ txbdp->bufPtr = 0;
+ txbdp++;
+ }
+
+ /* Set the last descriptor in the ring to indicate wrap */
+ txbdp--;
+ txbdp->status |= TXBD_WRAP;
+
+ rxbdp = priv->rx_bd_base;
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ struct sk_buff *skb = NULL;
+
+ rxbdp->status = 0;
+
+ skb = gfar_new_skb(dev, rxbdp);
+
+ priv->rx_skbuff[i] = skb;
+
+ rxbdp++;
+ }
+
+ /* Set the last descriptor in the ring to wrap */
+ rxbdp--;
+ rxbdp->status |= RXBD_WRAP;
+
+ /* If the device has multiple interrupts, register for
+ * them. Otherwise, only register for the one */
+ if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) {
+ /* Install our interrupt handlers for Error,
+ * Transmit, and Receive */
+ if (request_irq(priv->einfo->interruptError, gfar_error,
+ 0, "enet_error", dev) < 0) {
+ printk(KERN_ERR "%s: Can't get IRQ %d\n",
+ dev->name, priv->einfo->interruptError);
+
+ err = -1;
+ goto err_irq_fail;
+ }
+
+ if (request_irq(priv->einfo->interruptTransmit, gfar_transmit,
+ 0, "enet_tx", dev) < 0) {
+ printk(KERN_ERR "%s: Can't get IRQ %d\n",
+ dev->name, priv->einfo->interruptTransmit);
+
+ err = -1;
+
+ goto tx_irq_fail;
+ }
+
+ if (request_irq(priv->einfo->interruptReceive, gfar_receive,
+ 0, "enet_rx", dev) < 0) {
+ printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
+ dev->name, priv->einfo->interruptReceive);
+
+ err = -1;
+ goto rx_irq_fail;
+ }
+ } else {
+ if (request_irq(priv->einfo->interruptTransmit, gfar_interrupt,
+ 0, "gfar_interrupt", dev) < 0) {
+ printk(KERN_ERR "%s: Can't get IRQ %d\n",
+ dev->name, priv->einfo->interruptTransmit);
+
+ err = -1;
+ goto err_irq_fail;
+ }
+ }
+
+ /* Grab the PHY interrupt */
+ if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
+ if (request_irq(priv->einfo->interruptPHY, phy_interrupt,
+ SA_SHIRQ, "phy_interrupt", dev) < 0) {
+ printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
+ dev->name, priv->einfo->interruptPHY);
+
+ err = -1;
+
+ if (priv->einfo->flags & GFAR_HAS_MULTI_INTR)
+ goto phy_irq_fail;
+ else
+ goto tx_irq_fail;
+ }
+ } else {
+ init_timer(&priv->phy_info_timer);
+ priv->phy_info_timer.function = &gfar_phy_timer;
+ priv->phy_info_timer.data = (unsigned long) dev;
+ mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ);
+ }
+
+ /* Set up the bottom half queue */
+ INIT_WORK(&priv->tq, (void (*)(void *))gfar_phy_change, dev);
+
+ /* Configure the PHY interrupt */
+ phy_run_commands(dev, priv->phyinfo->startup);
+
+ /* Tell the kernel the link is up, and determine the
+ * negotiated features (speed, duplex) */
+ adjust_link(dev);
+
+ if (priv->link == 0)
+ printk(KERN_INFO "%s: No link detected\n", dev->name);
+
+ /* Configure the coalescing support */
+ if (priv->txcoalescing)
+ gfar_write(&regs->txic,
+ mk_ic_value(priv->txcount, priv->txtime));
+ else
+ gfar_write(&regs->txic, 0);
+
+ if (priv->rxcoalescing)
+ gfar_write(&regs->rxic,
+ mk_ic_value(priv->rxcount, priv->rxtime));
+ else
+ gfar_write(&regs->rxic, 0);
+
+ init_waitqueue_head(&priv->rxcleanupq);
+
+ /* Enable Rx and Tx in MACCFG1 */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval |= DMACTRL_INIT_SETTINGS;
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ /* Clear THLT, so that the DMA starts polling now */
+ gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
+
+ /* Make sure we aren't stopped */
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ /* Unmask the interrupts we look for */
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+
+ return 0;
+
+phy_irq_fail:
+ free_irq(priv->einfo->interruptReceive, dev);
+rx_irq_fail:
+ free_irq(priv->einfo->interruptTransmit, dev);
+tx_irq_fail:
+ free_irq(priv->einfo->interruptError, dev);
+err_irq_fail:
+rx_skb_fail:
+ free_skb_resources(priv);
+tx_skb_fail:
+ kfree(priv->tx_bd_base);
+ return err;
+}
+
+/* Called when something needs to use the ethernet device */
+/* Returns 0 for success. */
+static int gfar_enet_open(struct net_device *dev)
+{
+ int err;
+
+ /* Initialize a bunch of registers */
+ init_registers(dev);
+
+ gfar_set_mac_address(dev);
+
+ err = init_phy(dev);
+
+ if (err)
+ return err;
+
+ err = startup_gfar(dev);
+
+ netif_start_queue(dev);
+
+ return err;
+}
+
+/* This is called by the kernel when a frame is ready for transmission. */
+/* It is pointed to by the dev->hard_start_xmit function pointer */
+static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct txbd8 *txbdp;
+
+ /* Update transmit stats */
+ priv->stats.tx_bytes += skb->len;
+
+ /* Lock priv now */
+ spin_lock_irq(&priv->lock);
+
+ /* Point at the first free tx descriptor */
+ txbdp = priv->cur_tx;
+
+ /* Clear all but the WRAP status flags */
+ txbdp->status &= TXBD_WRAP;
+
+ /* Set buffer length and pointer */
+ txbdp->length = skb->len;
+ txbdp->bufPtr = dma_map_single(NULL, skb->data,
+ skb->len, DMA_TO_DEVICE);
+
+ /* Save the skb pointer so we can free it later */
+ priv->tx_skbuff[priv->skb_curtx] = skb;
+
+ /* Update the current skb pointer (wrapping if this was the last) */
+ priv->skb_curtx =
+ (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+
+ /* Flag the BD as interrupt-causing */
+ txbdp->status |= TXBD_INTERRUPT;
+
+ /* Flag the BD as ready to go, last in frame, and */
+ /* in need of CRC */
+ txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
+
+ dev->trans_start = jiffies;
+
+ /* If this was the last BD in the ring, the next one */
+ /* is at the beginning of the ring */
+ if (txbdp->status & TXBD_WRAP)
+ txbdp = priv->tx_bd_base;
+ else
+ txbdp++;
+
+ /* If the next BD still needs to be cleaned up, then the bds
+ are full. We need to tell the kernel to stop sending us stuff. */
+ if (txbdp == priv->dirty_tx) {
+ netif_stop_queue(dev);
+
+ priv->stats.tx_fifo_errors++;
+ }
+
+ /* Update the current txbd to the next one */
+ priv->cur_tx = txbdp;
+
+ /* Tell the DMA to go go go */
+ gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+
+ /* Unlock priv */
+ spin_unlock_irq(&priv->lock);
+
+ return 0;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int gfar_close(struct net_device *dev)
+{
+ stop_gfar(dev);
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/* returns a net_device_stats structure pointer */
+static struct net_device_stats * gfar_get_stats(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ return &(priv->stats);
+}
+
+/* Changes the mac address if the controller is not running. */
+int gfar_set_mac_address(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int i;
+ char tmpbuf[MAC_ADDR_LEN];
+ u32 tempval;
+
+ /* Now copy it into the mac registers backwards, because */
+ /* little endian is silly */
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i];
+
+ gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf)));
+
+ tempval = *((u32 *) (tmpbuf + 4));
+
+ gfar_write(&priv->regs->macstnaddr2, tempval);
+
+ return 0;
+}
+
+/**********************************************************************
+ * gfar_accept_fastpath
+ *
+ * Used to authenticate to the kernel that a fast path entry can be
+ * added to device's routing table cache
+ *
+ * Input : pointer to ethernet interface network device structure and
+ * a pointer to the designated entry to be added to the cache.
+ * Output : zero upon success, negative upon failure
+ **********************************************************************/
+#ifdef CONFIG_NET_FASTROUTE
+static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
+{
+ struct net_device *odev = dst->dev;
+
+ if ((dst->ops->protocol != __constant_htons(ETH_P_IP))
+ || (odev->type != ARPHRD_ETHER)
+ || (odev->accept_fastpath == NULL)) {
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+/* try_fastroute() -- Checks the fastroute cache to see if a given packet
+ * can be routed immediately to another device. If it can, we send it.
+ * If we used a fastroute, we return 1. Otherwise, we return 0.
+ * Returns 0 if CONFIG_NET_FASTROUTE is not on
+ */
+static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length)
+{
+#ifdef CONFIG_NET_FASTROUTE
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ unsigned int hash;
+ struct rtable *rt;
+ struct net_device *odev;
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned int CPU_ID = smp_processor_id();
+
+ eth = (struct ethhdr *) (skb->data);
+
+ /* Only route ethernet IP packets */
+ if (eth->h_proto == __constant_htons(ETH_P_IP)) {
+ iph = (struct iphdr *) (skb->data + ETH_HLEN);
+
+ /* Generate the hash value */
+ hash = ((*(u8 *) &iph->daddr) ^ (*(u8 *) & iph->saddr)) & NETDEV_FASTROUTE_HMASK;
+
+ rt = (struct rtable *) (dev->fastpath[hash]);
+ if (rt != NULL
+ && ((*(u32 *) &iph->daddr) == (*(u32 *) &rt->key.dst))
+ && ((*(u32 *) &iph->saddr) == (*(u32 *) &rt->key.src))
+ && !(rt->u.dst.obsolete)) {
+ odev = rt->u.dst.dev;
+ netdev_rx_stat[CPU_ID].fastroute_hit++;
+
+ /* Make sure the packet is:
+ * 1) IPv4
+ * 2) without any options (header length of 5)
+ * 3) Not a multicast packet
+ * 4) going to a valid destination
+ * 5) Not out of time-to-live
+ */
+ if (iph->version == 4
+ && iph->ihl == 5
+ && (!(eth->h_dest[0] & 0x01))
+ && neigh_is_valid(rt->u.dst.neighbour)
+ && iph->ttl > 1) {
+
+ /* Fast Route Path: Taken if the outgoing device is ready to transmit the packet now */
+ if ((!netif_queue_stopped(odev))
+ && (!spin_is_locked(odev->xmit_lock))
+ && (skb->len <= (odev->mtu + ETH_HLEN + 2 + 4))) {
+
+ skb->pkt_type = PACKET_FASTROUTE;
+ skb->protocol = __constant_htons(ETH_P_IP);
+ ip_decrease_ttl(iph);
+ memcpy(eth->h_source, odev->dev_addr, MAC_ADDR_LEN);
+ memcpy(eth->h_dest, rt->u.dst.neighbour->ha, MAC_ADDR_LEN);
+ skb->dev = odev;
+
+ /* Prep the skb for the packet */
+ skb_put(skb, length);
+
+ if (odev->hard_start_xmit(skb, odev) != 0) {
+ panic("%s: FastRoute path corrupted", dev->name);
+ }
+ netdev_rx_stat[CPU_ID].fastroute_success++;
+ }
+
+ /* Semi Fast Route Path: Mark the packet as needing fast routing, but let the
+ * stack handle getting it to the device */
+ else {
+ skb->pkt_type = PACKET_FASTROUTE;
+ skb->nh.raw = skb->data + ETH_HLEN;
+ skb->protocol = __constant_htons(ETH_P_IP);
+ netdev_rx_stat[CPU_ID].fastroute_defer++;
+
+ /* Prep the skb for the packet */
+ skb_put(skb, length);
+
+ if(RECEIVE(skb) == NET_RX_DROP) {
+ priv->extra_stats.kernel_dropped++;
+ }
+ }
+
+ return 1;
+ }
+ }
+ }
+#endif /* CONFIG_NET_FASTROUTE */
+ return 0;
+}
+
+static int gfar_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int tempsize, tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+ int oldsize = priv->rx_buffer_size;
+ int frame_size = new_mtu + 18;
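+ /* frame_size = MTU plus the 14-byte ethernet header and 4-byte FCS */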
+
+ if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+ printk(KERN_ERR "%s: Invalid MTU setting\n", dev->name);
+ return -EINVAL;
+ }
+
+ tempsize =
+ (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
+
+ /* Only stop and start the controller if it isn't already
+ * stopped */
+ if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+ stop_gfar(dev);
+
+ priv->rx_buffer_size = tempsize;
+
+ dev->mtu = new_mtu;
+
+ gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
+ gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
+
+ /* If the mtu is larger than the max size for standard
+ * ethernet frames (ie, a jumbo frame), then set maccfg2
+ * to allow huge frames, and to check the length */
+ tempval = gfar_read(&priv->regs->maccfg2);
+
+ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
+ tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+ else
+ tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+
+ gfar_write(&priv->regs->maccfg2, tempval);
+
+ if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+ startup_gfar(dev);
+
+ return 0;
+}
+
+/* gfar_timeout gets called when a packet has not been
+ * transmitted after a set amount of time.
+ * For now, assume that clearing out all the structures, and
+ * starting over will fix the problem. */
+static void gfar_timeout(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ priv->stats.tx_errors++;
+
+ if (dev->flags & IFF_UP) {
+ stop_gfar(dev);
+ startup_gfar(dev);
+ }
+
+ if (!netif_queue_stopped(dev))
+ netif_schedule(dev);
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct txbd8 *bdp;
+
+ /* Clear IEVENT */
+ gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
+
+ /* Lock priv */
+ spin_lock(&priv->lock);
+ bdp = priv->dirty_tx;
+ while ((bdp->status & TXBD_READY) == 0) {
+ /* If dirty_tx and cur_tx are the same, then either the */
+ /* ring is empty or full now (it could only be full in the beginning, */
+ /* obviously). If it is empty, we are done. */
+ if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
+ break;
+
+ priv->stats.tx_packets++;
+
+ /* Deferred means some collisions occurred during transmit, */
+ /* but we eventually sent the packet. */
+ if (bdp->status & TXBD_DEF)
+ priv->stats.collisions++;
+
+ /* Free the sk buffer associated with this TxBD */
+ dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
+ priv->tx_skbuff[priv->skb_dirtytx] = NULL;
+ priv->skb_dirtytx =
+ (priv->skb_dirtytx +
+ 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+
+ /* update bdp to point at next bd in the ring (wrapping if necessary) */
+ if (bdp->status & TXBD_WRAP)
+ bdp = priv->tx_bd_base;
+ else
+ bdp++;
+
+ /* Move dirty_tx to be the next bd */
+ priv->dirty_tx = bdp;
+
+ /* We freed a buffer, so now we can restart transmission */
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ } /* while ((bdp->status & TXBD_READY) == 0) */
+
+ /* If we are coalescing the interrupts, reset the timer */
+ /* Otherwise, clear it */
+ if (priv->txcoalescing)
+ gfar_write(&priv->regs->txic,
+ mk_ic_value(priv->txcount, priv->txtime));
+ else
+ gfar_write(&priv->regs->txic, 0);
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct sk_buff *skb = NULL;
+ unsigned int timeout = SKB_ALLOC_TIMEOUT;
+
+ /* We have to allocate the skb, so keep trying till we succeed */
+ while ((!skb) && timeout--)
+ skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
+
+ if (skb == NULL)
+ return NULL;
+
+ /* We need the data buffer to be aligned properly. We will reserve
+ * as many bytes as needed to align the data properly
+ */
+ skb_reserve(skb,
+ RXBUF_ALIGNMENT -
+ (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)));
+
+ skb->dev = dev;
+
+ bdp->bufPtr = dma_map_single(NULL, skb->data,
+ priv->rx_buffer_size + RXBUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+
+ bdp->length = 0;
+
+ /* Mark the buffer empty */
+ bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
+
+ return skb;
+}
+
+static inline void count_errors(unsigned short status, struct gfar_private *priv)
+{
+ struct net_device_stats *stats = &priv->stats;
+ struct gfar_extra_stats *estats = &priv->extra_stats;
+
+ /* If the packet was truncated, none of the other errors
+ * matter */
+ if (status & RXBD_TRUNCATED) {
+ stats->rx_length_errors++;
+
+ estats->rx_trunc++;
+
+ return;
+ }
+ /* Count the errors, if there were any */
+ if (status & (RXBD_LARGE | RXBD_SHORT)) {
+ stats->rx_length_errors++;
+
+ if (status & RXBD_LARGE)
+ estats->rx_large++;
+ else
+ estats->rx_short++;
+ }
+ if (status & RXBD_NONOCTET) {
+ stats->rx_frame_errors++;
+ estats->rx_nonoctet++;
+ }
+ if (status & RXBD_CRCERR) {
+ estats->rx_crcerr++;
+ stats->rx_crc_errors++;
+ }
+ if (status & RXBD_OVERRUN) {
+ estats->rx_overrun++;
+ stats->rx_crc_errors++;
+ }
+}
+
+irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+
+#ifdef CONFIG_GFAR_NAPI
+ u32 tempval;
+#endif
+
+ /* Clear IEVENT, so rx interrupt isn't called again
+ * because of this interrupt */
+ gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
+
+ /* support NAPI */
+#ifdef CONFIG_GFAR_NAPI
+ if (netif_rx_schedule_prep(dev)) {
+ tempval = gfar_read(&priv->regs->imask);
+ tempval &= IMASK_RX_DISABLED;
+ gfar_write(&priv->regs->imask, tempval);
+
+ __netif_rx_schedule(dev);
+ } else {
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
+ dev->name, gfar_read(&priv->regs->ievent),
+ gfar_read(&priv->regs->imask));
+#endif
+ }
+#else
+
+ spin_lock(&priv->lock);
+ gfar_clean_rx_ring(dev);
+
+ /* If we are coalescing interrupts, update the timer */
+ /* Otherwise, clear it */
+ if (priv->rxcoalescing)
+ gfar_write(&priv->regs->rxic,
+ mk_ic_value(priv->rxcount, priv->rxtime));
+ else
+ gfar_write(&priv->regs->rxic, 0);
+
+ /* Just in case we need to wake the ring param changer */
+ priv->rxclean = 1;
+
+ spin_unlock(&priv->lock);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+
+/* gfar_process_frame() -- handle one incoming packet if skb
+ * isn't NULL. Try the fastroute before using the stack */
+static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int length)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (skb == NULL) {
+#ifdef BRIEF_GFAR_ERRORS
+ printk(KERN_WARNING "%s: Missing skb!!.\n",
+ dev->name);
+#endif
+ priv->stats.rx_dropped++;
+ priv->extra_stats.rx_skbmissing++;
+ } else {
+ if(try_fastroute(skb, dev, length) == 0) {
+ /* Prep the skb for the packet */
+ skb_put(skb, length);
+
+ /* Tell the skb what kind of packet this is */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Send the packet up the stack */
+ if (RECEIVE(skb) == NET_RX_DROP) {
+ priv->extra_stats.kernel_dropped++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
+ * until all are gone (or, in the case of NAPI, the budget/quota
+ * has been reached). Returns the number of frames handled
+ */
+#ifdef CONFIG_GFAR_NAPI
+static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+#else
+static int gfar_clean_rx_ring(struct net_device *dev)
+#endif
+{
+ struct rxbd8 *bdp;
+ struct sk_buff *skb;
+ u16 pkt_len;
+ int howmany = 0;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Get the first full descriptor */
+ bdp = priv->cur_rx;
+
+#ifdef CONFIG_GFAR_NAPI
+#define GFAR_RXDONE() ((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))
+#else
+#define GFAR_RXDONE() (bdp->status & RXBD_EMPTY)
+#endif
+ while (!GFAR_RXDONE()) {
+ skb = priv->rx_skbuff[priv->skb_currx];
+
+ if (!(bdp->status &
+ (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
+ | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
+ /* Increment the number of packets */
+ priv->stats.rx_packets++;
+ howmany++;
+
+ /* Remove the FCS from the packet length */
+ pkt_len = bdp->length - 4;
+
+ gfar_process_frame(dev, skb, pkt_len);
+
+ priv->stats.rx_bytes += pkt_len;
+
+ } else {
+ count_errors(bdp->status, priv);
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ priv->rx_skbuff[priv->skb_currx] = NULL;
+ }
+
+ dev->last_rx = jiffies;
+
+ /* Clear the status flags for this buffer */
+ bdp->status &= ~RXBD_STATS;
+
+ /* Add another skb for the future */
+ skb = gfar_new_skb(dev, bdp);
+ priv->rx_skbuff[priv->skb_currx] = skb;
+
+ /* Update to the next pointer */
+ if (bdp->status & RXBD_WRAP)
+ bdp = priv->rx_bd_base;
+ else
+ bdp++;
+
+ /* update to point at the next skb */
+ priv->skb_currx =
+ (priv->skb_currx +
+ 1) & RX_RING_MOD_MASK(priv->rx_ring_size);
+
+ }
+
+ /* Update the current rxbd pointer to be the next one */
+ priv->cur_rx = bdp;
+
+ /* If no packets have arrived since the
+ * last one we processed, clear the IEVENT RX and
+ * BSY bits so that another interrupt won't be
+ * generated when we set IMASK */
+ if (bdp->status & RXBD_EMPTY)
+ gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
+
+ return howmany;
+}
+
+#ifdef CONFIG_GFAR_NAPI
+static int gfar_poll(struct net_device *dev, int *budget)
+{
+ int howmany;
+ struct gfar_private *priv = netdev_priv(dev);
+ int rx_work_limit = *budget;
+
+ if (rx_work_limit > dev->quota)
+ rx_work_limit = dev->quota;
+
+ spin_lock(&priv->lock);
+ howmany = gfar_clean_rx_ring(dev, rx_work_limit);
+
+ dev->quota -= howmany;
+ rx_work_limit -= howmany;
+ *budget -= howmany;
+
+ if (rx_work_limit >= 0) {
+ netif_rx_complete(dev);
+
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+
+ gfar_write(&priv->regs->imask, IMASK_DEFAULT);
+
+ /* If we are coalescing interrupts, update the timer */
+ /* Otherwise, clear it */
+ if (priv->rxcoalescing)
+ gfar_write(&priv->regs->rxic,
+ mk_ic_value(priv->rxcount, priv->rxtime));
+ else
+ gfar_write(&priv->regs->rxic, 0);
+
+ /* Signal to the ring size changer that it's safe to go */
+ priv->rxclean = 1;
+ }
+
+ spin_unlock(&priv->lock);
+
+ return (rx_work_limit < 0) ? 1 : 0;
+}
+#endif
+
+/* The interrupt handler for devices with one interrupt */
+static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Save ievent for future reference */
+ u32 events = gfar_read(&priv->regs->ievent);
+
+ /* Clear IEVENT */
+ gfar_write(&priv->regs->ievent, events);
+
+ /* Check for reception */
+ if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
+ gfar_receive(irq, dev_id, regs);
+
+ /* Check for transmit completion */
+ if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
+ gfar_transmit(irq, dev_id, regs);
+
+ /* Update error statistics */
+ if (events & IEVENT_TXE) {
+ priv->stats.tx_errors++;
+
+ if (events & IEVENT_LC)
+ priv->stats.tx_window_errors++;
+ if (events & IEVENT_CRL)
+ priv->stats.tx_aborted_errors++;
+ if (events & IEVENT_XFUN) {
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_WARNING "%s: tx underrun. dropped packet\n",
+ dev->name);
+#endif
+ priv->stats.tx_dropped++;
+ priv->extra_stats.tx_underrun++;
+
+ /* Reactivate the Tx Queues */
+ gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+ }
+ }
+ if (events & IEVENT_BSY) {
+ priv->stats.rx_errors++;
+ priv->extra_stats.rx_bsy++;
+
+ gfar_receive(irq, dev_id, regs);
+
+#ifndef CONFIG_GFAR_NAPI
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+#endif
+
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
+ gfar_read(&priv->regs->rstat));
+#endif
+ }
+ if (events & IEVENT_BABR) {
+ priv->stats.rx_errors++;
+ priv->extra_stats.rx_babr++;
+
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: babbling error\n", dev->name);
+#endif
+ }
+ if (events & IEVENT_EBERR) {
+ priv->extra_stats.eberr++;
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: EBERR\n", dev->name);
+#endif
+ }
+ if (events & IEVENT_RXC) {
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: control frame\n", dev->name);
+#endif
+ }
+
+ if (events & IEVENT_BABT) {
+ priv->extra_stats.tx_babt++;
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: babt error\n", dev->name);
+#endif
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Run the commands which acknowledge the interrupt */
+ phy_run_commands(dev, priv->phyinfo->ack_int);
+
+ /* Schedule the bottom half */
+ schedule_work(&priv->tq);
+
+ return IRQ_HANDLED;
+}
+
+/* Scheduled by the phy_interrupt/timer to handle PHY changes */
+static void gfar_phy_change(void *data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct gfar_private *priv = netdev_priv(dev);
+ int timeout = HZ / 1000 + 1;
+
+ /* Delay to give the PHY a chance to change the
+ * register state */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
+
+ /* Run the commands which check the link state */
+ phy_run_commands(dev, priv->phyinfo->handle_int);
+
+ /* React to the change in state */
+ adjust_link(dev);
+}
+
+/* Called every so often on systems that don't interrupt
+ * the core for PHY changes */
+static void gfar_phy_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ schedule_work(&priv->tq);
+
+ mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ);
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the priv structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void adjust_link(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regs = priv->regs;
+ u32 tempval;
+
+ if (priv->link) {
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (priv->duplexity != priv->olddplx) {
+ if (!(priv->duplexity)) {
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~(MACCFG2_FULL_DUPLEX);
+ gfar_write(&regs->maccfg2, tempval);
+
+ printk(KERN_INFO "%s: Half Duplex\n",
+ dev->name);
+ } else {
+ tempval = gfar_read(&regs->maccfg2);
+ tempval |= MACCFG2_FULL_DUPLEX;
+ gfar_write(&regs->maccfg2, tempval);
+
+ printk(KERN_INFO "%s: Full Duplex\n",
+ dev->name);
+ }
+
+ priv->olddplx = priv->duplexity;
+ }
+
+ if (priv->speed != priv->oldspeed) {
+ switch (priv->speed) {
+ case 1000:
+ tempval = gfar_read(&regs->maccfg2);
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+ gfar_write(&regs->maccfg2, tempval);
+ break;
+ case 100:
+ case 10:
+ tempval = gfar_read(&regs->maccfg2);
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+ gfar_write(&regs->maccfg2, tempval);
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Ack! Speed (%d) is not 10/100/1000!\n",
+ dev->name, priv->speed);
+ break;
+ }
+
+ printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
+ priv->speed);
+
+ priv->oldspeed = priv->speed;
+ }
+
+ if (!priv->oldlink) {
+ printk(KERN_INFO "%s: Link is up\n", dev->name);
+ priv->oldlink = 1;
+ netif_carrier_on(dev);
+ netif_schedule(dev);
+ }
+ } else {
+ if (priv->oldlink) {
+ printk(KERN_INFO "%s: Link is down\n", dev->name);
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->olddplx = -1;
+ netif_carrier_off(dev);
+ }
+ }
+}
+
+
+/* Update the hash table based on the current list of multicast
+ * addresses we subscribe to. Also, change the promiscuity of
+ * the device based on the flags (this function is called
+ * whenever dev->flags is changed */
+static void gfar_set_multi(struct net_device *dev)
+{
+ struct dev_mc_list *mc_ptr;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regs = priv->regs;
+ u32 tempval;
+
+ if(dev->flags & IFF_PROMISC) {
+ printk(KERN_INFO "%s: Entering promiscuous mode.\n",
+ dev->name);
+ /* Set RCTRL to PROM */
+ tempval = gfar_read(&regs->rctrl);
+ tempval |= RCTRL_PROM;
+ gfar_write(&regs->rctrl, tempval);
+ } else {
+ /* Set RCTRL to not PROM */
+ tempval = gfar_read(&regs->rctrl);
+ tempval &= ~(RCTRL_PROM);
+ gfar_write(&regs->rctrl, tempval);
+ }
+
+ if(dev->flags & IFF_ALLMULTI) {
+ /* Set the hash to rx all multicast frames */
+ gfar_write(&regs->gaddr0, 0xffffffff);
+ gfar_write(&regs->gaddr1, 0xffffffff);
+ gfar_write(&regs->gaddr2, 0xffffffff);
+ gfar_write(&regs->gaddr3, 0xffffffff);
+ gfar_write(&regs->gaddr4, 0xffffffff);
+ gfar_write(&regs->gaddr5, 0xffffffff);
+ gfar_write(&regs->gaddr6, 0xffffffff);
+ gfar_write(&regs->gaddr7, 0xffffffff);
+ } else {
+ /* zero out the hash */
+ gfar_write(&regs->gaddr0, 0x0);
+ gfar_write(&regs->gaddr1, 0x0);
+ gfar_write(&regs->gaddr2, 0x0);
+ gfar_write(&regs->gaddr3, 0x0);
+ gfar_write(&regs->gaddr4, 0x0);
+ gfar_write(&regs->gaddr5, 0x0);
+ gfar_write(&regs->gaddr6, 0x0);
+ gfar_write(&regs->gaddr7, 0x0);
+
+ if(dev->mc_count == 0)
+ return;
+
+ /* Parse the list, and set the appropriate bits */
+ for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+ }
+ }
+
+ return;
+}
+
+/* Set the appropriate hash bit for the given addr */
+/* The algorithm works like so:
+ * 1) Take the Destination Address (ie the multicast address), and
+ * do a CRC on it (little endian), and reverse the bits of the
+ * result.
+ * 2) Use the 8 most significant bits as a hash into a 256-entry
+ * table. The table is controlled through 8 32-bit registers:
+ * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
+ * entry 255.  This means that the 3 most significant bits of the
+ * hash index select which gaddr register to use, and the 5 other
+ * bits indicate which bit (assuming an IBM numbering scheme, which
+ * for PowerPC (tm) is usually the case) in the register holds
+ * the entry. */
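+/* Worked example: if the result computed below has 0xe4 as its most
+ * significant byte (binary 1110 0100), then whichreg = 0x7 (gaddr7),
+ * whichbit = 0x04, and bit 31 - 4 = 27 of gaddr7 gets set. */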
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
+{
+ u32 tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regs = priv->regs;
+ u32 *hash = &regs->gaddr0;
+ u32 result = ether_crc(MAC_ADDR_LEN, addr);
+ u8 whichreg = ((result >> 29) & 0x7);
+ u8 whichbit = ((result >> 24) & 0x1f);
+ u32 value = (1 << (31-whichbit));
+
+ tempval = gfar_read(&hash[whichreg]);
+ tempval |= value;
+ gfar_write(&hash[whichreg], tempval);
+
+ return;
+}
+
+/* GFAR error interrupt handler */
+static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Save ievent for future reference */
+ u32 events = gfar_read(&priv->regs->ievent);
+
+ /* Clear IEVENT */
+ gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
+
+ /* Hmm... */
+#if defined (BRIEF_GFAR_ERRORS) || defined (VERBOSE_GFAR_ERRORS)
+ printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
+ dev->name, events, gfar_read(&priv->regs->imask));
+#endif
+
+ /* Update the error counters */
+ if (events & IEVENT_TXE) {
+ priv->stats.tx_errors++;
+
+ if (events & IEVENT_LC)
+ priv->stats.tx_window_errors++;
+ if (events & IEVENT_CRL)
+ priv->stats.tx_aborted_errors++;
+ if (events & IEVENT_XFUN) {
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: underrun. packet dropped.\n",
+ dev->name);
+#endif
+ priv->stats.tx_dropped++;
+ priv->extra_stats.tx_underrun++;
+
+ /* Reactivate the Tx Queues */
+ gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+ }
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
+#endif
+ }
+ if (events & IEVENT_BSY) {
+ priv->stats.rx_errors++;
+ priv->extra_stats.rx_bsy++;
+
+ gfar_receive(irq, dev_id, regs);
+
+#ifndef CONFIG_GFAR_NAPI
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+#endif
+
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
+ gfar_read(&priv->regs->rstat));
+#endif
+ }
+ if (events & IEVENT_BABR) {
+ priv->stats.rx_errors++;
+ priv->extra_stats.rx_babr++;
+
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: babbling error\n", dev->name);
+#endif
+ }
+ if (events & IEVENT_EBERR) {
+ priv->extra_stats.eberr++;
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: EBERR\n", dev->name);
+#endif
+ }
+ if (events & IEVENT_RXC) {
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: control frame\n", dev->name);
+#endif
+ }
+
+ if (events & IEVENT_BABT) {
+ priv->extra_stats.tx_babt++;
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: babt error\n", dev->name);
+#endif
+ }
+ return IRQ_HANDLED;
+}
+
+/* Structure for a device driver */
+static struct ocp_device_id gfar_ids[] = {
+ {.vendor = OCP_ANY_ID,.function = OCP_FUNC_GFAR},
+ {.vendor = OCP_VENDOR_INVALID}
+};
+
+static struct ocp_driver gfar_driver = {
+ .name = "gianfar",
+ .id_table = gfar_ids,
+
+ .probe = gfar_probe,
+ .remove = gfar_remove,
+};
+
+static int __init gfar_init(void)
+{
+ int rc;
+
+ rc = ocp_register_driver(&gfar_driver);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+ if (rc != 0) {
+#else
+ if (rc == 0) {
+#endif
+ ocp_unregister_driver(&gfar_driver);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit gfar_exit(void)
+{
+ ocp_unregister_driver(&gfar_driver);
+}
+
+module_init(gfar_init);
+module_exit(gfar_exit);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
new file mode 100644
index 00000000000000..f7af3465ce07fc
--- /dev/null
+++ b/drivers/net/gianfar.h
@@ -0,0 +1,537 @@
+/*
+ * drivers/net/gianfar.h
+ *
+ * Gianfar Ethernet Driver
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Still left to do:
+ * -Add support for module parameters
+ */
+#ifndef __GIANFAR_H
+#define __GIANFAR_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#define schedule_work schedule_task
+#endif
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <asm/ocp.h>
+#include "gianfar_phy.h"
+
+/* The maximum number of packets to be handled in one call of gfar_poll */
+#define GFAR_DEV_WEIGHT 64
+
+/* Number of bytes to align the rx bufs to */
+#define RXBUF_ALIGNMENT 64
+
+/* The number of bytes which composes a unit for the purpose of
+ * allocating data buffers. ie-for any given MTU, the data buffer
+ * will be the next highest multiple of 512 bytes. */
+#define INCREMENTAL_BUFFER_SIZE 512
+
+
+#define MAC_ADDR_LEN 6
+
+extern char gfar_driver_name[];
+extern char gfar_driver_version[];
+
+/* These need to be powers of 2 for this driver */
+#ifdef CONFIG_GFAR_NAPI
+#define DEFAULT_TX_RING_SIZE 256
+#define DEFAULT_RX_RING_SIZE 256
+#else
+#define DEFAULT_TX_RING_SIZE 64
+#define DEFAULT_RX_RING_SIZE 64
+#endif
+
+#define GFAR_RX_MAX_RING_SIZE 256
+#define GFAR_TX_MAX_RING_SIZE 256
+
+#define DEFAULT_RX_BUFFER_SIZE 1536
+#define TX_RING_MOD_MASK(size) (size-1)
+#define RX_RING_MOD_MASK(size) (size-1)
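+/* The mod masks rely on the ring sizes above being powers of two;
+ * e.g. (index + 1) & TX_RING_MOD_MASK(256) wraps index 255 back to 0,
+ * which is how skb_curtx and skb_dirtytx advance in gianfar.c. */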
+#define JUMBO_BUFFER_SIZE 9728
+#define JUMBO_FRAME_SIZE 9600
+
+/* Latency of interface clock in nanoseconds */
+/* Interface clock latency, in this case, means the
+ * time described by a value of 1 in the interrupt
+ * coalescing registers' time fields. Since those fields
+ * refer to the time it takes for 64 clocks to pass, the
+ * latencies are as such:
+ * GBIT = 125MHz => 8ns/clock => 8*64 ns / tick
+ * 100 = 25 MHz => 40ns/clock => 40*64 ns / tick
+ * 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick
+ */
+#define GFAR_GBIT_TIME 512
+#define GFAR_100_TIME 2560
+#define GFAR_10_TIME 25600
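+/* i.e. one coalescing-timer tick is 8 * 64 = 512 ns at gigabit,
+ * 40 * 64 = 2560 ns at 100 Mbit, and 400 * 64 = 25600 ns at 10 Mbit,
+ * which is where the three values above come from. */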
+
+#define DEFAULT_TXCOUNT 16
+#define DEFAULT_TXTIME 32768
+
+#define DEFAULT_RXCOUNT 16
+#define DEFAULT_RXTIME 32768
+
+#define TBIPA_VALUE 0x1f
+#define MIIMCFG_INIT_VALUE 0x00000007
+#define MIIMCFG_RESET 0x80000000
+#define MIIMIND_BUSY 0x00000001
+
+/* MAC register bits */
+#define MACCFG1_SOFT_RESET 0x80000000
+#define MACCFG1_RESET_RX_MC 0x00080000
+#define MACCFG1_RESET_TX_MC 0x00040000
+#define MACCFG1_RESET_RX_FUN 0x00020000
+#define MACCFG1_RESET_TX_FUN 0x00010000
+#define MACCFG1_LOOPBACK 0x00000100
+#define MACCFG1_RX_FLOW 0x00000020
+#define MACCFG1_TX_FLOW 0x00000010
+#define MACCFG1_SYNCD_RX_EN 0x00000008
+#define MACCFG1_RX_EN 0x00000004
+#define MACCFG1_SYNCD_TX_EN 0x00000002
+#define MACCFG1_TX_EN 0x00000001
+
+#define MACCFG2_INIT_SETTINGS 0x00007205
+#define MACCFG2_FULL_DUPLEX 0x00000001
+#define MACCFG2_IF 0x00000300
+#define MACCFG2_MII 0x00000100
+#define MACCFG2_GMII 0x00000200
+#define MACCFG2_HUGEFRAME 0x00000020
+#define MACCFG2_LENGTHCHECK 0x00000010
+
+#define ECNTRL_INIT_SETTINGS 0x00001000
+#define ECNTRL_TBI_MODE 0x00000020
+
+#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
+
+#define MINFLR_INIT_SETTINGS 0x00000040
+
+/* Init to do tx snooping for buffers and descriptors */
+#define DMACTRL_INIT_SETTINGS 0x000000c3
+#define DMACTRL_GRS 0x00000010
+#define DMACTRL_GTS 0x00000008
+
+#define TSTAT_CLEAR_THALT 0x80000000
+
+/* Interrupt coalescing macros */
+#define IC_ICEN 0x80000000
+#define IC_ICFT_MASK 0x1fe00000
+#define IC_ICFT_SHIFT 21
+#define mk_ic_icft(x) \
+ (((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK)
+#define IC_ICTT_MASK 0x0000ffff
+#define mk_ic_ictt(x) (x&IC_ICTT_MASK)
+
+#define mk_ic_value(count, time) (IC_ICEN | \
+ mk_ic_icft(count) | \
+ mk_ic_ictt(time))
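+
+/* Usage sketch: this is how the ethtool hooks in gianfar_ethtool.c
+ * program the receive coalescing register with a frame-count threshold
+ * and a timer threshold (writing 0 disables coalescing):
+ *
+ *	gfar_write(&priv->regs->rxic,
+ *		   mk_ic_value(priv->rxcount, priv->rxtime));
+ */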
+
+#define RCTRL_PROM 0x00000008
+#define RSTAT_CLEAR_RHALT 0x00800000
+
+#define IEVENT_INIT_CLEAR 0xffffffff
+#define IEVENT_BABR 0x80000000
+#define IEVENT_RXC 0x40000000
+#define IEVENT_BSY 0x20000000
+#define IEVENT_EBERR 0x10000000
+#define IEVENT_MSRO 0x04000000
+#define IEVENT_GTSC 0x02000000
+#define IEVENT_BABT 0x01000000
+#define IEVENT_TXC 0x00800000
+#define IEVENT_TXE 0x00400000
+#define IEVENT_TXB 0x00200000
+#define IEVENT_TXF 0x00100000
+#define IEVENT_LC 0x00040000
+#define IEVENT_CRL 0x00020000
+#define IEVENT_XFUN 0x00010000
+#define IEVENT_RXB0 0x00008000
+#define IEVENT_GRSC 0x00000100
+#define IEVENT_RXF0 0x00000080
+#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0)
+#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
+#define IEVENT_ERR_MASK \
+(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
+ IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
+ | IEVENT_CRL | IEVENT_XFUN)
+
+#define IMASK_INIT_CLEAR 0x00000000
+#define IMASK_BABR 0x80000000
+#define IMASK_RXC 0x40000000
+#define IMASK_BSY 0x20000000
+#define IMASK_EBERR 0x10000000
+#define IMASK_MSRO 0x04000000
+#define IMASK_GRSC 0x02000000
+#define IMASK_BABT 0x01000000
+#define IMASK_TXC 0x00800000
+#define IMASK_TXEEN 0x00400000
+#define IMASK_TXBEN 0x00200000
+#define IMASK_TXFEN 0x00100000
+#define IMASK_LC 0x00040000
+#define IMASK_CRL 0x00020000
+#define IMASK_XFUN 0x00010000
+#define IMASK_RXB0 0x00008000
+#define IMASK_GTSC 0x00000100
+#define IMASK_RXFEN0 0x00000080
+#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY)
+#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
+ IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
+ IMASK_XFUN | IMASK_RXC | IMASK_BABT)
+
+
+/* Attribute fields */
+
+/* This enables rx snooping for buffers and descriptors */
+#ifdef CONFIG_GFAR_BDSTASH
+#define ATTR_BDSTASH 0x00000800
+#else
+#define ATTR_BDSTASH 0x00000000
+#endif
+
+#ifdef CONFIG_GFAR_BUFSTASH
+#define ATTR_BUFSTASH 0x00004000
+#define STASH_LENGTH 64
+#else
+#define ATTR_BUFSTASH 0x00000000
+#endif
+
+#define ATTR_SNOOPING 0x000000c0
+#define ATTR_INIT_SETTINGS (ATTR_SNOOPING \
+ | ATTR_BDSTASH | ATTR_BUFSTASH)
+
+#define ATTRELI_INIT_SETTINGS 0x0
+
+
+/* TxBD status field bits */
+#define TXBD_READY 0x8000
+#define TXBD_PADCRC 0x4000
+#define TXBD_WRAP 0x2000
+#define TXBD_INTERRUPT 0x1000
+#define TXBD_LAST 0x0800
+#define TXBD_CRC 0x0400
+#define TXBD_DEF 0x0200
+#define TXBD_HUGEFRAME 0x0080
+#define TXBD_LATECOLLISION 0x0080
+#define TXBD_RETRYLIMIT 0x0040
+#define TXBD_RETRYCOUNTMASK 0x003c
+#define TXBD_UNDERRUN 0x0002
+
+/* RxBD status field bits */
+#define RXBD_EMPTY 0x8000
+#define RXBD_RO1 0x4000
+#define RXBD_WRAP 0x2000
+#define RXBD_INTERRUPT 0x1000
+#define RXBD_LAST 0x0800
+#define RXBD_FIRST 0x0400
+#define RXBD_MISS 0x0100
+#define RXBD_BROADCAST 0x0080
+#define RXBD_MULTICAST 0x0040
+#define RXBD_LARGE 0x0020
+#define RXBD_NONOCTET 0x0010
+#define RXBD_SHORT 0x0008
+#define RXBD_CRCERR 0x0004
+#define RXBD_OVERRUN 0x0002
+#define RXBD_TRUNCATED 0x0001
+#define RXBD_STATS 0x01ff
+
+struct txbd8
+{
+ u16 status; /* Status Fields */
+ u16 length; /* Buffer length */
+ u32 bufPtr; /* Buffer Pointer */
+};
+
+struct rxbd8
+{
+ u16 status; /* Status Fields */
+ u16 length; /* Buffer Length */
+ u32 bufPtr; /* Buffer Pointer */
+};
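+
+/* Sketch of how the RxBD bits above are typically consumed (simplified,
+ * not a verbatim quote from gianfar.c): a descriptor belongs to the
+ * controller while RXBD_EMPTY is set; once the controller clears it,
+ * the other status bits describe the completed frame.
+ *
+ *	while (!(bdp->status & RXBD_EMPTY)) {
+ *		u16 wrap = bdp->status & RXBD_WRAP;
+ *		// ...pass the frame up, or count the RXBD_* error bits...
+ *		bdp->status = RXBD_EMPTY | wrap;   // hand the BD back
+ *		bdp = wrap ? priv->rx_bd_base : bdp + 1;
+ *	}
+ */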
+
+struct rmon_mib
+{
+ u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
+ u32 tr127; /* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */
+ u32 tr255; /* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */
+ u32 tr511; /* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */
+ u32 tr1k; /* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */
+ u32 trmax; /* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */
+ u32 trmgv; /* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */
+ u32 rbyt; /* 0x.69c - Receive Byte Counter */
+ u32 rpkt; /* 0x.6a0 - Receive Packet Counter */
+ u32 rfcs; /* 0x.6a4 - Receive FCS Error Counter */
+ u32 rmca; /* 0x.6a8 - Receive Multicast Packet Counter */
+ u32 rbca; /* 0x.6ac - Receive Broadcast Packet Counter */
+ u32 rxcf; /* 0x.6b0 - Receive Control Frame Packet Counter */
+ u32 rxpf; /* 0x.6b4 - Receive Pause Frame Packet Counter */
+ u32 rxuo; /* 0x.6b8 - Receive Unknown OP Code Counter */
+ u32 raln; /* 0x.6bc - Receive Alignment Error Counter */
+ u32 rflr; /* 0x.6c0 - Receive Frame Length Error Counter */
+ u32 rcde; /* 0x.6c4 - Receive Code Error Counter */
+ u32 rcse; /* 0x.6c8 - Receive Carrier Sense Error Counter */
+ u32 rund; /* 0x.6cc - Receive Undersize Packet Counter */
+ u32 rovr; /* 0x.6d0 - Receive Oversize Packet Counter */
+ u32 rfrg; /* 0x.6d4 - Receive Fragments Counter */
+ u32 rjbr; /* 0x.6d8 - Receive Jabber Counter */
+ u32 rdrp; /* 0x.6dc - Receive Drop Counter */
+	u32	tbyt;	/* 0x.6e0 - Transmit Byte Counter */
+ u32 tpkt; /* 0x.6e4 - Transmit Packet Counter */
+ u32 tmca; /* 0x.6e8 - Transmit Multicast Packet Counter */
+ u32 tbca; /* 0x.6ec - Transmit Broadcast Packet Counter */
+ u32 txpf; /* 0x.6f0 - Transmit Pause Control Frame Counter */
+ u32 tdfr; /* 0x.6f4 - Transmit Deferral Packet Counter */
+ u32 tedf; /* 0x.6f8 - Transmit Excessive Deferral Packet Counter */
+ u32 tscl; /* 0x.6fc - Transmit Single Collision Packet Counter */
+ u32 tmcl; /* 0x.700 - Transmit Multiple Collision Packet Counter */
+ u32 tlcl; /* 0x.704 - Transmit Late Collision Packet Counter */
+ u32 txcl; /* 0x.708 - Transmit Excessive Collision Packet Counter */
+ u32 tncl; /* 0x.70c - Transmit Total Collision Counter */
+ u8 res1[4];
+ u32 tdrp; /* 0x.714 - Transmit Drop Frame Counter */
+ u32 tjbr; /* 0x.718 - Transmit Jabber Frame Counter */
+ u32 tfcs; /* 0x.71c - Transmit FCS Error Counter */
+ u32 txcf; /* 0x.720 - Transmit Control Frame Counter */
+ u32 tovr; /* 0x.724 - Transmit Oversize Frame Counter */
+ u32 tund; /* 0x.728 - Transmit Undersize Frame Counter */
+ u32 tfrg; /* 0x.72c - Transmit Fragments Frame Counter */
+ u32 car1; /* 0x.730 - Carry Register One */
+ u32 car2; /* 0x.734 - Carry Register Two */
+ u32 cam1; /* 0x.738 - Carry Mask Register One */
+ u32 cam2; /* 0x.73c - Carry Mask Register Two */
+};
+
+struct gfar_extra_stats {
+ u64 kernel_dropped;
+ u64 rx_large;
+ u64 rx_short;
+ u64 rx_nonoctet;
+ u64 rx_crcerr;
+ u64 rx_overrun;
+ u64 rx_bsy;
+ u64 rx_babr;
+ u64 rx_trunc;
+ u64 eberr;
+ u64 tx_babt;
+ u64 tx_underrun;
+ u64 rx_skbmissing;
+ u64 tx_timeout;
+};
+
+#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
+#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64))
+
+/* Number of stats in the stats structure (ignore car and cam regs)*/
+#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
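+
+/* For reference: struct rmon_mib is 192 bytes (48 32-bit words), so
+ * GFAR_RMON_LEN is (192 - 16)/4 = 44 hardware counters once
+ * car1/car2/cam1/cam2 are dropped; with the 14 entries of struct
+ * gfar_extra_stats that gives GFAR_STATS_LEN = 58, matching the strings
+ * exported through ethtool in gianfar_ethtool.c.
+ */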
+
+#define GFAR_INFOSTR_LEN 32
+
+struct gfar_stats {
+ u64 extra[GFAR_EXTRA_STATS_LEN];
+ u64 rmon[GFAR_RMON_LEN];
+};
+
+
+struct gfar {
+ u8 res1[16];
+ u32 ievent; /* 0x.010 - Interrupt Event Register */
+ u32 imask; /* 0x.014 - Interrupt Mask Register */
+ u32 edis; /* 0x.018 - Error Disabled Register */
+ u8 res2[4];
+ u32 ecntrl; /* 0x.020 - Ethernet Control Register */
+ u32 minflr; /* 0x.024 - Minimum Frame Length Register */
+ u32 ptv; /* 0x.028 - Pause Time Value Register */
+ u32 dmactrl; /* 0x.02c - DMA Control Register */
+ u32 tbipa; /* 0x.030 - TBI PHY Address Register */
+ u8 res3[88];
+ u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */
+ u8 res4[8];
+ u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */
+ u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */
+ u8 res5[96];
+ u32 tctrl; /* 0x.100 - Transmit Control Register */
+ u32 tstat; /* 0x.104 - Transmit Status Register */
+ u8 res6[4];
+ u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */
+ u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */
+ u8 res7[16];
+ u32 ctbptr; /* 0x.124 - Current Transmit Buffer Descriptor Pointer Register */
+ u8 res8[92];
+ u32 tbptr; /* 0x.184 - Transmit Buffer Descriptor Pointer Low Register */
+ u8 res9[124];
+ u32 tbase; /* 0x.204 - Transmit Descriptor Base Address Register */
+ u8 res10[168];
+ u32 ostbd; /* 0x.2b0 - Out-of-Sequence Transmit Buffer Descriptor Register */
+ u32 ostbdp; /* 0x.2b4 - Out-of-Sequence Transmit Data Buffer Pointer Register */
+ u8 res11[72];
+ u32 rctrl; /* 0x.300 - Receive Control Register */
+ u32 rstat; /* 0x.304 - Receive Status Register */
+ u8 res12[4];
+ u32 rbdlen; /* 0x.30c - RxBD Data Length Register */
+ u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */
+ u8 res13[16];
+ u32 crbptr; /* 0x.324 - Current Receive Buffer Descriptor Pointer */
+ u8 res14[24];
+ u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */
+ u8 res15[64];
+ u32 rbptr; /* 0x.384 - Receive Buffer Descriptor Pointer */
+ u8 res16[124];
+ u32 rbase; /* 0x.404 - Receive Descriptor Base Address */
+ u8 res17[248];
+ u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */
+ u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */
+ u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */
+ u32 hafdup; /* 0x.50c - Half Duplex Register */
+ u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
+ u8 res18[12];
+ u32 miimcfg; /* 0x.520 - MII Management Configuration Register */
+ u32 miimcom; /* 0x.524 - MII Management Command Register */
+ u32 miimadd; /* 0x.528 - MII Management Address Register */
+ u32 miimcon; /* 0x.52c - MII Management Control Register */
+ u32 miimstat; /* 0x.530 - MII Management Status Register */
+ u32 miimind; /* 0x.534 - MII Management Indicator Register */
+ u8 res19[4];
+ u32 ifstat; /* 0x.53c - Interface Status Register */
+ u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
+ u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */
+ u8 res20[312];
+ struct rmon_mib rmon;
+ u8 res21[192];
+	u32	iaddr0;		/* 0x.800 - Individual address register 0 */
+	u32	iaddr1;		/* 0x.804 - Individual address register 1 */
+	u32	iaddr2;		/* 0x.808 - Individual address register 2 */
+	u32	iaddr3;		/* 0x.80c - Individual address register 3 */
+	u32	iaddr4;		/* 0x.810 - Individual address register 4 */
+	u32	iaddr5;		/* 0x.814 - Individual address register 5 */
+	u32	iaddr6;		/* 0x.818 - Individual address register 6 */
+	u32	iaddr7;		/* 0x.81c - Individual address register 7 */
+ u8 res22[96];
+ u32 gaddr0; /* 0x.880 - Global address register 0 */
+ u32 gaddr1; /* 0x.884 - Global address register 1 */
+ u32 gaddr2; /* 0x.888 - Global address register 2 */
+ u32 gaddr3; /* 0x.88c - Global address register 3 */
+ u32 gaddr4; /* 0x.890 - Global address register 4 */
+ u32 gaddr5; /* 0x.894 - Global address register 5 */
+ u32 gaddr6; /* 0x.898 - Global address register 6 */
+ u32 gaddr7; /* 0x.89c - Global address register 7 */
+ u8 res23[856];
+ u32 attr; /* 0x.bf8 - Attributes Register */
+ u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
+ u8 res24[1024];
+
+};
+
+/* Struct stolen almost completely (and shamelessly) from the FCC enet source
+ * (OK, that's not so true anymore, but there is a family resemblance)
+ * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
+ * and tx_bd_base always point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct gfar_private
+{
+ /* pointers to arrays of skbuffs for tx and rx */
+ struct sk_buff ** tx_skbuff;
+ struct sk_buff ** rx_skbuff;
+
+	/* indices pointing to the next free skb in the skb arrays */
+ u16 skb_curtx;
+ u16 skb_currx;
+
+ /* index of the first skb which hasn't been transmitted
+ * yet. */
+ u16 skb_dirtytx;
+
+ /* Configuration info for the coalescing features */
+ unsigned char txcoalescing;
+ unsigned short txcount;
+ unsigned short txtime;
+ unsigned char rxcoalescing;
+ unsigned short rxcount;
+ unsigned short rxtime;
+
+ /* GFAR addresses */
+ struct rxbd8 *rx_bd_base; /* Base addresses of Rx and Tx Buffers */
+ struct txbd8 *tx_bd_base;
+ struct rxbd8 *cur_rx; /* Next free rx ring entry */
+ struct txbd8 *cur_tx; /* Next free ring entry */
+ struct txbd8 *dirty_tx; /* The Ring entry to be freed. */
+ struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */
+ struct phy_info *phyinfo;
+ struct gfar *phyregs;
+ struct work_struct tq;
+ struct timer_list phy_info_timer;
+ struct net_device_stats stats; /* linux network statistics */
+ struct gfar_extra_stats extra_stats;
+ spinlock_t lock;
+ unsigned int rx_buffer_size;
+ unsigned int rx_stash_size;
+ unsigned int tx_ring_size;
+ unsigned int rx_ring_size;
+ wait_queue_head_t rxcleanupq;
+ unsigned int rxclean;
+ int link; /* current link state */
+ int oldlink;
+ int duplexity; /* Indicates negotiated duplex state */
+ int olddplx;
+ int speed; /* Indicates negotiated speed */
+ int oldspeed;
+
+ /* Info structure initialized by board setup code */
+ struct ocp_gfar_data *einfo;
+};
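+
+/* A sketch of how the indices above are meant to advance: because the
+ * ring sizes are powers of two, wrapping an skb index is just a mask,
+ * e.g. (not a verbatim quote from gianfar.c):
+ *
+ *	priv->skb_curtx = (priv->skb_curtx + 1) &
+ *			  TX_RING_MOD_MASK(priv->tx_ring_size);
+ *
+ * cur_tx/dirty_tx wrap via the TXBD_WRAP bit in the last descriptor, so
+ * cur_tx == dirty_tx can mean either "empty" or "full"; the descriptor's
+ * ready/empty bit disambiguates, as noted in the comment above.
+ */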
+
+extern inline u32 gfar_read(volatile unsigned *addr)
+{
+ u32 val;
+ val = in_be32(addr);
+ return val;
+}
+
+extern inline void gfar_write(volatile unsigned *addr, u32 val)
+{
+ out_be32(addr, val);
+}
+
+
+
+#endif /* __GIANFAR_H */
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
new file mode 100644
index 00000000000000..4ccb5afd66d118
--- /dev/null
+++ b/drivers/net/gianfar_ethtool.c
@@ -0,0 +1,484 @@
+/*
+ * drivers/net/gianfar_ethtool.c
+ *
+ * Gianfar Ethernet Driver
+ * Ethtool support for Gianfar Enet
+ * Based on e1000 ethtool support
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This software may be used and distributed according to
+ * the terms of the GNU Public License, Version 2, incorporated herein
+ * by reference.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+#include <asm/types.h>
+#include <linux/ethtool.h>
+
+#include "gianfar.h"
+
+#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
+
+extern int startup_gfar(struct net_device *dev);
+extern void stop_gfar(struct net_device *dev);
+extern void gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
+
+void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 * buf);
+void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
+int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
+int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
+void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
+int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
+void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
+
+static char stat_gstrings[][ETH_GSTRING_LEN] = {
+ "RX Dropped by Kernel",
+ "RX Large Frame Errors",
+ "RX Short Frame Errors",
+ "RX Non-Octet Errors",
+ "RX CRC Errors",
+ "RX Overrun Errors",
+ "RX Busy Errors",
+ "RX Babbling Errors",
+ "RX Truncated Frames",
+ "Ethernet Bus Error",
+ "TX Babbling Errors",
+ "TX Underrun Errors",
+ "RX SKB Missing Errors",
+ "TX Timeout Errors",
+ "tx&rx 64B frames",
+ "tx&rx 65-127B frames",
+ "tx&rx 128-255B frames",
+ "tx&rx 256-511B frames",
+ "tx&rx 512-1023B frames",
+ "tx&rx 1024-1518B frames",
+ "tx&rx 1519-1522B Good VLAN",
+ "RX bytes",
+ "RX Packets",
+ "RX FCS Errors",
+ "Receive Multicast Packet",
+ "Receive Broadcast Packet",
+ "RX Control Frame Packets",
+ "RX Pause Frame Packets",
+ "RX Unknown OP Code",
+ "RX Alignment Error",
+ "RX Frame Length Error",
+ "RX Code Error",
+ "RX Carrier Sense Error",
+ "RX Undersize Packets",
+ "RX Oversize Packets",
+ "RX Fragmented Frames",
+ "RX Jabber Frames",
+ "RX Dropped Frames",
+ "TX Byte Counter",
+ "TX Packets",
+ "TX Multicast Packets",
+ "TX Broadcast Packets",
+ "TX Pause Control Frames",
+ "TX Deferral Packets",
+ "TX Excessive Deferral Packets",
+ "TX Single Collision Packets",
+ "TX Multiple Collision Packets",
+ "TX Late Collision Packets",
+ "TX Excessive Collision Packets",
+ "TX Total Collision",
+ "RESERVED",
+ "TX Dropped Frames",
+ "TX Jabber Frames",
+ "TX FCS Errors",
+ "TX Control Frames",
+ "TX Oversize Frames",
+ "TX Undersize Frames",
+ "TX Fragmented Frames",
+};
+
+/* Fill in an array of 64-bit statistics from various sources.
+ * This array will be appended to the end of the ethtool_stats
+ * structure, and returned to user space
+ */
+void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
+{
+ int i;
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ u32 *rmon = (u32 *) & priv->regs->rmon;
+ u64 *extra = (u64 *) & priv->extra_stats;
+ struct gfar_stats *stats = (struct gfar_stats *) buf;
+
+ for (i = 0; i < GFAR_RMON_LEN; i++) {
+ stats->rmon[i] = (u64) (rmon[i]);
+ }
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) {
+ stats->extra[i] = extra[i];
+ }
+}
+
+/* Returns the number of stats (and their corresponding strings) */
+int gfar_stats_count(struct net_device *dev)
+{
+ return GFAR_STATS_LEN;
+}
+
+void gfar_gstrings_normon(struct net_device *dev, u32 stringset, u8 * buf)
+{
+ memcpy(buf, stat_gstrings, GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+void gfar_fill_stats_normon(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 * buf)
+{
+ int i;
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ u64 *extra = (u64 *) & priv->extra_stats;
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) {
+ buf[i] = extra[i];
+ }
+}
+
+
+int gfar_stats_count_normon(struct net_device *dev)
+{
+ return GFAR_EXTRA_STATS_LEN;
+}
+/* Fills in the drvinfo structure with some basic info */
+void gfar_gdrvinfo(struct net_device *dev,
+		   struct ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, gfar_driver_name, GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
+ drvinfo->n_stats = GFAR_STATS_LEN;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+/* Return the current settings in the ethtool_cmd structure */
+int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ uint gigabit_support =
+ priv->einfo->flags & GFAR_HAS_GIGABIT ? SUPPORTED_1000baseT_Full : 0;
+ uint gigabit_advert =
+ priv->einfo->flags & GFAR_HAS_GIGABIT ? ADVERTISED_1000baseT_Full: 0;
+
+ cmd->supported = (SUPPORTED_10baseT_Half
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | gigabit_support | SUPPORTED_Autoneg);
+
+ /* For now, we always advertise everything */
+ cmd->advertising = (ADVERTISED_10baseT_Half
+ | ADVERTISED_100baseT_Half
+ | ADVERTISED_100baseT_Full
+ | gigabit_advert | ADVERTISED_Autoneg);
+
+ cmd->speed = priv->speed;
+ cmd->duplex = priv->duplexity;
+ cmd->port = PORT_MII;
+ cmd->phy_address = priv->einfo->phyid;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->maxtxpkt = priv->txcount;
+ cmd->maxrxpkt = priv->rxcount;
+
+ return 0;
+}
+
+/* Return the length of the register structure */
+int gfar_reglen(struct net_device *dev)
+{
+ return sizeof (struct gfar);
+}
+
+/* Return a dump of the GFAR register space */
+void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+{
+ int i;
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ u32 *theregs = (u32 *) priv->regs;
+ u32 *buf = (u32 *) regbuf;
+
+ for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
+ buf[i] = theregs[i];
+}
+
+/* Return the link state: 1 is up, 0 is down */
+u32 gfar_get_link(struct net_device *dev)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ return (u32) priv->link;
+}
+
+/* Fill in a buffer with the strings which correspond to the
+ * stats */
+void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
+{
+ memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+/* Convert microseconds to ethernet clock ticks, which changes
+ * depending on what speed the controller is running at */
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
+{
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (priv->speed) {
+ case 1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case 100:
+ count = GFAR_100_TIME;
+ break;
+ case 10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+ /* Make sure we return a number greater than 0
+ * if usecs > 0 */
+ return ((usecs * 1000 + count - 1) / count);
+}
+
+/* Convert ethernet clock ticks to microseconds */
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
+{
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (priv->speed) {
+ case 1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case 100:
+ count = GFAR_100_TIME;
+ break;
+ case 10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+	/* Make sure we return a number greater than 0 if ticks is > 0 */
+ return ((ticks * count) / 1000);
+}
+
+/* Get the coalescing parameters, and put them in the cvals
+ * structure. */
+int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+ cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime);
+ cvals->rx_max_coalesced_frames = priv->rxcount;
+
+ cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, priv->txtime);
+ cvals->tx_max_coalesced_frames = priv->txcount;
+
+ cvals->use_adaptive_rx_coalesce = 0;
+ cvals->use_adaptive_tx_coalesce = 0;
+
+ cvals->pkt_rate_low = 0;
+ cvals->rx_coalesce_usecs_low = 0;
+ cvals->rx_max_coalesced_frames_low = 0;
+ cvals->tx_coalesce_usecs_low = 0;
+ cvals->tx_max_coalesced_frames_low = 0;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+	/* When the packet rate (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+	 * used.
+	 */
+ cvals->pkt_rate_high = 0;
+ cvals->rx_coalesce_usecs_high = 0;
+ cvals->rx_max_coalesced_frames_high = 0;
+ cvals->tx_coalesce_usecs_high = 0;
+ cvals->tx_max_coalesced_frames_high = 0;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ cvals->rate_sample_interval = 0;
+
+ return 0;
+}
+
+/* Change the coalescing values.
+ * Both cvals->*_usecs and cvals->*_frames have to be > 0
+ * in order for coalescing to be active
+ */
+int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+ /* Set up rx coalescing */
+ if ((cvals->rx_coalesce_usecs == 0) ||
+ (cvals->rx_max_coalesced_frames == 0))
+ priv->rxcoalescing = 0;
+ else
+ priv->rxcoalescing = 1;
+
+ priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs);
+ priv->rxcount = cvals->rx_max_coalesced_frames;
+
+ /* Set up tx coalescing */
+ if ((cvals->tx_coalesce_usecs == 0) ||
+ (cvals->tx_max_coalesced_frames == 0))
+ priv->txcoalescing = 0;
+ else
+ priv->txcoalescing = 1;
+
+ priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs);
+ priv->txcount = cvals->tx_max_coalesced_frames;
+
+ if (priv->rxcoalescing)
+ gfar_write(&priv->regs->rxic,
+ mk_ic_value(priv->rxcount, priv->rxtime));
+ else
+ gfar_write(&priv->regs->rxic, 0);
+
+ if (priv->txcoalescing)
+ gfar_write(&priv->regs->txic,
+ mk_ic_value(priv->txcount, priv->txtime));
+ else
+ gfar_write(&priv->regs->txic, 0);
+
+ return 0;
+}
+
+/* Fills in rvals with the current ring parameters. Currently,
+ * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
+ * jumbo are ignored by the driver */
+void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+ rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ rvals->rx_pending = priv->rx_ring_size;
+ rvals->rx_mini_pending = priv->rx_ring_size;
+ rvals->rx_jumbo_pending = priv->rx_ring_size;
+ rvals->tx_pending = priv->tx_ring_size;
+}
+
+/* Change the current ring parameters, stopping the controller if
+ * necessary so that we don't mess things up while we're in
+ * motion. We wait for the ring to be clean before reallocating
+ * the rings. */
+int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+{
+ u32 tempval;
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ int err = 0;
+
+ if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->rx_pending)) {
+ printk("%s: Ring sizes must be a power of 2\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->tx_pending)) {
+ printk("%s: Ring sizes must be a power of 2\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ /* Stop the controller so we don't rx any more frames */
+ /* But first, make sure we clear the bits */
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
+ cpu_relax();
+
+ /* Note that rx is not clean right now */
+ priv->rxclean = 0;
+
+ if (dev->flags & IFF_UP) {
+ /* Tell the driver to process the rest of the frames */
+ gfar_receive(0, (void *) dev, NULL);
+
+ /* Now wait for it to be done */
+ wait_event_interruptible(priv->rxcleanupq, priv->rxclean);
+
+ /* Ok, all packets have been handled. Now we bring it down,
+ * change the ring size, and bring it up */
+
+ stop_gfar(dev);
+ }
+
+ priv->rx_ring_size = rvals->rx_pending;
+ priv->tx_ring_size = rvals->tx_pending;
+
+ if (dev->flags & IFF_UP)
+ err = startup_gfar(dev);
+
+ return err;
+}
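+
+/* These set/get hooks are reached through the standard ethtool ioctl
+ * interface; e.g. a command along the lines of "ethtool -G ethX rx 128
+ * tx 128" (or "ethtool -C ethX rx-usecs 100" for coalescing) ends up
+ * here.  As checked above, ring sizes must be powers of two and no
+ * larger than GFAR_RX/TX_MAX_RING_SIZE.
+ */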
+
+struct ethtool_ops gfar_ethtool_ops = {
+ .get_settings = gfar_gsettings,
+ .get_drvinfo = gfar_gdrvinfo,
+ .get_regs_len = gfar_reglen,
+ .get_regs = gfar_get_regs,
+ .get_link = gfar_get_link,
+ .get_coalesce = gfar_gcoalesce,
+ .set_coalesce = gfar_scoalesce,
+ .get_ringparam = gfar_gringparam,
+ .set_ringparam = gfar_sringparam,
+ .get_strings = gfar_gstrings,
+ .get_stats_count = gfar_stats_count,
+ .get_ethtool_stats = gfar_fill_stats,
+};
diff --git a/drivers/net/gianfar_phy.c b/drivers/net/gianfar_phy.c
new file mode 100644
index 00000000000000..208b6c19119535
--- /dev/null
+++ b/drivers/net/gianfar_phy.c
@@ -0,0 +1,504 @@
+/*
+ * drivers/net/gianfar_phy.c
+ *
+ * Gianfar Ethernet Driver -- PHY handling
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+
+#include "gianfar.h"
+#include "gianfar_phy.h"
+
+/* Write the given value to the register at regnum in the PHY for this */
+/* device, waiting until the write is done before returning.  All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+void write_phy_reg(struct net_device *dev, u16 regnum, u16 value)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ struct gfar *regbase = priv->phyregs;
+ struct ocp_gfar_data *einfo = priv->einfo;
+
+ /* Set the PHY address and the register address we want to write */
+ gfar_write(&regbase->miimadd, ((einfo->phyid) << 8) | regnum);
+
+ /* Write out the value we want */
+ gfar_write(&regbase->miimcon, value);
+
+ /* Wait for the transaction to finish */
+ while (gfar_read(&regbase->miimind) & MIIMIND_BUSY)
+ cpu_relax();
+}
+
+/* Reads from register regnum in the PHY for device dev, */
+/* returning the value. Clears miimcom first. All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+u16 read_phy_reg(struct net_device *dev, u16 regnum)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ struct gfar *regbase = priv->phyregs;
+ struct ocp_gfar_data *einfo = priv->einfo;
+ u16 value;
+
+ /* Set the PHY address and the register address we want to read */
+ gfar_write(&regbase->miimadd, ((einfo->phyid) << 8) | regnum);
+
+ /* Clear miimcom, and then initiate a read */
+ gfar_write(&regbase->miimcom, 0);
+ gfar_write(&regbase->miimcom, MIIM_READ_COMMAND);
+
+ /* Wait for the transaction to finish */
+ while (gfar_read(&regbase->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
+ cpu_relax();
+
+ /* Grab the value of the register from miimstat */
+ value = gfar_read(&regbase->miimstat);
+
+ return value;
+}
+
+/* Returns the value to write to the control register. */
+/* For 10/100 the value is slightly different. */
+u16 mii_cr_init(u16 mii_reg, struct net_device * dev)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ struct ocp_gfar_data *einfo = priv->einfo;
+
+ if (einfo->flags & GFAR_HAS_GIGABIT)
+ return MIIM_CONTROL_INIT;
+ else
+ return MIIM_CR_INIT;
+}
+
+#define BRIEF_GFAR_ERRORS
+/* Wait for auto-negotiation to complete */
+u16 mii_parse_sr(u16 mii_reg, struct net_device * dev)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+ unsigned int timeout = GFAR_AN_TIMEOUT;
+
+ if (mii_reg & MIIM_STATUS_LINK)
+ priv->link = 1;
+ else
+ priv->link = 0;
+
+ /* Only auto-negotiate if the link has just gone up */
+ if (priv->link && !priv->oldlink) {
+ while ((!(mii_reg & MIIM_STATUS_AN_DONE)) && timeout--)
+ mii_reg = read_phy_reg(dev, MIIM_STATUS);
+
+#if defined(BRIEF_GFAR_ERRORS)
+ if (mii_reg & MIIM_STATUS_AN_DONE)
+ printk(KERN_INFO "%s: Auto-negotiation done\n",
+ dev->name);
+ else
+ printk(KERN_INFO "%s: Auto-negotiation timed out\n",
+ dev->name);
+#endif
+ }
+
+ return 0;
+}
+
+/* Determine the speed and duplex which was negotiated */
+u16 mii_parse_88E1011_psr(u16 mii_reg, struct net_device * dev)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ unsigned int speed;
+
+ if (priv->link) {
+ if (mii_reg & MIIM_88E1011_PHYSTAT_DUPLEX)
+ priv->duplexity = 1;
+ else
+ priv->duplexity = 0;
+
+ speed = (mii_reg & MIIM_88E1011_PHYSTAT_SPEED);
+
+ switch (speed) {
+ case MIIM_88E1011_PHYSTAT_GBIT:
+ priv->speed = 1000;
+ break;
+ case MIIM_88E1011_PHYSTAT_100:
+ priv->speed = 100;
+ break;
+ default:
+ priv->speed = 10;
+ break;
+ }
+ } else {
+ priv->speed = 0;
+ priv->duplexity = 0;
+ }
+
+ return 0;
+}
+
+u16 mii_parse_cis8201(u16 mii_reg, struct net_device * dev)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ unsigned int speed;
+
+ if (priv->link) {
+ if (mii_reg & MIIM_CIS8201_AUXCONSTAT_DUPLEX)
+ priv->duplexity = 1;
+ else
+ priv->duplexity = 0;
+
+ speed = mii_reg & MIIM_CIS8201_AUXCONSTAT_SPEED;
+
+ switch (speed) {
+ case MIIM_CIS8201_AUXCONSTAT_GBIT:
+ priv->speed = 1000;
+ break;
+ case MIIM_CIS8201_AUXCONSTAT_100:
+ priv->speed = 100;
+ break;
+ default:
+ priv->speed = 10;
+ break;
+ }
+ } else {
+ priv->speed = 0;
+ priv->duplexity = 0;
+ }
+
+ return 0;
+}
+
+u16 mii_parse_dm9161_scsr(u16 mii_reg, struct net_device * dev)
+{
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+ if (mii_reg & (MIIM_DM9161_SCSR_100F | MIIM_DM9161_SCSR_100H))
+ priv->speed = 100;
+ else
+ priv->speed = 10;
+
+ if (mii_reg & (MIIM_DM9161_SCSR_100F | MIIM_DM9161_SCSR_10F))
+ priv->duplexity = 1;
+ else
+ priv->duplexity = 0;
+
+ return 0;
+}
+
+u16 dm9161_wait(u16 mii_reg, struct net_device *dev)
+{
+ int timeout = HZ;
+ int secondary = 10;
+ u16 temp;
+
+ do {
+
+ /* Davicom takes a bit to come up after a reset,
+ * so wait here for a bit */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
+
+ temp = read_phy_reg(dev, MIIM_STATUS);
+
+ secondary--;
+ } while ((!(temp & MIIM_STATUS_AN_DONE)) && secondary);
+
+ return 0;
+}
+
+static struct phy_info phy_info_M88E1011S = {
+ 0x01410c6,
+ "Marvell 88E1011S",
+ 4,
+ (const struct phy_cmd[]) { /* config */
+ /* Reset and configure the PHY */
+ {MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* startup */
+ /* Status is read once to clear old link state */
+ {MIIM_STATUS, miim_read, NULL},
+ /* Auto-negotiate */
+ {MIIM_STATUS, miim_read, mii_parse_sr},
+ /* Read the status */
+ {MIIM_88E1011_PHY_STATUS, miim_read, mii_parse_88E1011_psr},
+ /* Clear the IEVENT register */
+ {MIIM_88E1011_IEVENT, miim_read, NULL},
+ /* Set up the mask */
+ {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_INIT, NULL},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* ack_int */
+ /* Clear the interrupt */
+ {MIIM_88E1011_IEVENT, miim_read, NULL},
+ /* Disable interrupts */
+ {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_CLEAR, NULL},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* handle_int */
+ /* Read the Status (2x to make sure link is right) */
+ {MIIM_STATUS, miim_read, NULL},
+ /* Check the status */
+ {MIIM_STATUS, miim_read, mii_parse_sr},
+ {MIIM_88E1011_PHY_STATUS, miim_read, mii_parse_88E1011_psr},
+ /* Enable Interrupts */
+ {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_INIT, NULL},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* shutdown */
+ {MIIM_88E1011_IEVENT, miim_read, NULL},
+ {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_CLEAR, NULL},
+ {miim_end,}
+ },
+};
+
+/* Cicada 8204 */
+static struct phy_info phy_info_cis8204 = {
+ 0x3f11,
+ "Cicada Cis8204",
+ 6,
+ (const struct phy_cmd[]) { /* config */
+ /* Override PHY config settings */
+ {MIIM_CIS8201_AUX_CONSTAT, MIIM_CIS8201_AUXCONSTAT_INIT, NULL},
+ /* Set up the interface mode */
+ {MIIM_CIS8201_EXT_CON1, MIIM_CIS8201_EXTCON1_INIT, NULL},
+ /* Configure some basic stuff */
+ {MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* startup */
+ /* Read the Status (2x to make sure link is right) */
+ {MIIM_STATUS, miim_read, NULL},
+ /* Auto-negotiate */
+ {MIIM_STATUS, miim_read, mii_parse_sr},
+ /* Read the status */
+ {MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201},
+ /* Clear the status register */
+ {MIIM_CIS8204_ISTAT, miim_read, NULL},
+ /* Enable interrupts */
+ {MIIM_CIS8204_IMASK, MIIM_CIS8204_IMASK_MASK, NULL},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* ack_int */
+ /* Clear the status register */
+ {MIIM_CIS8204_ISTAT, miim_read, NULL},
+ /* Disable interrupts */
+ {MIIM_CIS8204_IMASK, 0x0, NULL},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* handle_int */
+ /* Read the Status (2x to make sure link is right) */
+ {MIIM_STATUS, miim_read, NULL},
+ /* Auto-negotiate */
+ {MIIM_STATUS, miim_read, mii_parse_sr},
+ /* Read the status */
+ {MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201},
+ /* Enable interrupts */
+ {MIIM_CIS8204_IMASK, MIIM_CIS8204_IMASK_MASK, NULL},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* shutdown */
+ /* Clear the status register */
+ {MIIM_CIS8204_ISTAT, miim_read, NULL},
+ /* Disable interrupts */
+ {MIIM_CIS8204_IMASK, 0x0, NULL},
+ {miim_end,}
+ },
+};
+
+/* Cicada 8201 */
+static struct phy_info phy_info_cis8201 = {
+ 0xfc41,
+ "CIS8201",
+ 4,
+ (const struct phy_cmd[]) { /* config */
+ /* Override PHY config settings */
+ {MIIM_CIS8201_AUX_CONSTAT, MIIM_CIS8201_AUXCONSTAT_INIT, NULL},
+ /* Set up the interface mode */
+ {MIIM_CIS8201_EXT_CON1, MIIM_CIS8201_EXTCON1_INIT, NULL},
+ /* Configure some basic stuff */
+ {MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* startup */
+ /* Read the Status (2x to make sure link is right) */
+ {MIIM_STATUS, miim_read, NULL},
+ /* Auto-negotiate */
+ {MIIM_STATUS, miim_read, mii_parse_sr},
+ /* Read the status */
+ {MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* ack_int */
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* handle_int */
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* shutdown */
+ {miim_end,}
+ },
+};
+
+static struct phy_info phy_info_dm9161 = {
+ 0x0181b88,
+ "Davicom DM9161E",
+ 4,
+ (const struct phy_cmd[]) { /* config */
+ {MIIM_CONTROL, MIIM_DM9161_CR_STOP, NULL},
+ /* Do not bypass the scrambler/descrambler */
+ {MIIM_DM9161_SCR, MIIM_DM9161_SCR_INIT, NULL},
+ /* Clear 10BTCSR to default */
+ {MIIM_DM9161_10BTCSR, MIIM_DM9161_10BTCSR_INIT, NULL},
+ /* Configure some basic stuff */
+ {MIIM_CONTROL, MIIM_CR_INIT, NULL},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* startup */
+ /* Restart Auto Negotiation */
+ {MIIM_CONTROL, MIIM_DM9161_CR_RSTAN, NULL},
+ /* Status is read once to clear old link state */
+ {MIIM_STATUS, miim_read, dm9161_wait},
+ /* Auto-negotiate */
+ {MIIM_STATUS, miim_read, mii_parse_sr},
+ /* Read the status */
+ {MIIM_DM9161_SCSR, miim_read, mii_parse_dm9161_scsr},
+ /* Clear any pending interrupts */
+ {MIIM_DM9161_INTR, miim_read, NULL},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* ack_int */
+ {MIIM_DM9161_INTR, miim_read, NULL},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* handle_int */
+ {MIIM_STATUS, miim_read, NULL},
+ {MIIM_STATUS, miim_read, mii_parse_sr},
+ {MIIM_DM9161_SCSR, miim_read, mii_parse_dm9161_scsr},
+ {miim_end,}
+ },
+ (const struct phy_cmd[]) { /* shutdown */
+ {MIIM_DM9161_INTR, miim_read, NULL},
+ {miim_end,}
+ },
+};
+
+static struct phy_info *phy_info[] = {
+ &phy_info_cis8201,
+ &phy_info_cis8204,
+ &phy_info_M88E1011S,
+ &phy_info_dm9161,
+ NULL
+};
+
+/* Use the PHY ID registers to determine what type of PHY is attached
+ * to device dev.  Returns a struct phy_info structure describing that PHY.
+ */
+struct phy_info * get_phy_info(struct net_device *dev)
+{
+ u16 phy_reg;
+ u32 phy_ID;
+ int i;
+ struct phy_info *theInfo = NULL;
+
+ /* Grab the bits from PHYIR1, and put them in the upper half */
+ phy_reg = read_phy_reg(dev, MIIM_PHYIR1);
+ phy_ID = (phy_reg & 0xffff) << 16;
+
+ /* Grab the bits from PHYIR2, and put them in the lower half */
+ phy_reg = read_phy_reg(dev, MIIM_PHYIR2);
+ phy_ID |= (phy_reg & 0xffff);
+
+ /* loop through all the known PHY types, and find one that */
+ /* matches the ID we read from the PHY. */
+ for (i = 0; phy_info[i]; i++)
+ if (phy_info[i]->id == (phy_ID >> phy_info[i]->shift))
+ theInfo = phy_info[i];
+
+ if (theInfo == NULL) {
+ printk("%s: PHY id %x is not supported!\n", dev->name, phy_ID);
+ return NULL;
+ } else {
+ printk("%s: PHY is %s (%x)\n", dev->name, theInfo->name,
+ phy_ID);
+ }
+
+ return theInfo;
+}
+
+/* Take a list of struct phy_cmd and, depending on the values, either */
+/* read or write, using a helper function if one is provided. */
+/* It is assumed that all lists of struct phy_cmd will be terminated by */
+/* miim_end. */
+void phy_run_commands(struct net_device *dev, const struct phy_cmd *cmd)
+{
+ int i;
+ u16 result;
+ struct gfar_private *priv = (struct gfar_private *) dev->priv;
+ struct gfar *phyregs = priv->phyregs;
+
+ /* Reset the management interface */
+ gfar_write(&phyregs->miimcfg, MIIMCFG_RESET);
+
+ /* Setup the MII Mgmt clock speed */
+ gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE);
+
+ /* Wait until the bus is free */
+ while (gfar_read(&phyregs->miimind) & MIIMIND_BUSY)
+ cpu_relax();
+
+ for (i = 0; cmd->mii_reg != miim_end; i++) {
+ /* The command is a read if mii_data is miim_read */
+ if (cmd->mii_data == miim_read) {
+ /* Read the value of the PHY reg */
+ result = read_phy_reg(dev, cmd->mii_reg);
+
+ /* If a function was supplied, we need to let it process */
+ /* the result. */
+ if (cmd->funct != NULL)
+ (*(cmd->funct)) (result, dev);
+ } else { /* Otherwise, it's a write */
+ /* If a function was supplied, it will provide
+ * the value to write */
+ /* Otherwise, the value was supplied in cmd->mii_data */
+ if (cmd->funct != NULL)
+ result = (*(cmd->funct)) (0, dev);
+ else
+ result = cmd->mii_data;
+
+ /* Write the appropriate value to the PHY reg */
+ write_phy_reg(dev, cmd->mii_reg, result);
+ }
+ cmd++;
+ }
+}
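+
+/* Typical usage (sketch, not a verbatim quote from gianfar.c): the main
+ * driver looks up the phy_info for the attached PHY and then runs the
+ * appropriate command list at each stage, e.g. on startup:
+ *
+ *	priv->phyinfo = get_phy_info(dev);
+ *	if (priv->phyinfo)
+ *		phy_run_commands(dev, priv->phyinfo->startup);
+ *
+ * with the ack_int, handle_int and shutdown lists run from the interrupt
+ * handler, the bottom half and the close path respectively.
+ */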
diff --git a/drivers/net/gianfar_phy.h b/drivers/net/gianfar_phy.h
new file mode 100644
index 00000000000000..053f9580a1aa7c
--- /dev/null
+++ b/drivers/net/gianfar_phy.h
@@ -0,0 +1,192 @@
+/*
+ * drivers/net/gianfar_phy.h
+ *
+ * Gianfar Ethernet Driver -- PHY handling
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __GIANFAR_PHY_H
+#define __GIANFAR_PHY_H
+
+#define miim_end ((u32)-2)
+#define miim_read ((u32)-1)
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+
+#define MIIM_CONTROL 0x00
+#define MIIM_CONTROL_RESET 0x00008000
+#define MIIM_CONTROL_INIT 0x00001140
+#define MIIM_ANEN 0x00001000
+
+#define MIIM_CR 0x00
+#define MIIM_CR_RST 0x00008000
+#define MIIM_CR_INIT 0x00001000
+
+#define MIIM_STATUS 0x1
+#define MIIM_STATUS_AN_DONE 0x00000020
+#define MIIM_STATUS_LINK 0x0004
+
+#define MIIM_PHYIR1 0x2
+#define MIIM_PHYIR2 0x3
+
+#define GFAR_AN_TIMEOUT 0x000fffff
+
+#define MIIM_ANLPBPA 0x5
+#define MIIM_ANLPBPA_HALF 0x00000040
+#define MIIM_ANLPBPA_FULL 0x00000020
+
+#define MIIM_ANEX 0x6
+#define MIIM_ANEX_NP 0x00000004
+#define MIIM_ANEX_PRX 0x00000002
+
+
+/* Cicada Extended Control Register 1 */
+#define MIIM_CIS8201_EXT_CON1 0x17
+#define MIIM_CIS8201_EXTCON1_INIT 0x0000
+
+/* Cicada Interrupt Mask Register */
+#define MIIM_CIS8204_IMASK 0x19
+#define MIIM_CIS8204_IMASK_IEN 0x8000
+#define MIIM_CIS8204_IMASK_SPEED 0x4000
+#define MIIM_CIS8204_IMASK_LINK 0x2000
+#define MIIM_CIS8204_IMASK_DUPLEX 0x1000
+#define MIIM_CIS8204_IMASK_MASK 0xf000
+
+/* Cicada Interrupt Status Register */
+#define MIIM_CIS8204_ISTAT 0x1a
+#define MIIM_CIS8204_ISTAT_STATUS 0x8000
+#define MIIM_CIS8204_ISTAT_SPEED 0x4000
+#define MIIM_CIS8204_ISTAT_LINK 0x2000
+#define MIIM_CIS8204_ISTAT_DUPLEX 0x1000
+
+/* Cicada Auxiliary Control/Status Register */
+#define MIIM_CIS8201_AUX_CONSTAT 0x1c
+#define MIIM_CIS8201_AUXCONSTAT_INIT 0x0004
+#define MIIM_CIS8201_AUXCONSTAT_DUPLEX 0x0020
+#define MIIM_CIS8201_AUXCONSTAT_SPEED 0x0018
+#define MIIM_CIS8201_AUXCONSTAT_GBIT 0x0010
+#define MIIM_CIS8201_AUXCONSTAT_100 0x0008
+
+/* 88E1011 PHY Status Register */
+#define MIIM_88E1011_PHY_STATUS 0x11
+#define MIIM_88E1011_PHYSTAT_SPEED 0xc000
+#define MIIM_88E1011_PHYSTAT_GBIT 0x8000
+#define MIIM_88E1011_PHYSTAT_100 0x4000
+#define MIIM_88E1011_PHYSTAT_DUPLEX 0x2000
+#define MIIM_88E1011_PHYSTAT_LINK 0x0400
+
+#define MIIM_88E1011_IEVENT 0x13
+#define MIIM_88E1011_IEVENT_CLEAR 0x0000
+
+#define MIIM_88E1011_IMASK 0x12
+#define MIIM_88E1011_IMASK_INIT 0x6400
+#define MIIM_88E1011_IMASK_CLEAR 0x0000
+
+/* DM9161 Control register values */
+#define MIIM_DM9161_CR_STOP 0x0400
+#define MIIM_DM9161_CR_RSTAN 0x1200
+
+#define MIIM_DM9161_SCR 0x10
+#define MIIM_DM9161_SCR_INIT 0x0610
+
+/* DM9161 Specified Configuration and Status Register */
+#define MIIM_DM9161_SCSR 0x11
+#define MIIM_DM9161_SCSR_100F 0x8000
+#define MIIM_DM9161_SCSR_100H 0x4000
+#define MIIM_DM9161_SCSR_10F 0x2000
+#define MIIM_DM9161_SCSR_10H 0x1000
+
+/* DM9161 Interrupt Register */
+#define MIIM_DM9161_INTR 0x15
+#define MIIM_DM9161_INTR_PEND 0x8000
+#define MIIM_DM9161_INTR_DPLX_MASK 0x0800
+#define MIIM_DM9161_INTR_SPD_MASK 0x0400
+#define MIIM_DM9161_INTR_LINK_MASK 0x0200
+#define MIIM_DM9161_INTR_MASK 0x0100
+#define MIIM_DM9161_INTR_DPLX_CHANGE 0x0010
+#define MIIM_DM9161_INTR_SPD_CHANGE 0x0008
+#define MIIM_DM9161_INTR_LINK_CHANGE 0x0004
+#define MIIM_DM9161_INTR_INIT 0x0000
+#define MIIM_DM9161_INTR_STOP \
+(MIIM_DM9161_INTR_DPLX_MASK | MIIM_DM9161_INTR_SPD_MASK \
+ | MIIM_DM9161_INTR_LINK_MASK | MIIM_DM9161_INTR_MASK)
+
+/* DM9161 10BT Configuration/Status */
+#define MIIM_DM9161_10BTCSR 0x12
+#define MIIM_DM9161_10BTCSR_INIT 0x7800
+
+
+#define MIIM_READ_COMMAND 0x00000001
+
+/*
+ * struct phy_cmd: A command for reading or writing a PHY register
+ *
+ * mii_reg: The register to read or write
+ *
+ * mii_data: For writes, the value to put in the register.
+ * A value of -1 indicates this is a read.
+ *
+ * funct: A function pointer which is invoked for each command.
+ *        For reads, this function is passed the value read from
+ *        the PHY so that it can process it.
+ *	  For writes, the result of this function is written to
+ *	  the PHY register.
+ */
+struct phy_cmd {
+ u32 mii_reg;
+ u32 mii_data;
+ u16 (*funct) (u16 mii_reg, struct net_device * dev);
+};
+
+/* struct phy_info: a structure which defines attributes for a PHY
+ *
+ * id will contain a number which represents the PHY. During
+ * startup, the driver will poll the PHY to find out what its
+ * UID--as defined by registers 2 and 3--is. The 32-bit result
+ * read from the PHY will be shifted right by "shift" bits to
+ * discard any bits (such as revision numbers) which may change
+ * between parts but are unimportant to functionality.
+ *
+ * The struct phy_cmd entries are pointers to arrays of commands
+ * which tell the driver what to do to the PHY.
+ */
+struct phy_info {
+ u32 id;
+ char *name;
+ unsigned int shift;
+ /* Called to configure the PHY, and modify the controller
+ * based on the results */
+ const struct phy_cmd *config;
+
+ /* Called when starting up the controller. Usually sets
+ * up the interrupt for state changes */
+ const struct phy_cmd *startup;
+
+ /* Called inside the interrupt handler to acknowledge
+ * the interrupt */
+ const struct phy_cmd *ack_int;
+
+ /* Called in the bottom half to handle the interrupt */
+ const struct phy_cmd *handle_int;
+
+ /* Called when bringing down the controller. Usually stops
+ * the interrupts from being generated */
+ const struct phy_cmd *shutdown;
+};
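+
+/* Worked example of the id/shift matching done in get_phy_info(): the
+ * DM9161 entry in gianfar_phy.c uses id 0x0181b88 with shift 4, so a
+ * PHY whose PHYIR1/PHYIR2 registers read back as, say, 0x0181b881 (low
+ * nibble being a revision number; value hypothetical) matches because
+ * 0x0181b881 >> 4 == 0x0181b88.
+ */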
+
+struct phy_info *get_phy_info(struct net_device *dev);
+void phy_run_commands(struct net_device *dev, const struct phy_cmd *cmd);
+
+#endif /* __GIANFAR_PHY_H */
diff --git a/drivers/net/gt64240eth.h b/drivers/net/gt64240eth.h
new file mode 100644
index 00000000000000..7e7af0d5658702
--- /dev/null
+++ b/drivers/net/gt64240eth.h
@@ -0,0 +1,402 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Patton Electronics Company
+ * Copyright (C) 2002 Momentum Computer
+ *
+ * Copyright 2000 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * stevel@mvista.com or support@mvista.com
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Ethernet driver definitions for the MIPS GT96100 Advanced
+ * Communication Controller.
+ *
+ * Modified for the Marvellous GT64240 Retarded Communication Controller.
+ */
+#ifndef _GT64240ETH_H
+#define _GT64240ETH_H
+
+#include <asm/gt64240.h>
+
+#define ETHERNET_PORTS_DIFFERENCE_OFFSETS 0x400
+
+/* Translate those weenie names from Galileo/VxWorks header files: */
+
+#define GT64240_MRR MAIN_ROUTING_REGISTER
+#define GT64240_CIU_ARBITER_CONFIG COMM_UNIT_ARBITER_CONFIGURATION_REGISTER
+#define GT64240_CIU_ARBITER_CONTROL COMM_UNIT_ARBITER_CONTROL
+#define GT64240_MAIN_LOW_CAUSE LOW_INTERRUPT_CAUSE_REGISTER
+#define GT64240_MAIN_HIGH_CAUSE HIGH_INTERRUPT_CAUSE_REGISTER
+#define GT64240_CPU_LOW_MASK CPU_INTERRUPT_MASK_REGISTER_LOW
+#define GT64240_CPU_HIGH_MASK CPU_INTERRUPT_MASK_REGISTER_HIGH
+#define GT64240_CPU_SELECT_CAUSE CPU_SELECT_CAUSE_REGISTER
+
+#define GT64240_ETH_PHY_ADDR_REG ETHERNET_PHY_ADDRESS_REGISTER
+#define GT64240_ETH_PORT_CONFIG ETHERNET0_PORT_CONFIGURATION_REGISTER
+#define GT64240_ETH_PORT_CONFIG_EXT ETHERNET0_PORT_CONFIGURATION_EXTEND_REGISTER
+#define GT64240_ETH_PORT_COMMAND ETHERNET0_PORT_COMMAND_REGISTER
+#define GT64240_ETH_PORT_STATUS ETHERNET0_PORT_STATUS_REGISTER
+#define GT64240_ETH_IO_SIZE ETHERNET_PORTS_DIFFERENCE_OFFSETS
+#define GT64240_ETH_SMI_REG ETHERNET_SMI_REGISTER
+#define GT64240_ETH_MIB_COUNT_BASE ETHERNET0_MIB_COUNTER_BASE
+#define GT64240_ETH_SDMA_CONFIG ETHERNET0_SDMA_CONFIGURATION_REGISTER
+#define GT64240_ETH_SDMA_COMM ETHERNET0_SDMA_COMMAND_REGISTER
+#define GT64240_ETH_INT_MASK ETHERNET0_INTERRUPT_MASK_REGISTER
+#define GT64240_ETH_INT_CAUSE ETHERNET0_INTERRUPT_CAUSE_REGISTER
+#define GT64240_ETH_CURR_TX_DESC_PTR0 ETHERNET0_CURRENT_TX_DESCRIPTOR_POINTER0
+#define GT64240_ETH_CURR_TX_DESC_PTR1 ETHERNET0_CURRENT_TX_DESCRIPTOR_POINTER1
+#define GT64240_ETH_1ST_RX_DESC_PTR0 ETHERNET0_FIRST_RX_DESCRIPTOR_POINTER0
+#define GT64240_ETH_CURR_RX_DESC_PTR0 ETHERNET0_CURRENT_RX_DESCRIPTOR_POINTER0
+#define GT64240_ETH_HASH_TBL_PTR ETHERNET0_HASH_TABLE_POINTER_REGISTER
+
+/* Turn on NAPI by default */
+
+#define GT64240_NAPI 1
+
+/* Some 64240 settings that SHOULD eventually be set up in the PROM monitor: */
+/* (Board-specific to the DSL3224 Rev A board ONLY!) */
+#define D3224_MPP_CTRL0_SETTING 0x66669900
+#define D3224_MPP_CTRL1_SETTING 0x00000000
+#define D3224_MPP_CTRL2_SETTING 0x00887700
+#define D3224_MPP_CTRL3_SETTING 0x00000044
+#define D3224_GPP_IO_CTRL_SETTING 0x0000e800
+#define D3224_GPP_LEVEL_CTRL_SETTING 0xf001f703
+#define D3224_GPP_VALUE_SETTING 0x00000000
+
+/* Keep the ring sizes a power of two for efficiency. */
+//-#define TX_RING_SIZE 16
+#define TX_RING_SIZE 64 /* TESTING !!! */
+#define RX_RING_SIZE 32
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
+
+#define RX_HASH_TABLE_SIZE 16384
+#define HASH_HOP_NUMBER 12
+
+#define NUM_INTERFACES 3
+
+#define GT64240ETH_TX_TIMEOUT HZ/4
+
+#define MIPS_GT64240_BASE 0xf4000000
+#define GT64240_ETH0_BASE (MIPS_GT64240_BASE + GT64240_ETH_PORT_CONFIG)
+#define GT64240_ETH1_BASE (GT64240_ETH0_BASE + GT64240_ETH_IO_SIZE)
+#define GT64240_ETH2_BASE (GT64240_ETH1_BASE + GT64240_ETH_IO_SIZE)
+
+#if defined(CONFIG_MIPS_DSL3224)
+#define GT64240_ETHER0_IRQ 4
+#define GT64240_ETHER1_IRQ 4
+#else
+#define GT64240_ETHER0_IRQ -1
+#define GT64240_ETHER1_IRQ -1
+#endif
+
+#define REV_GT64240 0x1
+#define REV_GT64240A 0x10
+
+#define GT64240ETH_READ(gp, offset) \
+ GT_READ((gp)->port_offset + (offset))
+
+#define GT64240ETH_WRITE(gp, offset, data) \
+ GT_WRITE((gp)->port_offset + (offset), (data))
+
+#define GT64240ETH_SETBIT(gp, offset, bits) \
+ GT64240ETH_WRITE((gp), (offset), \
+ GT64240ETH_READ((gp), (offset)) | (bits))
+
+#define GT64240ETH_CLRBIT(gp, offset, bits) \
+ GT64240ETH_WRITE((gp), (offset), \
+ GT64240ETH_READ((gp), (offset)) & ~(bits))
+
+#define GT64240_READ(ofs) GT_READ(ofs)
+#define GT64240_WRITE(ofs, data) GT_WRITE((ofs), (data))
+
+/* Bit definitions of the SMI Reg */
+enum {
+ smirDataMask = 0xffff,
+ smirPhyAdMask = 0x1f << 16,
+ smirPhyAdBit = 16,
+ smirRegAdMask = 0x1f << 21,
+ smirRegAdBit = 21,
+ smirOpCode = 1 << 26,
+ smirReadValid = 1 << 27,
+ smirBusy = 1 << 28
+};
+
+/* Bit definitions of the Port Config Reg */
+enum pcr_bits {
+ pcrPM = 1 << 0,
+ pcrRBM = 1 << 1,
+ pcrPBF = 1 << 2,
+ pcrEN = 1 << 7,
+ pcrLPBKMask = 0x3 << 8,
+ pcrLPBKBit = 1 << 8,
+ pcrFC = 1 << 10,
+ pcrHS = 1 << 12,
+ pcrHM = 1 << 13,
+ pcrHDM = 1 << 14,
+ pcrHD = 1 << 15,
+ pcrISLMask = 0x7 << 28,
+ pcrISLBit = 28,
+ pcrACCS = 1 << 31
+};
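+
+/* Illustrative use of the accessor macros above (not a quote from the
+ * driver): enable a port by setting pcrEN in its port configuration
+ * register, then check the link bit in the port status register:
+ *
+ *	GT64240ETH_SETBIT(gp, GT64240_ETH_PORT_CONFIG, pcrEN);
+ *	if (GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS) & psrLink)
+ *		netif_carrier_on(dev);
+ */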
+
+/* Bit definitions of the Port Config Extend Reg */
+enum pcxr_bits {
+ pcxrIGMP = 1,
+ pcxrSPAN = 2,
+ pcxrPAR = 4,
+ pcxrPRIOtxMask = 0x7 << 3,
+ pcxrPRIOtxBit = 3,
+ pcxrPRIOrxMask = 0x3 << 6,
+ pcxrPRIOrxBit = 6,
+ pcxrPRIOrxOverride = 1 << 8,
+ pcxrDPLXen = 1 << 9,
+ pcxrFCTLen = 1 << 10,
+ pcxrFLP = 1 << 11,
+ pcxrFCTL = 1 << 12,
+ pcxrMFLMask = 0x3 << 14,
+ pcxrMFLBit = 14,
+ pcxrMIBclrMode = 1 << 16,
+ pcxrSpeed = 1 << 18,
+ pcxrSpeeden = 1 << 19,
+ pcxrRMIIen = 1 << 20,
+ pcxrDSCPen = 1 << 21
+};
+
+/* Bit definitions of the Port Command Reg */
+enum pcmr_bits {
+ pcmrFJ = 1 << 15
+};
+
+
+/* Bit definitions of the Port Status Reg */
+enum psr_bits {
+ psrSpeed = 1,
+ psrDuplex = 2,
+ psrFctl = 4,
+ psrLink = 8,
+ psrPause = 1 << 4,
+ psrTxLow = 1 << 5,
+ psrTxHigh = 1 << 6,
+ psrTxInProg = 1 << 7
+};
+
+/* Bit definitions of the SDMA Config Reg */
+enum sdcr_bits {
+ sdcrRCMask = 0xf << 2,
+ sdcrRCBit = 2,
+ sdcrBLMR = 1 << 6,
+ sdcrBLMT = 1 << 7,
+ sdcrPOVR = 1 << 8,
+ sdcrRIFB = 1 << 9,
+ sdcrBSZMask = 0x3 << 12,
+ sdcrBSZBit = 12
+};
+
+/* Bit definitions of the SDMA Command Reg */
+enum sdcmr_bits {
+ sdcmrERD = 1 << 7,
+ sdcmrAR = 1 << 15,
+ sdcmrSTDH = 1 << 16,
+ sdcmrSTDL = 1 << 17,
+ sdcmrTXDH = 1 << 23,
+ sdcmrTXDL = 1 << 24,
+ sdcmrAT = 1 << 31
+};
+
+/* Bit definitions of the Interrupt Cause Reg */
+enum icr_bits {
+ icrRxBuffer = 1,
+ icrTxBufferHigh = 1 << 2,
+ icrTxBufferLow = 1 << 3,
+ icrTxEndHigh = 1 << 6,
+ icrTxEndLow = 1 << 7,
+ icrRxError = 1 << 8,
+ icrTxErrorHigh = 1 << 10,
+ icrTxErrorLow = 1 << 11,
+ icrRxOVR = 1 << 12,
+ icrTxUdr = 1 << 13,
+ icrRxBufferQ0 = 1 << 16,
+ icrRxBufferQ1 = 1 << 17,
+ icrRxBufferQ2 = 1 << 18,
+ icrRxBufferQ3 = 1 << 19,
+ icrRxErrorQ0 = 1 << 20,
+ icrRxErrorQ1 = 1 << 21,
+ icrRxErrorQ2 = 1 << 22,
+ icrRxErrorQ3 = 1 << 23,
+ icrMIIPhySTC = 1 << 28,
+ icrSMIdone = 1 << 29,
+ icrEtherIntSum = 1 << 31
+};
+
+
+/* The Rx and Tx descriptor lists. */
+#ifdef __LITTLE_ENDIAN
+typedef struct {
+ u32 cmdstat;
+ u16 reserved; //-prk21aug01 u32 reserved:16;
+ u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
+ u32 buff_ptr;
+ u32 next;
+} gt64240_td_t;
+
+typedef struct {
+ u32 cmdstat;
+ u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
+ u16 buff_sz; //-prk21aug01 u32 buff_sz:16;
+ u32 buff_ptr;
+ u32 next;
+} gt64240_rd_t;
+#elif defined(__BIG_ENDIAN)
+typedef struct {
+ u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
+ u16 reserved; //-prk21aug01 u32 reserved:16;
+ u32 cmdstat;
+ u32 next;
+ u32 buff_ptr;
+} gt64240_td_t;
+
+typedef struct {
+ u16 buff_sz; //-prk21aug01 u32 buff_sz:16;
+ u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
+ u32 cmdstat;
+ u32 next;
+ u32 buff_ptr;
+} gt64240_rd_t;
+#else
+#error Either __BIG_ENDIAN or __LITTLE_ENDIAN must be defined!
+#endif
+
+
+/* Values for the Tx command-status descriptor entry. */
+enum td_cmdstat {
+ txOwn = 1 << 31,
+ txAutoMode = 1 << 30,
+ txEI = 1 << 23,
+ txGenCRC = 1 << 22,
+ txPad = 1 << 18,
+ txFirst = 1 << 17,
+ txLast = 1 << 16,
+ txErrorSummary = 1 << 15,
+ txReTxCntMask = 0x0f << 10,
+ txReTxCntBit = 10,
+ txCollision = 1 << 9,
+ txReTxLimit = 1 << 8,
+ txUnderrun = 1 << 6,
+ txLateCollision = 1 << 5
+};
+
+
+/* Values for the Rx command-status descriptor entry. */
+enum rd_cmdstat {
+ rxOwn = 1 << 31,
+ rxAutoMode = 1 << 30,
+ rxEI = 1 << 23,
+ rxFirst = 1 << 17,
+ rxLast = 1 << 16,
+ rxErrorSummary = 1 << 15,
+ rxIGMP = 1 << 14,
+ rxHashExpired = 1 << 13,
+ rxMissedFrame = 1 << 12,
+ rxFrameType = 1 << 11,
+ rxShortFrame = 1 << 8,
+ rxMaxFrameLen = 1 << 7,
+ rxOverrun = 1 << 6,
+ rxCollision = 1 << 4,
+ rxCRCError = 1
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+ hteValid = 1,
+ hteSkip = 2,
+ hteRD = 4
+};
+
+// The MIB counters
+typedef struct {
+ u32 byteReceived;
+ u32 byteSent;
+ u32 framesReceived;
+ u32 framesSent;
+ u32 totalByteReceived;
+ u32 totalFramesReceived;
+ u32 broadcastFramesReceived;
+ u32 multicastFramesReceived;
+ u32 cRCError;
+ u32 oversizeFrames;
+ u32 fragments;
+ u32 jabber;
+ u32 collision;
+ u32 lateCollision;
+ u32 frames64;
+ u32 frames65_127;
+ u32 frames128_255;
+ u32 frames256_511;
+ u32 frames512_1023;
+ u32 frames1024_MaxSize;
+ u32 macRxError;
+ u32 droppedFrames;
+ u32 outMulticastFrames;
+ u32 outBroadcastFrames;
+ u32 undersizeFrames;
+} mib_counters_t;
+
+
+struct gt64240_private {
+ gt64240_rd_t *rx_ring;
+ gt64240_td_t *tx_ring;
+ // The Rx and Tx rings must be 16-byte aligned
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ char *hash_table;
+ // The Hash Table must be 8-byte aligned
+ dma_addr_t hash_table_dma;
+ int hash_mode;
+
+ // The Rx buffers must be 8-byte aligned
+ char *rx_buff;
+ dma_addr_t rx_buff_dma;
+ // Tx buffers (tx_skbuff[i]->data) with less than 8 bytes
+ // of payload must be 8-byte aligned
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ int rx_next_out; /* The next free ring entry to receive */
+ int tx_next_in; /* The next free ring entry to send */
+ int tx_next_out; /* The last ring entry the ISR processed */
+ int tx_count; /* current # of pkts waiting to be sent in Tx ring */
+ int intr_work_done; /* number of Rx and Tx pkts processed in the isr */
+ int tx_full; /* Tx ring is full */
+
+ mib_counters_t mib;
+ struct net_device_stats stats;
+
+ int io_size;
+ int port_num; // 0 or 1
+ u32 port_offset;
+
+ int phy_addr; // PHY address
+ u32 last_psr; // last value of the port status register
+
+ int options; /* User-settable misc. driver options. */
+ int drv_flags;
+ spinlock_t lock; /* Serialise access to device */
+ struct mii_if_info mii_if;
+
+ u32 msg_enable;
+};
+
+#endif /* _GT64240ETH_H */
diff --git a/drivers/net/gt96100eth.c b/drivers/net/gt96100eth.c
index 0cbf1d143f890a..a41ccc45200726 100644
--- a/drivers/net/gt96100eth.c
+++ b/drivers/net/gt96100eth.c
@@ -28,7 +28,6 @@
* gt96100_cleanup_module(), and other general code cleanups
* <stevel@mvista.com>.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -66,10 +65,6 @@ static int gt96100_add_hash_entry(struct net_device *dev,
static void read_mib_counters(struct gt96100_private *gp);
static int read_MII(int phy_addr, u32 reg);
static int write_MII(int phy_addr, u32 reg, u16 data);
-#if 0
-static void dump_tx_ring(struct net_device *dev);
-static void dump_rx_ring(struct net_device *dev);
-#endif
static int gt96100_init_module(void);
static void gt96100_cleanup_module(void);
static void dump_MII(int dbg_lvl, struct net_device *dev);
@@ -84,7 +79,7 @@ static void abort(struct net_device *dev, u32 abort_bits);
static void hard_stop(struct net_device *dev);
static void enable_ether_irq(struct net_device *dev);
static void disable_ether_irq(struct net_device *dev);
-static int gt96100_probe1(int port_num);
+static int gt96100_probe1(struct pci_dev *pci, int port_num);
static void reset_tx(struct net_device *dev);
static void reset_rx(struct net_device *dev);
static int gt96100_check_tx_consistent(struct gt96100_private *gp);
@@ -164,13 +159,11 @@ chip_name(int chip_rev)
/*
DMA memory allocation, derived from pci_alloc_consistent.
*/
-static void *
-dmaalloc(size_t size, dma_addr_t *dma_handle)
+static void * dmaalloc(size_t size, dma_addr_t *dma_handle)
{
void *ret;
- ret = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA,
- get_order(size));
+ ret = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, get_order(size));
if (ret != NULL) {
dma_cache_inv((unsigned long)ret, size);
@@ -184,17 +177,13 @@ dmaalloc(size_t size, dma_addr_t *dma_handle)
return ret;
}
-static void
-dmafree(size_t size, void *vaddr)
+static void dmafree(size_t size, void *vaddr)
{
vaddr = (void*)KSEG0ADDR(vaddr);
free_pages((unsigned long)vaddr, get_order(size));
}
-
-
-static void
-gt96100_delay(int ms)
+static void gt96100_delay(int ms)
{
if (in_interrupt())
return;
@@ -327,34 +316,6 @@ write_MII(int phy_addr, u32 reg, u16 data)
return 0;
}
-#if 0
-// These routines work, just disabled to avoid compile warnings
-static void
-dump_tx_ring(struct net_device *dev)
-{
- struct gt96100_private *gp = netdev_priv(dev);
- int i;
-
- dbg(0, "%s: txno/txni/cnt=%d/%d/%d\n", __FUNCTION__,
- gp->tx_next_out, gp->tx_next_in, gp->tx_count);
-
- for (i=0; i<TX_RING_SIZE; i++)
- dump_tx_desc(0, dev, i);
-}
-
-static void
-dump_rx_ring(struct net_device *dev)
-{
- struct gt96100_private *gp = netdev_priv(dev);
- int i;
-
- dbg(0, "%s: rxno=%d\n", __FUNCTION__, gp->rx_next_out);
-
- for (i=0; i<RX_RING_SIZE; i++)
- dump_rx_desc(0, dev, i);
-}
-#endif
-
static void
dump_MII(int dbg_lvl, struct net_device *dev)
{
@@ -647,23 +608,19 @@ disable_ether_irq(struct net_device *dev)
/*
* Init GT96100 ethernet controller driver
*/
-int gt96100_init_module(void)
+static int gt96100_init_module(void)
{
+ struct pci_dev *pci;
int i, retval=0;
- u16 vendor_id, device_id;
u32 cpuConfig;
-#ifndef CONFIG_MIPS_GT96100ETH
- return -ENODEV;
-#endif
-
- // probe for GT96100 by reading PCI0 vendor/device ID register
- pcibios_read_config_word(0, 0, PCI_VENDOR_ID, &vendor_id);
- pcibios_read_config_word(0, 0, PCI_DEVICE_ID, &device_id);
-
- if (vendor_id != PCI_VENDOR_ID_MARVELL ||
- (device_id != PCI_DEVICE_ID_MARVELL_GT96100 &&
- device_id != PCI_DEVICE_ID_MARVELL_GT96100A)) {
+ /*
+ * Stupid probe because this really isn't a PCI device
+ */
+ if (!(pci = pci_find_device(PCI_VENDOR_ID_MARVELL,
+ PCI_DEVICE_ID_MARVELL_GT96100, NULL)) &&
+ !(pci = pci_find_device(PCI_VENDOR_ID_MARVELL,
+ PCI_DEVICE_ID_MARVELL_GT96100A, NULL))) {
printk(KERN_ERR __FILE__ ": GT96100 not found!\n");
return -ENODEV;
}
@@ -675,17 +632,13 @@ int gt96100_init_module(void)
return -ENODEV;
}
- for (i=0; i < NUM_INTERFACES; i++) {
- retval |= gt96100_probe1(i);
- }
+ for (i=0; i < NUM_INTERFACES; i++)
+ retval |= gt96100_probe1(pci, i);
return retval;
}
-
-
-static int __init
-gt96100_probe1(int port_num)
+static int __init gt96100_probe1(struct pci_dev *pci, int port_num)
{
struct gt96100_private *gp = NULL;
struct gt96100_if_t *gtif = &gt96100_iflist[port_num];
@@ -696,19 +649,19 @@ gt96100_probe1(int port_num)
struct net_device *dev = NULL;
if (gtif->irq < 0) {
- printk(KERN_ERR "%s: irq unknown - probing not supported\n", __FUNCTION_);
+ printk(KERN_ERR "%s: irq unknown - probing not supported\n",
+ __FUNCTION__);
return -ENODEV;
}
- pcibios_read_config_byte(0, 0, PCI_REVISION_ID, &chip_rev);
+ pci_read_config_byte(pci, PCI_REVISION_ID, &chip_rev);
if (chip_rev >= REV_GT96100A_1) {
phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
phy_addr = (phyAD >> (5*port_num)) & 0x1f;
} else {
/*
- * not sure what's this about -- probably
- * a gt bug
+ * not sure what's this about -- probably a gt bug
*/
phy_addr = port_num;
phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
@@ -831,6 +784,7 @@ out1:
free_netdev (dev);
out:
release_region(gtif->iobase, GT96100_ETH_IO_SIZE);
+
err("%s failed. Returns %d\n", __FUNCTION__, retval);
return retval;
}
@@ -1102,6 +1056,7 @@ gt96100_close(struct net_device *dev)
}
free_irq(dev->irq, dev);
+
return 0;
}
@@ -1312,10 +1267,11 @@ gt96100_tx_complete(struct net_device *dev, u32 status)
cmdstat, nextOut);
if (cmdstat & (u32)txOwn) {
- //dump_tx_ring(dev);
- // DMA is not finished writing descriptor???
- // Leave and come back later to pick-up where
- // we left off.
+ /*
+ * DMA is not finished writing descriptor???
+ * Leave and come back later to pick-up where
+ * we left off.
+ */
break;
}
@@ -1342,7 +1298,8 @@ gt96100_tx_complete(struct net_device *dev, u32 status)
gp->tx_full = 0;
if (gp->last_psr & psrLink) {
netif_wake_queue(dev);
- dbg(2, "%s: Tx Ring was full, queue waked\n", __FUNCTION_);
+ dbg(2, "%s: Tx Ring was full, queue waked\n",
+ __FUNCTION__);
}
}
@@ -1425,12 +1382,12 @@ gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs)
if ((psr & psrLink) && !gp->tx_full &&
netif_queue_stopped(dev)) {
- dbg(0, ": Link up, waking queue.\n",
- __FUNCTION_);
+ dbg(0, "%s: Link up, waking queue.\n",
+ __FUNCTION__);
netif_wake_queue(dev);
} else if (!(psr & psrLink) &&
!netif_queue_stopped(dev)) {
- dbg(0, "Link down, stopping queue.\n",
+ dbg(0, "%s: Link down, stopping queue.\n",
__FUNCTION__);
netif_stop_queue(dev);
}
@@ -1569,8 +1526,8 @@ static void gt96100_cleanup_module(void)
for (i=0; i<NUM_INTERFACES; i++) {
struct gt96100_if_t *gtif = &gt96100_iflist[i];
if (gtif->dev != NULL) {
- struct gt96100_private *gp =
- (struct gt96100_private *)gtif->dev->priv;
+ struct gt96100_private *gp = (struct gt96100_private *)
+ netdev_priv(gtif->dev);
unregister_netdev(gtif->dev);
dmafree(RX_HASH_TABLE_SIZE, gp->hash_table_dma);
dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
@@ -1583,9 +1540,6 @@ static void gt96100_cleanup_module(void)
}
}
-
-#ifndef MODULE
-
static int __init gt96100_setup(char *options)
{
char *this_opt;
@@ -1610,9 +1564,6 @@ static int __init gt96100_setup(char *options)
__setup("gt96100eth=", gt96100_setup);
-#endif /* !MODULE */
-
-
module_init(gt96100_init_module);
module_exit(gt96100_cleanup_module);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
new file mode 100644
index 00000000000000..41d38b3b3b4f2d
--- /dev/null
+++ b/drivers/net/mv643xx_eth.c
@@ -0,0 +1,2646 @@
+/*
+ * drivers/net/mv64340_eth.c - Driver for MV64340X ethernet ports
+ * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
+ *
+ * Based on the 64360 driver from:
+ * Copyright (C) 2002 rabeeh@galileo.co.il
+ *
+ * Copyright (C) 2003 PMC-Sierra, Inc.,
+ * written by Manish Lachwani (lachwani@pmc-sierra.com)
+ *
+ * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ip.h>
+#include <linux/init.h>
+#include <linux/in.h>
+#include <linux/pci.h>
+#include <linux/workqueue.h>
+#include <asm/smp.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include "mv643xx_eth.h"
+
+/*
+ * The first part is the high level driver of the gigE ethernet ports.
+ */
+
+/* Definition for configuring driver */
+#undef MV64340_RX_QUEUE_FILL_ON_TASK
+
+/* Constants */
+#define EXTRA_BYTES 32
+#define WRAP ETH_HLEN + 2 + 4 + 16
+#define BUFFER_MTU dev->mtu + WRAP
+#define INT_CAUSE_UNMASK_ALL 0x0007ffff
+#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
+#ifdef MV64340_RX_FILL_ON_TASK
+#define INT_CAUSE_MASK_ALL 0x00000000
+#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
+#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
+#endif
+
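+/*
+ * Illustrative note (not part of the driver code): with ETH_HLEN == 14,
+ * WRAP evaluates to 14 + 2 + 4 + 16 = 36 bytes, so for the default
+ * 1500-byte MTU BUFFER_MTU works out to 1536 bytes -- the same value used
+ * as the dummy rx_buff_size in ether_init_rx_desc_ring() below.
+ */
+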
+/* Static function declarations */
+static int mv64340_eth_real_open(struct net_device *);
+static int mv64340_eth_real_stop(struct net_device *);
+static int mv64340_eth_change_mtu(struct net_device *, int);
+static struct net_device_stats *mv64340_eth_get_stats(struct net_device *);
+static void eth_port_init_mac_tables(unsigned int eth_port_num);
+#ifdef MV64340_NAPI
+static int mv64340_poll(struct net_device *dev, int *budget);
+#endif
+
+unsigned char prom_mac_addr_base[6];
+unsigned long mv64340_sram_base;
+
+/*
+ * Changes MTU (maximum transfer unit) of the gigabit ethernet port
+ *
+ * Input : pointer to ethernet interface network device structure
+ * new mtu size
+ * Output : 0 upon success, -EINVAL upon failure
+ */
+static int mv64340_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mp->lock, flags);
+
+ if ((new_mtu > 9500) || (new_mtu < 64)) {
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ /*
+ * Stop then re-open the interface. This will allocate RX skb's with
+ * the new MTU.
+	 * There is a possible danger that the open will not succeed if
+	 * memory is exhausted, which would cause the open function to fail.
+ */
+ if (netif_running(dev)) {
+ if (mv64340_eth_real_stop(dev))
+ printk(KERN_ERR
+ "%s: Fatal error on stopping device\n",
+ dev->name);
+ if (mv64340_eth_real_open(dev))
+ printk(KERN_ERR
+ "%s: Fatal error on opening device\n",
+ dev->name);
+ }
+
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return 0;
+}
+
+/*
+ * mv64340_eth_rx_task
+ *
+ * Fills / refills RX queue on a certain gigabit ethernet port
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv64340_eth_rx_task(void *data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct mv64340_private *mp = netdev_priv(dev);
+ struct pkt_info pkt_info;
+ struct sk_buff *skb;
+
+ if (test_and_set_bit(0, &mp->rx_task_busy))
+ panic("%s: Error in test_set_bit / clear_bit", dev->name);
+
+ while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
+		/* The +8 is for buffer alignment, plus another 32 bytes extra */
+
+ skb = dev_alloc_skb(BUFFER_MTU + 8 + EXTRA_BYTES);
+ if (!skb)
+ /* Better luck next time */
+ break;
+ mp->rx_ring_skbs++;
+ pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
+ pkt_info.byte_cnt = dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES;
+		/* Align buffer to 8 bytes */
+ if (pkt_info.byte_cnt & ~0x7) {
+ pkt_info.byte_cnt &= ~0x7;
+ pkt_info.byte_cnt += 8;
+ }
+ pkt_info.buf_ptr =
+ pci_map_single(0, skb->data,
+ dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES,
+ PCI_DMA_FROMDEVICE);
+ pkt_info.return_info = skb;
+ if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
+ printk(KERN_ERR
+ "%s: Error allocating RX Ring\n", dev->name);
+ break;
+ }
+ skb_reserve(skb, 2);
+ }
+ clear_bit(0, &mp->rx_task_busy);
+ /*
+	 * If the RX ring is empty of SKBs, set a timer to try allocating
+	 * again at a later time.
+ */
+ if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) {
+ printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
+ /* After 100mSec */
+ mp->timeout.expires = jiffies + (HZ / 10);
+ add_timer(&mp->timeout);
+ mp->rx_timer_flag = 1;
+ }
+#if MV64340_RX_QUEUE_FILL_ON_TASK
+ else {
+ /* Return interrupts */
+ MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(mp->port_num),
+ INT_CAUSE_UNMASK_ALL);
+ }
+#endif
+}
+
+/*
+ * mv64340_eth_rx_task_timer_wrapper
+ *
+ * Timer routine to wake up RX queue filling task. This function is
+ * used only in case the RX queue is empty, and all alloc_skb has
+ * failed (due to out of memory event).
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv64340_eth_rx_task_timer_wrapper(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct mv64340_private *mp = netdev_priv(dev);
+
+ mp->rx_timer_flag = 0;
+ mv64340_eth_rx_task((void *) data);
+}
+
+
+/*
+ * mv64340_eth_update_mac_address
+ *
+ * Update the MAC address of the port in the address table
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv64340_eth_update_mac_address(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+
+ eth_port_init_mac_tables(port_num);
+ memcpy(mp->port_mac_addr, dev->dev_addr, 6);
+ eth_port_uc_addr_set(port_num, mp->port_mac_addr);
+}
+
+/*
+ * mv64340_eth_set_rx_mode
+ *
+ * Change from promiscuous to regular rx mode
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv64340_eth_set_rx_mode(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ethernet_set_config_reg
+ (mp->port_num,
+ ethernet_get_config_reg(mp->port_num) |
+ ETH_UNICAST_PROMISCUOUS_MODE);
+ } else {
+ ethernet_set_config_reg
+ (mp->port_num,
+ ethernet_get_config_reg(mp->port_num) &
+ ~(unsigned int) ETH_UNICAST_PROMISCUOUS_MODE);
+ }
+}
+
+
+/*
+ * mv64340_eth_set_mac_address
+ *
+ * Change the interface's mac address.
+ * Nothing special needs to be done in hardware because the interface is
+ * always put in promiscuous mode.
+ *
+ * Input : pointer to ethernet interface network device structure and
+ * a pointer to the designated entry to be added to the cache.
+ * Output : zero upon success, negative upon failure
+ */
+static int mv64340_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ /* +2 is for the offset of the HW addr type */
+ dev->dev_addr[i] = ((unsigned char *) addr)[i + 2];
+ mv64340_eth_update_mac_address(dev);
+ return 0;
+}
+
+/*
+ * mv64340_eth_tx_timeout
+ *
+ * Called upon a timeout on transmitting a packet
+ *
+ * Input : pointer to ethernet interface network device structure.
+ * Output : N/A
+ */
+static void mv64340_eth_tx_timeout(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+
+ printk(KERN_INFO "%s: TX timeout ", dev->name);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&mp->tx_timeout_task);
+}
+
+/*
+ * mv64340_eth_tx_timeout_task
+ *
+ * Actual routine to reset the adapter when a timeout on Tx has occurred
+ */
+static void mv64340_eth_tx_timeout_task(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+
+ netif_device_detach(dev);
+ eth_port_reset(mp->port_num);
+ eth_port_start(mp);
+ netif_device_attach(dev);
+}
+
+/*
+ * mv64340_eth_free_tx_queue
+ *
+ * Input : dev - a pointer to the required interface
+ *
+ * Output : 0 if it was able to release an skb, nonzero otherwise
+ */
+static int mv64340_eth_free_tx_queue(struct net_device *dev,
+ unsigned int eth_int_cause_ext)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ struct net_device_stats *stats = &mp->stats;
+ struct pkt_info pkt_info;
+ int released = 1;
+
+ if (!(eth_int_cause_ext & (BIT0 | BIT8)))
+ return released;
+
+ spin_lock(&mp->lock);
+
+ /* Check only queue 0 */
+ while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
+ if (pkt_info.cmd_sts & BIT0) {
+ printk("%s: Error in TX\n", dev->name);
+ stats->tx_errors++;
+ }
+
+ /*
+		 * If return_info is different from 0, release the skb.
+		 * return_info is nonzero only when a scatter/gather packet
+		 * was transmitted, in which case only the last skb releases
+		 * the whole chain.
+ */
+ if (pkt_info.return_info) {
+ dev_kfree_skb_irq((struct sk_buff *)
+ pkt_info.return_info);
+ released = 0;
+ if (skb_shinfo(pkt_info.return_info)->nr_frags)
+ pci_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, PCI_DMA_TODEVICE);
+
+ if (mp->tx_ring_skbs != 1)
+ mp->tx_ring_skbs--;
+ } else
+ pci_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, PCI_DMA_TODEVICE);
+
+ /*
+ * Decrement the number of outstanding skbs counter on
+ * the TX queue.
+ */
+ if (mp->tx_ring_skbs == 0)
+ panic("ERROR - TX outstanding SKBs counter is corrupted");
+
+ }
+
+ spin_unlock(&mp->lock);
+
+ return released;
+}
+
+/*
+ * mv64340_eth_receive
+ *
+ * This function forwards packets received from the port's queues
+ * toward the kernel core or FastRoutes them to another interface.
+ *
+ * Input : dev - a pointer to the required interface
+ * max - maximum number to receive (0 means unlimited)
+ *
+ * Output : number of served packets
+ */
+#ifdef MV64340_NAPI
+static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max,
+ int budget)
+#else
+static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max)
+#endif
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ struct net_device_stats *stats = &mp->stats;
+ unsigned int received_packets = 0;
+ struct sk_buff *skb;
+ struct pkt_info pkt_info;
+
+#ifdef MV64340_NAPI
+ while (eth_port_receive(mp, &pkt_info) == ETH_OK && budget > 0) {
+#else
+ while ((--max) && eth_port_receive(mp, &pkt_info) == ETH_OK) {
+#endif
+ mp->rx_ring_skbs--;
+ received_packets++;
+#ifdef MV64340_NAPI
+ budget--;
+#endif
+ /* Update statistics. Note byte count includes 4 byte CRC count */
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_info.byte_cnt;
+ skb = (struct sk_buff *) pkt_info.return_info;
+ /*
+		 * If a packet was received without the first / last bits set, OR
+		 * the error summary bit is set, the packet needs to be dropped.
+ */
+ if (((pkt_info.cmd_sts
+ & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
+ (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
+ || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
+ stats->rx_dropped++;
+ if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
+ ETH_RX_LAST_DESC)) !=
+ (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "%s: Received packet spread on multiple"
+ " descriptors\n",
+ dev->name);
+ }
+ if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
+ stats->rx_errors++;
+
+ dev_kfree_skb_irq(skb);
+ } else {
+ /*
+ * The -4 is for the CRC in the trailer of the
+ * received packet
+ */
+ skb_put(skb, pkt_info.byte_cnt - 4);
+ skb->dev = dev;
+
+ if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum = htons((pkt_info.cmd_sts
+ & 0x0007fff8) >> 3);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+#ifdef MV64340_NAPI
+ netif_receive_skb(skb);
+#else
+ netif_rx(skb);
+#endif
+ }
+ }
+
+ return received_packets;
+}
+
+/*
+ * mv64340_eth_int_handler
+ *
+ * Main interrupt handler for the gigabit ethernet ports
+ *
+ * Input : irq - irq number (not used)
+ * dev_id - a pointer to the required interface's data structure
+ * regs - not used
+ * Output : N/A
+ */
+
+static irqreturn_t mv64340_eth_int_handler(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct mv64340_private *mp = netdev_priv(dev);
+ u32 eth_int_cause, eth_int_cause_ext = 0;
+ unsigned int port_num = mp->port_num;
+
+ /* Read interrupt cause registers */
+ eth_int_cause = MV_READ(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num)) &
+ INT_CAUSE_UNMASK_ALL;
+
+ if (eth_int_cause & BIT1)
+ eth_int_cause_ext =
+ MV_READ(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
+ INT_CAUSE_UNMASK_ALL_EXT;
+
+#ifdef MV64340_NAPI
+ if (!(eth_int_cause & 0x0007fffd)) {
+		/* Don't ack the Rx interrupt */
+#endif
+ /*
+		 * Clear specific ethernet port interrupt registers by
+		 * acknowledging the relevant bits.
+ */
+ MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),
+ ~eth_int_cause);
+ if (eth_int_cause_ext != 0x0)
+ MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
+ ~eth_int_cause_ext);
+
+ /* UDP change : We may need this */
+ if ((eth_int_cause_ext & 0x0000ffff) &&
+ (mv64340_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
+ (MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1))
+ netif_wake_queue(dev);
+#ifdef MV64340_NAPI
+ } else {
+ if (netif_rx_schedule_prep(dev)) {
+ /* Mask all the interrupts */
+ MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),0);
+ MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
+ __netif_rx_schedule(dev);
+ }
+#else
+ {
+ if (eth_int_cause & (BIT2 | BIT11))
+ mv64340_eth_receive_queue(dev, 0);
+
+ /*
+ * After forwarded received packets to upper layer, add a task
+ * in an interrupts enabled context that refills the RX ring
+ * with skb's.
+ */
+#if MV64340_RX_QUEUE_FILL_ON_TASK
+ /* Unmask all interrupts on ethernet port */
+ MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
+ INT_CAUSE_MASK_ALL);
+ queue_task(&mp->rx_task, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+#else
+ mp->rx_task.func(dev);
+#endif
+#endif
+ }
+ /* PHY status changed */
+ if (eth_int_cause_ext & (BIT16 | BIT20)) {
+ unsigned int phy_reg_data;
+
+ /* Check Link status on ethernet port */
+ eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
+ if (!(phy_reg_data & 0x20)) {
+ netif_stop_queue(dev);
+ } else {
+ netif_wake_queue(dev);
+
+ /*
+			 * Start all TX queues on the ethernet port. This is
+			 * useful in case previous packets were not transmitted
+			 * due to link down; this command re-enables all TX
+			 * queues.
+			 * Note that it is possible to get a TX resource error
+			 * interrupt after issuing this, since not all TX queues
+			 * are enabled, or have anything to send.
+ */
+ MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 1);
+ }
+ }
+
+ /*
+	 * If no real interrupt occurred, exit.
+	 * This can happen when using the gigE interrupt coalescing mechanism.
+ */
+ if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+#ifdef MV64340_COAL
+
+/*
+ * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
+ *
+ * DESCRIPTION:
+ * This routine sets the RX coalescing interrupt mechanism parameter.
+ *	This parameter is a timeout counter that counts in chunks of
+ *	64 t_clk; when the timeout expires, a maskable interrupt is
+ *	raised.
+ *	The parameter is calculated from the t_clk of the MV-643xx chip
+ *	and the required interrupt delay in usec.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet port number
+ * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
+ * unsigned int delay Delay in usec
+ *
+ * OUTPUT:
+ * Interrupt coalescing mechanism value is set in MV-643xx chip.
+ *
+ * RETURN:
+ * The interrupt coalescing value set in the gigE port.
+ *
+ */
+static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
+ unsigned int t_clk, unsigned int delay)
+{
+ unsigned int coal = ((t_clk / 1000000) * delay) / 64;
+
+ /* Set RX Coalescing mechanism */
+ MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
+ ((coal & 0x3fff) << 8) |
+ (MV_READ(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num))
+ & 0xffc000ff));
+
+ return coal;
+}
+#endif
+
+/*
+ * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
+ *
+ * DESCRIPTION:
+ * This routine sets the TX coalescing interrupt mechanism parameter.
+ *	This parameter is a timeout counter that counts in chunks of
+ *	64 t_clk; when the timeout expires, a maskable interrupt is
+ *	raised.
+ *	The parameter is calculated from the t_clk frequency of the
+ *	MV-643xx chip and the required interrupt delay in usec.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet port number
+ * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
+ * unsigned int delay Delay in uSeconds
+ *
+ * OUTPUT:
+ * Interrupt coalescing mechanism value is set in MV-643xx chip.
+ *
+ * RETURN:
+ * The interrupt coalescing value set in the gigE port.
+ *
+ */
+static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
+ unsigned int t_clk, unsigned int delay)
+{
+ unsigned int coal;
+ coal = ((t_clk / 1000000) * delay) / 64;
+ /* Set TX Coalescing mechanism */
+ MV_WRITE(MV64340_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
+ coal << 4);
+ return coal;
+}
+
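+/*
+ * Illustrative worked example (not part of the driver code):
+ * mv64340_eth_real_open() below calls these helpers with
+ * t_clk = 133000000.  Assuming a hypothetical delay of 20 usec, the
+ * formula above gives
+ *
+ *	coal = ((133000000 / 1000000) * 20) / 64 = 41
+ *
+ * i.e. roughly 41 * 64 = 2624 t_clk cycles (about 20 usec at 133 MHz)
+ * between the triggering event and the maskable interrupt.
+ */
+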
+/*
+ * mv64340_eth_open
+ *
+ * This function is called when opening the network device. The function
+ * should initialize all the hardware, initialize the cyclic Rx/Tx
+ * descriptor chains and buffers and allocate an IRQ to the network
+ * device.
+ *
+ * Input : a pointer to the network device structure
+ *
+ * Output : zero on success, nonzero on failure.
+ */
+
+static int mv64340_eth_open(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+ int err = err;
+
+ spin_lock_irq(&mp->lock);
+
+ err = request_irq(dev->irq, mv64340_eth_int_handler,
+ SA_INTERRUPT | SA_SAMPLE_RANDOM, dev->name, dev);
+
+ if (err) {
+ printk(KERN_ERR "Can not assign IRQ number to MV64340_eth%d\n",
+ port_num);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ if (mv64340_eth_real_open(dev)) {
+ printk("%s: Error opening interface\n", dev->name);
+ err = -EBUSY;
+ goto out_free;
+ }
+
+ spin_unlock_irq(&mp->lock);
+
+ return 0;
+
+out_free:
+ free_irq(dev->irq, dev);
+
+out:
+ spin_unlock_irq(&mp->lock);
+
+ return err;
+}
+
+/*
+ * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
+ *
+ * DESCRIPTION:
+ * This function prepares a Rx chained list of descriptors and packet
+ * buffers in a form of a ring. The routine must be called after port
+ * initialization routine and before port start routine.
+ * The Ethernet SDMA engine uses CPU bus addresses to access the various
+ * devices in the system (i.e. DRAM). This function uses the ethernet
+ * struct 'virtual to physical' routine (set by the user) to set the ring
+ * with physical addresses.
+ *
+ * INPUT:
+ *	struct mv64340_private *mp	Ethernet Port Control struct.
+ * int rx_desc_num Number of Rx descriptors
+ * int rx_buff_size Size of Rx buffer
+ * unsigned int rx_desc_base_addr Rx descriptors memory area base addr.
+ * unsigned int rx_buff_base_addr Rx buffer memory area base addr.
+ *
+ * OUTPUT:
+ * The routine updates the Ethernet port control struct with information
+ * regarding the Rx descriptors and buffers.
+ *
+ * RETURN:
+ * false if the given descriptors memory area is not aligned according to
+ * Ethernet SDMA specifications.
+ * true otherwise.
+ */
+static int ether_init_rx_desc_ring(struct mv64340_private * mp,
+ unsigned long rx_buff_base_addr)
+{
+ unsigned long buffer_addr = rx_buff_base_addr;
+ volatile struct eth_rx_desc *p_rx_desc;
+ int rx_desc_num = mp->rx_ring_size;
+ unsigned long rx_desc_base_addr = (unsigned long) mp->p_rx_desc_area;
+ int rx_buff_size = 1536; /* Dummy, will be replaced later */
+ int i;
+
+ p_rx_desc = (struct eth_rx_desc *) rx_desc_base_addr;
+
+ /* Rx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
+ if (rx_buff_base_addr & 0xf)
+ return 0;
+
+ /* Rx buffers are limited to 64K bytes and Minimum size is 8 bytes */
+ if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
+ return 0;
+
+ /* Rx buffers must be 64-bit aligned. */
+ if ((rx_buff_base_addr + rx_buff_size) & 0x7)
+ return 0;
+
+ /* initialize the Rx descriptors ring */
+ for (i = 0; i < rx_desc_num; i++) {
+ p_rx_desc[i].buf_size = rx_buff_size;
+ p_rx_desc[i].byte_cnt = 0x0000;
+ p_rx_desc[i].cmd_sts =
+ ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
+ p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
+ ((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
+ p_rx_desc[i].buf_ptr = buffer_addr;
+
+ mp->rx_skb[i] = NULL;
+ buffer_addr += rx_buff_size;
+ }
+
+ /* Save Rx desc pointer to driver struct. */
+ mp->rx_curr_desc_q = 0;
+ mp->rx_used_desc_q = 0;
+
+ mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
+
+ mp->port_rx_queue_command |= 1;
+
+ return 1;
+}
+
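+/*
+ * Illustrative note (not part of the driver code): the modulo arithmetic
+ * in the next_desc_ptr assignment above is what closes the descriptor
+ * chain into a ring: descriptor i points at descriptor i + 1, and the
+ * last descriptor (i == rx_desc_num - 1) points back at rx_desc_dma + 0,
+ * so the SDMA engine simply wraps around when it reaches the end of the
+ * list.  ether_init_tx_desc_ring() below links its descriptors the same
+ * way.
+ */
+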
+/*
+ * ether_init_tx_desc_ring - Curve a Tx chain desc list and buffer in memory.
+ *
+ * DESCRIPTION:
+ * This function prepares a Tx chained list of descriptors and packet
+ * buffers in a form of a ring. The routine must be called after port
+ * initialization routine and before port start routine.
+ * The Ethernet SDMA engine uses CPU bus addresses to access the various
+ * devices in the system (i.e. DRAM). This function uses the ethernet
+ * struct 'virtual to physical' routine (set by the user) to set the ring
+ * with physical addresses.
+ *
+ * INPUT:
+ *	struct mv64340_private *mp	Ethernet Port Control struct.
+ * int tx_desc_num Number of Tx descriptors
+ * int tx_buff_size Size of Tx buffer
+ * unsigned int tx_desc_base_addr Tx descriptors memory area base addr.
+ *
+ * OUTPUT:
+ * The routine updates the Ethernet port control struct with information
+ * regarding the Tx descriptors and buffers.
+ *
+ * RETURN:
+ * false if the given descriptors memory area is not aligned according to
+ * Ethernet SDMA specifications.
+ * true otherwise.
+ */
+static int ether_init_tx_desc_ring(struct mv64340_private *mp)
+{
+ unsigned long tx_desc_base_addr = (unsigned long) mp->p_tx_desc_area;
+ int tx_desc_num = mp->tx_ring_size;
+ struct eth_tx_desc *p_tx_desc;
+ int i;
+
+ /* Tx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
+ if (tx_desc_base_addr & 0xf)
+ return 0;
+
+ /* save the first desc pointer to link with the last descriptor */
+ p_tx_desc = (struct eth_tx_desc *) tx_desc_base_addr;
+
+ /* Initialize the Tx descriptors ring */
+ for (i = 0; i < tx_desc_num; i++) {
+ p_tx_desc[i].byte_cnt = 0x0000;
+ p_tx_desc[i].l4i_chk = 0x0000;
+ p_tx_desc[i].cmd_sts = 0x00000000;
+ p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
+ ((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
+ p_tx_desc[i].buf_ptr = 0x00000000;
+ mp->tx_skb[i] = NULL;
+ }
+
+ /* Set Tx desc pointer in driver struct. */
+ mp->tx_curr_desc_q = 0;
+ mp->tx_used_desc_q = 0;
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+ mp->tx_first_desc_q = 0;
+#endif
+ /* Init Tx ring base and size parameters */
+ mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
+
+ /* Add the queue to the list of Tx queues of this port */
+ mp->port_tx_queue_command |= 1;
+
+ return 1;
+}
+
+/* Helper function for mv64340_eth_open */
+static int mv64340_eth_real_open(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+ u32 phy_reg_data;
+ unsigned int size;
+
+ /* Stop RX Queues */
+ MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
+ 0x0000ff00);
+
+ /* Clear the ethernet port interrupts */
+ MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
+ MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+
+ /* Unmask RX buffer and TX end interrupt */
+ MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
+ INT_CAUSE_UNMASK_ALL);
+
+ /* Unmask phy and link status changes interrupts */
+ MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
+ INT_CAUSE_UNMASK_ALL_EXT);
+
+ /* Set the MAC Address */
+ memcpy(mp->port_mac_addr, dev->dev_addr, 6);
+
+ eth_port_init(mp);
+
+ INIT_WORK(&mp->rx_task, (void (*)(void *)) mv64340_eth_rx_task, dev);
+
+ memset(&mp->timeout, 0, sizeof(struct timer_list));
+ mp->timeout.function = mv64340_eth_rx_task_timer_wrapper;
+ mp->timeout.data = (unsigned long) dev;
+
+ mp->rx_task_busy = 0;
+ mp->rx_timer_flag = 0;
+
+ /* Allocate TX ring */
+ mp->tx_ring_skbs = 0;
+ mp->tx_ring_size = MV64340_TX_QUEUE_SIZE;
+ size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
+ mp->tx_desc_area_size = size;
+
+	/* Assumes allocated ring is 16 bytes aligned */
+ mp->p_tx_desc_area = pci_alloc_consistent(NULL, size, &mp->tx_desc_dma);
+ if (!mp->p_tx_desc_area) {
+ printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
+ dev->name, size);
+ return -ENOMEM;
+ }
+ memset((void *) mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
+
+ /* Dummy will be replaced upon real tx */
+ ether_init_tx_desc_ring(mp);
+
+ /* Allocate RX ring */
+	/* For now the RX ring size is fixed - but it must be made configurable by the user */
+ mp->rx_ring_size = MV64340_RX_QUEUE_SIZE;
+ mp->rx_ring_skbs = 0;
+ size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
+ mp->rx_desc_area_size = size;
+
+ /* Assumes allocated ring is 16 bytes aligned */
+
+ mp->p_rx_desc_area = pci_alloc_consistent(NULL, size, &mp->rx_desc_dma);
+
+ if (!mp->p_rx_desc_area) {
+ printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
+ dev->name, size);
+ printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
+ dev->name);
+ pci_free_consistent(0, mp->tx_desc_area_size,
+ (void *) mp->p_tx_desc_area,
+ mp->tx_desc_dma);
+ return -ENOMEM;
+ }
+ memset(mp->p_rx_desc_area, 0, size);
+
+ if (!(ether_init_rx_desc_ring(mp, 0)))
+ panic("%s: Error initializing RX Ring", dev->name);
+
+ mv64340_eth_rx_task(dev); /* Fill RX ring with skb's */
+
+ eth_port_start(mp);
+
+ /* Interrupt Coalescing */
+
+#ifdef MV64340_COAL
+ mp->rx_int_coal =
+ eth_port_set_rx_coal(port_num, 133000000, MV64340_RX_COAL);
+#endif
+
+ mp->tx_int_coal =
+ eth_port_set_tx_coal (port_num, 133000000, MV64340_TX_COAL);
+
+ /* Increase the Rx side buffer size */
+
+ MV_WRITE (MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num), (0x5 << 17) |
+ (MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num))
+ & 0xfff1ffff));
+
+ /* Check Link status on phy */
+ eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
+ if (!(phy_reg_data & 0x20))
+ netif_stop_queue(dev);
+ else
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void mv64340_eth_free_tx_rings(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+ unsigned int curr;
+
+ /* Stop Tx Queues */
+ MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
+ 0x0000ff00);
+
+ /* Free TX rings */
+ /* Free outstanding skb's on TX rings */
+ for (curr = 0;
+ (mp->tx_ring_skbs) && (curr < MV64340_TX_QUEUE_SIZE);
+ curr++) {
+ if (mp->tx_skb[curr]) {
+ dev_kfree_skb(mp->tx_skb[curr]);
+ mp->tx_ring_skbs--;
+ }
+ }
+ if (mp->tx_ring_skbs != 0)
+ printk("%s: Error on Tx descriptor free - could not free %d"
+ " descriptors\n", dev->name,
+ mp->tx_ring_skbs);
+ pci_free_consistent(0, mp->tx_desc_area_size,
+ (void *) mp->p_tx_desc_area, mp->tx_desc_dma);
+}
+
+static void mv64340_eth_free_rx_rings(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+ int curr;
+
+ /* Stop RX Queues */
+ MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
+ 0x0000ff00);
+
+ /* Free RX rings */
+ /* Free preallocated skb's on RX rings */
+ for (curr = 0;
+ mp->rx_ring_skbs && (curr < MV64340_RX_QUEUE_SIZE);
+ curr++) {
+ if (mp->rx_skb[curr]) {
+ dev_kfree_skb(mp->rx_skb[curr]);
+ mp->rx_ring_skbs--;
+ }
+ }
+
+ if (mp->rx_ring_skbs != 0)
+ printk(KERN_ERR
+ "%s: Error in freeing Rx Ring. %d skb's still"
+ " stuck in RX Ring - ignoring them\n", dev->name,
+ mp->rx_ring_skbs);
+ pci_free_consistent(0, mp->rx_desc_area_size,
+ (void *) mp->p_rx_desc_area,
+ mp->rx_desc_dma);
+}
+
+/*
+ * mv64340_eth_stop
+ *
+ * This function is used when closing the network device.
+ * It updates the hardware,
+ * release all memory that holds buffers and descriptors and release the IRQ.
+ * Input : a pointer to the device structure
+ * Output : zero if success , nonzero if fails
+ */
+
+/* Helper function for mv64340_eth_stop */
+
+static int mv64340_eth_real_stop(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+
+ netif_stop_queue(dev);
+
+ mv64340_eth_free_tx_rings(dev);
+ mv64340_eth_free_rx_rings(dev);
+
+ eth_port_reset(mp->port_num);
+
+ /* Disable ethernet port interrupts */
+ MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
+ MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+
+ /* Mask RX buffer and TX end interrupt */
+ MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num), 0);
+
+ /* Mask phy and link status changes interrupts */
+ MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
+
+ return 0;
+}
+
+static int mv64340_eth_stop(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+
+ spin_lock_irq(&mp->lock);
+
+ mv64340_eth_real_stop(dev);
+
+ free_irq(dev->irq, dev);
+ spin_unlock_irq(&mp->lock);
+
+ return 0;
+}
+
+#ifdef MV64340_NAPI
+static void mv64340_tx(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ struct pkt_info pkt_info;
+
+ while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
+ if (pkt_info.return_info) {
+ dev_kfree_skb_irq((struct sk_buff *)
+ pkt_info.return_info);
+ if (skb_shinfo(pkt_info.return_info)->nr_frags)
+ pci_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt,
+ PCI_DMA_TODEVICE);
+
+ if (mp->tx_ring_skbs != 1)
+ mp->tx_ring_skbs--;
+ } else
+ pci_unmap_page(NULL, pkt_info.buf_ptr, pkt_info.byte_cnt,
+ PCI_DMA_TODEVICE);
+ }
+
+ if (netif_queue_stopped(dev) &&
+ MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1)
+ netif_wake_queue(dev);
+}
+
+/*
+ * mv64340_poll
+ *
+ * This function is used in case of NAPI
+ */
+static int mv64340_poll(struct net_device *dev, int *budget)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ int done = 1, orig_budget, work_done;
+ unsigned int port_num = mp->port_num;
+ unsigned long flags;
+
+#ifdef MV64340_TX_FAST_REFILL
+ if (++mp->tx_clean_threshold > 5) {
+ spin_lock_irqsave(&mp->lock, flags);
+ mv64340_tx(dev);
+ mp->tx_clean_threshold = 0;
+ spin_unlock_irqrestore(&mp->lock, flags);
+ }
+#endif
+
+ if ((u32)(MV_READ(MV64340_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) != (u32)mp->rx_used_desc_q) {
+ orig_budget = *budget;
+ if (orig_budget > dev->quota)
+ orig_budget = dev->quota;
+ work_done = mv64340_eth_receive_queue(dev, 0, orig_budget);
+ mp->rx_task.func(dev);
+ *budget -= work_done;
+ dev->quota -= work_done;
+ if (work_done >= orig_budget)
+ done = 0;
+ }
+
+ if (done) {
+ spin_lock_irqsave(&mp->lock, flags);
+ __netif_rx_complete(dev);
+ MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),0);
+ MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),0);
+ MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
+ INT_CAUSE_UNMASK_ALL);
+ MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
+ INT_CAUSE_UNMASK_ALL_EXT);
+ spin_unlock_irqrestore(&mp->lock, flags);
+ }
+
+ return done ? 0 : 1;
+}
+#endif
+
+/*
+ * mv64340_eth_start_xmit
+ *
+ * This function queues a packet in the Tx descriptors for the
+ * required port.
+ *
+ * Input : skb - a pointer to socket buffer
+ * dev - a pointer to the required port
+ *
+ * Output : zero upon success
+ */
+static int mv64340_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+ struct net_device_stats *stats = &mp->stats;
+ ETH_FUNC_RET_STATUS status;
+ unsigned long flags;
+ struct pkt_info pkt_info;
+
+ if (netif_queue_stopped(dev)) {
+ printk(KERN_ERR
+ "%s: Tried sending packet when interface is stopped\n",
+ dev->name);
+ return 1;
+ }
+
+ /* This is a hard error, log it. */
+ if ((MV64340_TX_QUEUE_SIZE - mp->tx_ring_skbs) <=
+ (skb_shinfo(skb)->nr_frags + 1)) {
+ netif_stop_queue(dev);
+ printk(KERN_ERR
+ "%s: Bug in mv64340_eth - Trying to transmit when"
+ " queue full !\n", dev->name);
+ return 1;
+ }
+
+ /* Paranoid check - this shouldn't happen */
+ if (skb == NULL) {
+ stats->tx_dropped++;
+ return 1;
+ }
+
+ spin_lock_irqsave(&mp->lock, flags);
+
+ /* Update packet info data structure -- DMA owned, first last */
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+ if (!skb_shinfo(skb)->nr_frags || (skb_shinfo(skb)->nr_frags > 3)) {
+#endif
+ pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
+ ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
+
+ pkt_info.byte_cnt = skb->len;
+ pkt_info.buf_ptr = pci_map_single(0, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+
+
+ pkt_info.return_info = skb;
+ status = eth_port_send(mp, &pkt_info);
+ if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
+ printk(KERN_ERR "%s: Error on transmitting packet\n",
+ dev->name);
+ mp->tx_ring_skbs++;
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+ } else {
+ unsigned int frag;
+ u32 ipheader;
+
+ /* first frag which is skb header */
+ pkt_info.byte_cnt = skb_headlen(skb);
+ pkt_info.buf_ptr = pci_map_single(0, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ pkt_info.return_info = 0;
+ ipheader = skb->nh.iph->ihl << 11;
+ pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
+ ETH_GEN_TCP_UDP_CHECKSUM |
+ ETH_GEN_IP_V_4_CHECKSUM |
+ ipheader;
+ /* CPU already calculated pseudo header checksum. So, use it */
+ pkt_info.l4i_chk = skb->h.th->check;
+ status = eth_port_send(mp, &pkt_info);
+ if (status != ETH_OK) {
+ if ((status == ETH_ERROR))
+ printk(KERN_ERR "%s: Error on transmitting packet\n", dev->name);
+ if (status == ETH_QUEUE_FULL)
+ printk("Error on Queue Full \n");
+ if (status == ETH_QUEUE_LAST_RESOURCE)
+ printk("Tx resource error \n");
+ }
+
+ /* Check for the remaining frags */
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ pkt_info.l4i_chk = 0x0000;
+ pkt_info.cmd_sts = 0x00000000;
+
+ /* Last Frag enables interrupt and frees the skb */
+ if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
+ pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT |
+ ETH_TX_LAST_DESC;
+ pkt_info.return_info = skb;
+ mp->tx_ring_skbs++;
+ }
+ else {
+ pkt_info.return_info = 0;
+ }
+ pkt_info.byte_cnt = this_frag->size;
+ if (this_frag->size < 8)
+ printk("%d : \n", skb_shinfo(skb)->nr_frags);
+
+ pkt_info.buf_ptr = pci_map_page(NULL, this_frag->page,
+ this_frag->page_offset,
+ this_frag->size, PCI_DMA_TODEVICE);
+
+ status = eth_port_send(mp, &pkt_info);
+
+ if (status != ETH_OK) {
+ if ((status == ETH_ERROR))
+ printk(KERN_ERR "%s: Error on transmitting packet\n", dev->name);
+
+ if (status == ETH_QUEUE_LAST_RESOURCE)
+ printk("Tx resource error \n");
+
+ if (status == ETH_QUEUE_FULL)
+ printk("Queue is full \n");
+ }
+ }
+ }
+#endif
+
+ /* Check if TX queue can handle another skb. If not, then
+ * signal higher layers to stop requesting TX
+ */
+ if (MV64340_TX_QUEUE_SIZE <= (mp->tx_ring_skbs + 1))
+ /*
+ * Stop getting skb's from upper layers.
+ * Getting skb's from upper layers will be enabled again after
+ * packets are released.
+ */
+ netif_stop_queue(dev);
+
+	/* Update statistics and start of transmission time */
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ dev->trans_start = jiffies;
+
+ spin_unlock_irqrestore(&mp->lock, flags);
+
+ return 0; /* success */
+}
+
+/*
+ * mv64340_eth_get_stats
+ *
+ * Returns a pointer to the interface statistics.
+ *
+ * Input : dev - a pointer to the required interface
+ *
+ * Output : a pointer to the interface's statistics
+ */
+
+static struct net_device_stats *mv64340_eth_get_stats(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+
+ return &mp->stats;
+}
+
+/*
+ * mv64340_eth_init
+ *
+ * First function called after registering the network device.
+ * Its purpose is to initialize the device as an ethernet device,
+ * fill the structure that was given at registration with pointers
+ * to functions, and set the MAC address of the interface.
+ *
+ * Input : number of port to initialize
+ * Output : a pointer to the new net_device on success, NULL on failure
+ */
+static struct net_device *mv64340_eth_init(int port_num)
+{
+ struct mv64340_private *mp;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mv64340_private));
+ if (!dev)
+ return NULL;
+
+ mp = netdev_priv(dev);
+
+ dev->irq = ETH_PORT0_IRQ_NUM + port_num;
+
+ dev->open = mv64340_eth_open;
+ dev->stop = mv64340_eth_stop;
+ dev->hard_start_xmit = mv64340_eth_start_xmit;
+ dev->get_stats = mv64340_eth_get_stats;
+ dev->set_mac_address = mv64340_eth_set_mac_address;
+ dev->set_multicast_list = mv64340_eth_set_rx_mode;
+
+ /* No need to Tx Timeout */
+ dev->tx_timeout = mv64340_eth_tx_timeout;
+#ifdef MV64340_NAPI
+ dev->poll = mv64340_poll;
+ dev->weight = 64;
+#endif
+
+ dev->watchdog_timeo = 2 * HZ;
+ dev->tx_queue_len = MV64340_TX_QUEUE_SIZE;
+ dev->base_addr = 0;
+ dev->change_mtu = mv64340_eth_change_mtu;
+
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+#ifdef MAX_SKB_FRAGS
+#ifndef CONFIG_JAGUAR_DMALOW
+ /*
+ * Zero copy can only work if we use Discovery II memory. Else, we will
+ * have to map the buffers to ISA memory which is only 16 MB
+ */
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM;
+#endif
+#endif
+#endif
+
+ mp->port_num = port_num;
+
+ /* Configure the timeout task */
+ INIT_WORK(&mp->tx_timeout_task,
+ (void (*)(void *))mv64340_eth_tx_timeout_task, dev);
+
+ spin_lock_init(&mp->lock);
+
+ /* set MAC addresses */
+ memcpy(dev->dev_addr, prom_mac_addr_base, 6);
+ dev->dev_addr[5] += port_num;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out_free_dev;
+
+ printk(KERN_NOTICE "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, port_num,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ if (dev->features & NETIF_F_SG)
+ printk("Scatter Gather Enabled ");
+
+ if (dev->features & NETIF_F_IP_CSUM)
+ printk("TX TCP/IP Checksumming Supported \n");
+
+ printk("RX TCP/UDP Checksum Offload ON, \n");
+ printk("TX and RX Interrupt Coalescing ON \n");
+
+#ifdef MV64340_NAPI
+ printk("RX NAPI Enabled \n");
+#endif
+
+ return dev;
+
+out_free_dev:
+ free_netdev(dev);
+
+ return NULL;
+}
+
+static void mv64340_eth_remove(struct net_device *dev)
+{
+ struct mv64340_private *mp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ flush_scheduled_work();
+ free_netdev(dev);
+}
+
+static struct net_device *mv64340_dev0;
+static struct net_device *mv64340_dev1;
+static struct net_device *mv64340_dev2;
+
+/*
+ * mv64340_init_module
+ *
+ * Registers the network drivers into the Linux kernel
+ *
+ * Input : N/A
+ *
+ * Output : N/A
+ */
+static int __init mv64340_init_module(void)
+{
+ printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
+
+#ifdef CONFIG_MV643XX_ETH_0
+ mv64340_dev0 = mv64340_eth_init(0);
+ if (!mv64340_dev0) {
+ printk(KERN_ERR
+ "Error registering MV-64360 ethernet port 0\n");
+ }
+#endif
+#ifdef CONFIG_MV643XX_ETH_1
+ mv64340_dev1 = mv64340_eth_init(1);
+ if (!mv64340_dev1) {
+ printk(KERN_ERR
+ "Error registering MV-64360 ethernet port 1\n");
+ }
+#endif
+#ifdef CONFIG_MV643XX_ETH_2
+ mv64340_dev2 = mv64340_eth_init(2);
+ if (!mv64340_dev2) {
+ printk(KERN_ERR
+ "Error registering MV-64360 ethernet port 2\n");
+ }
+#endif
+ return 0;
+}
+
+/*
+ * mv64340_cleanup_module
+ *
+ * Unregisters the network drivers from the Linux kernel
+ *
+ * Input : N/A
+ *
+ * Output : N/A
+ */
+static void __exit mv64340_cleanup_module(void)
+{
+ if (mv64340_dev2)
+ mv64340_eth_remove(mv64340_dev2);
+ if (mv64340_dev1)
+ mv64340_eth_remove(mv64340_dev1);
+ if (mv64340_dev0)
+ mv64340_eth_remove(mv64340_dev0);
+}
+
+module_init(mv64340_init_module);
+module_exit(mv64340_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm and Manish Lachwani");
+MODULE_DESCRIPTION("Ethernet driver for Marvell MV64340");
+
+/*
+ * The second part is the low level driver of the gigE ethernet ports.
+ */
+
+/*
+ * Marvell's Gigabit Ethernet controller low level driver
+ *
+ * DESCRIPTION:
+ *	This file introduces a low level API to Marvell's Gigabit Ethernet
+ * controller. This Gigabit Ethernet Controller driver API controls
+ * 1) Operations (i.e. port init, start, reset etc').
+ * 2) Data flow (i.e. port send, receive etc').
+ * Each Gigabit Ethernet port is controlled via
+ * struct mv64340_private.
+ * This struct includes user configuration information as well as
+ * driver internal data needed for its operations.
+ *
+ * Supported Features:
+ * - This low level driver is OS independent. Allocating memory for
+ *		the descriptor rings and buffers is not within the scope of
+ * this driver.
+ * - The user is free from Rx/Tx queue managing.
+ *	- This low level driver introduces a functional API that enables
+ *		the user to operate Marvell's Gigabit Ethernet Controller in a
+ * convenient way.
+ * - Simple Gigabit Ethernet port operation API.
+ * - Simple Gigabit Ethernet port data flow API.
+ * - Data flow and operation API support per queue functionality.
+ * - Support cached descriptors for better performance.
+ * - Enable access to all four DRAM banks and internal SRAM memory
+ * spaces.
+ * - PHY access and control API.
+ * - Port control register configuration API.
+ * - Full control over Unicast and Multicast MAC configurations.
+ *
+ * Operation flow:
+ *
+ * Initialization phase
+ *		This phase completes the initialization of the mv64340_private
+ * struct.
+ * User information regarding port configuration has to be set
+ * prior to calling the port initialization routine.
+ *
+ * In this phase any port Tx/Rx activity is halted, MIB counters
+ *		are cleared, the PHY address is set according to the user
+ *		parameter and access to the DRAM and internal SRAM memory
+ *		spaces is enabled.
+ *
+ * Driver ring initialization
+ * Allocating memory for the descriptor rings and buffers is not
+ * within the scope of this driver. Thus, the user is required to
+ * allocate memory for the descriptors ring and buffers. Those
+ * memory parameters are used by the Rx and Tx ring initialization
+ * routines in order to curve the descriptor linked list in a form
+ * of a ring.
+ * Note: Pay special attention to alignment issues when using
+ *		cached descriptors/buffers. In this phase the driver stores
+ * information in the mv64340_private struct regarding each queue
+ * ring.
+ *
+ * Driver start
+ * This phase prepares the Ethernet port for Rx and Tx activity.
+ * It uses the information stored in the mv64340_private struct to
+ * initialize the various port registers.
+ *
+ * Data flow:
+ * All packet references to/from the driver are done using
+ * struct pkt_info.
+ * This struct is a unified struct used with Rx and Tx operations.
+ * This way the user is not required to be familiar with neither
+ * Tx nor Rx descriptors structures.
+ *		The driver's descriptor rings are managed by indexes.
+ *		Those indexes control the ring resources and are used to
+ *		indicate a SW resource error:
+ * 'current'
+ * This index points to the current available resource for use. For
+ * example in Rx process this index will point to the descriptor
+ * that will be passed to the user upon calling the receive routine.
+ * In Tx process, this index will point to the descriptor
+ * that will be assigned with the user packet info and transmitted.
+ * 'used'
+ *		This index points to the descriptor that needs to restore its
+ * resources. For example in Rx process, using the Rx buffer return
+ * API will attach the buffer returned in packet info to the
+ * descriptor pointed by 'used'. In Tx process, using the Tx
+ * descriptor return will merely return the user packet info with
+ * the command status of the transmitted buffer pointed by the
+ * 'used' index. Nevertheless, it is essential to use this routine
+ * to update the 'used' index.
+ * 'first'
+ * This index supports Tx Scatter-Gather. It points to the first
+ * descriptor of a packet assembled of multiple buffers. For example
+ *		if in the middle of such a packet we get a Tx resource error, the
+ *		'curr' index gets the value of 'first' to indicate that the ring
+ *		returned to its state before trying to transmit this packet.
+ *
+ * Receive operation:
+ * The eth_port_receive API set the packet information struct,
+ * passed by the caller, with received information from the
+ * 'current' SDMA descriptor.
+ *		It is the user's responsibility to return this resource back
+ *		to the Rx descriptor ring to enable the reuse of this resource.
+ *		Returning an Rx resource is done using the eth_rx_return_buff API.
+ *
+ * Transmit operation:
+ *	The eth_port_send API supports Scatter-Gather, which enables
+ *	sending a packet spanned over multiple buffers. A packet info
+ *	structure given by the user and put into the Tx descriptors ring
+ *	will be transmitted only once the 'LAST' bit is set in its command
+ *	status field. This API also enforces restrictions regarding buffer
+ *	alignments and sizes.
+ * The user must return a Tx resource after ensuring the buffer
+ * has been transmitted to enable the Tx ring indexes to update.
+ *
+ * BOARD LAYOUT
+ * This device is on-board. No jumper diagram is necessary.
+ *
+ * EXTERNAL INTERFACE
+ *
+ * Prior to calling the initialization routine eth_port_init() the user
+ * must set the following fields under mv64340_private struct:
+ * port_num User Ethernet port number.
+ * port_mac_addr[6] User defined port MAC address.
+ * port_config User port configuration value.
+ * port_config_extend User port config extend value.
+ * port_sdma_config User port SDMA config value.
+ * port_serial_control User port serial control value.
+ *
+ * This driver introduces a set of default values:
+ * PORT_CONFIG_VALUE Default port configuration value
+ * PORT_CONFIG_EXTEND_VALUE Default port extend configuration value
+ * PORT_SDMA_CONFIG_VALUE Default sdma control value
+ * PORT_SERIAL_CONTROL_VALUE Default port serial control value
+ *
+ * This driver data flow is done using the struct pkt_info which
+ * is a unified struct for Rx and Tx operations:
+ *
+ * byte_cnt Tx/Rx descriptor buffer byte count.
+ * l4i_chk CPU provided TCP Checksum. For Tx operation
+ * only.
+ * cmd_sts Tx/Rx descriptor command status.
+ * buf_ptr Tx/Rx descriptor buffer pointer.
+ * return_info Tx/Rx user resource return information.
+ */
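+
+/*
+ * Illustrative usage sketch (not part of the driver proper): a minimal
+ * receive loop built on the pkt_info flow described above.  The variable
+ * names and the buffer refill details are assumptions made for
+ * illustration only.
+ *
+ *	struct pkt_info pkt_info;
+ *
+ *	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
+ *		struct sk_buff *skb = pkt_info.return_info;
+ *
+ *		hand skb (pkt_info.byte_cnt bytes) to the network stack,
+ *		then attach a fresh buffer to the ring:
+ *
+ *		pkt_info.byte_cnt = new_buffer_size;	(hypothetical)
+ *		pkt_info.buf_ptr = new_buffer_dma_addr;	(hypothetical)
+ *		pkt_info.return_info = new_skb;		(hypothetical)
+ *		eth_rx_return_buff(mp, &pkt_info);
+ *	}
+ */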
+
+/* defines */
+/* SDMA command macros */
+#define ETH_ENABLE_TX_QUEUE(eth_port) \
+ MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1)
+
+#define ETH_DISABLE_TX_QUEUE(eth_port) \
+ MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), \
+ (1 << 8))
+
+#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
+ MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), \
+ (1 << rx_queue))
+
+#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
+ MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), \
+ (1 << (8 + rx_queue)))
+
+#define LINK_UP_TIMEOUT 100000
+#define PHY_BUSY_TIMEOUT 10000000
+
+/* locals */
+
+/* PHY routines */
+static int ethernet_phy_get(unsigned int eth_port_num);
+
+/* Ethernet Port routines */
+static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
+ int option);
+
+/*
+ * eth_port_init - Initialize the Ethernet port driver
+ *
+ * DESCRIPTION:
+ * This function prepares the ethernet port to start its activity:
+ *	1) Completes the ethernet port driver struct initialization ahead of
+ *	   the port start routine.
+ *	2) Resets the device to a quiescent state in case of warm reboot.
+ *	3) Enables SDMA access to all four DRAM banks as well as internal SRAM.
+ *	4) Cleans the MAC tables. The reset state of those tables is unknown.
+ *	5) Sets the PHY address.
+ * Note: Call this routine prior to eth_port_start routine and after
+ * setting user values in the user fields of Ethernet port control
+ * struct.
+ *
+ * INPUT:
+ * struct mv64340_private *mp Ethernet port control struct
+ *
+ * OUTPUT:
+ * See description.
+ *
+ * RETURN:
+ * None.
+ */
+static void eth_port_init(struct mv64340_private * mp)
+{
+ mp->port_config = PORT_CONFIG_VALUE;
+ mp->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
+#if defined(__BIG_ENDIAN)
+ mp->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
+#elif defined(__LITTLE_ENDIAN)
+ mp->port_sdma_config = PORT_SDMA_CONFIG_VALUE |
+ ETH_BLM_RX_NO_SWAP | ETH_BLM_TX_NO_SWAP;
+#else
+#error One of __LITTLE_ENDIAN or __BIG_ENDIAN must be defined!
+#endif
+ mp->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
+
+ mp->port_rx_queue_command = 0;
+ mp->port_tx_queue_command = 0;
+
+ mp->rx_resource_err = 0;
+ mp->tx_resource_err = 0;
+
+ eth_port_reset(mp->port_num);
+
+ eth_port_init_mac_tables(mp->port_num);
+
+ ethernet_phy_reset(mp->port_num);
+}
+
+/*
+ * eth_port_start - Start the Ethernet port activity.
+ *
+ * DESCRIPTION:
+ * This routine prepares the Ethernet port for Rx and Tx activity:
+ * 1. Initialize Tx and Rx Current Descriptor Pointer for each queue that
+ * has been initialized a descriptor's ring (using
+ * ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx)
+ * 2. Initialize and enable the Ethernet configuration port by writing to
+ * the port's configuration and command registers.
+ * 3. Initialize and enable the SDMA by writing to the SDMA's
+ * configuration and command registers. After completing these steps,
+ *	   the ethernet port SDMA can start performing Rx and Tx activities.
+ *
+ * Note: Each Rx and Tx queue descriptor's list must be initialized prior
+ * to calling this function (use ether_init_tx_desc_ring for Tx queues
+ * and ether_init_rx_desc_ring for Rx queues).
+ *
+ * INPUT:
+ * struct mv64340_private *mp Ethernet port control struct
+ *
+ * OUTPUT:
+ * Ethernet port is ready to receive and transmit.
+ *
+ * RETURN:
+ * false if the port PHY is not up.
+ * true otherwise.
+ */
+static int eth_port_start(struct mv64340_private *mp)
+{
+ unsigned int eth_port_num = mp->port_num;
+ int tx_curr_desc, rx_curr_desc;
+ unsigned int phy_reg_data;
+
+ /* Assignment of Tx CTRP of given queue */
+ tx_curr_desc = mp->tx_curr_desc_q;
+ MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(eth_port_num),
+ (struct eth_tx_desc *) mp->tx_desc_dma + tx_curr_desc);
+
+ /* Assignment of Rx CRDP of given queue */
+ rx_curr_desc = mp->rx_curr_desc_q;
+ MV_WRITE(MV64340_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(eth_port_num),
+ (struct eth_rx_desc *) mp->rx_desc_dma + rx_curr_desc);
+
+ /* Add the assigned Ethernet address to the port's address table */
+ eth_port_uc_addr_set(mp->port_num, mp->port_mac_addr);
+
+ /* Assign port configuration and command. */
+ MV_WRITE(MV64340_ETH_PORT_CONFIG_REG(eth_port_num),
+ mp->port_config);
+
+ MV_WRITE(MV64340_ETH_PORT_CONFIG_EXTEND_REG(eth_port_num),
+ mp->port_config_extend);
+
+ MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
+ mp->port_serial_control);
+
+ MV_SET_REG_BITS(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
+ ETH_SERIAL_PORT_ENABLE);
+
+ /* Assign port SDMA configuration */
+ MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
+ mp->port_sdma_config);
+
+ /* Enable port Rx. */
+ MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port_num),
+ mp->port_rx_queue_command);
+
+ /* Check if link is up */
+ eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data);
+
+ if (!(phy_reg_data & 0x20))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * eth_port_uc_addr_set - Set the port unicast address.
+ *
+ * DESCRIPTION:
+ *	This function sets the port Ethernet MAC address.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Port number.
+ * char * p_addr Address to be set
+ *
+ * OUTPUT:
+ *	Sets the MAC address low and high registers; also calls
+ *	eth_port_uc_addr() to set the unicast table with the proper information.
+ *
+ * RETURN:
+ * N/A.
+ *
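+ * EXAMPLE (illustrative):
+ *	For the made-up address 00:11:22:33:44:55 the registers are written
+ *	as mac_h = 0x00112233 and mac_l = 0x4455.
+ *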
+ */
+static void eth_port_uc_addr_set(unsigned int eth_port_num,
+ unsigned char *p_addr)
+{
+ unsigned int mac_h;
+ unsigned int mac_l;
+
+ mac_l = (p_addr[4] << 8) | (p_addr[5]);
+ mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
+ (p_addr[2] << 8) | (p_addr[3] << 0);
+
+ MV_WRITE(MV64340_ETH_MAC_ADDR_LOW(eth_port_num), mac_l);
+ MV_WRITE(MV64340_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);
+
+ /* Accept frames of this address */
+ eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR);
+
+ return;
+}
+
+/*
+ * eth_port_uc_addr - Set the port unicast address table entry
+ *
+ * DESCRIPTION:
+ * This function locates the proper entry in the Unicast table for the
+ * specified MAC nibble and sets its properties according to function
+ * parameters.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Port number.
+ * unsigned char uc_nibble Unicast MAC Address last nibble.
+ *	int		option		0 = add (ACCEPT_MAC_ADDR), 1 = remove (REJECT_MAC_ADDR).
+ *
+ * OUTPUT:
+ *	This function adds/removes MAC addresses from the port unicast address
+ * table.
+ *
+ * RETURN:
+ *	true if the operation succeeded.
+ * false if option parameter is invalid.
+ *
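+ * EXAMPLE (illustrative):
+ *	For a made-up address ending in 0x55, uc_nibble is 5, so
+ *	tbl_offset = 4 (the second 32-bit table register) and reg_offset = 1,
+ *	i.e. the accept bit affected is bit 8 of that register.
+ *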
+ */
+static int eth_port_uc_addr(unsigned int eth_port_num,
+ unsigned char uc_nibble, int option)
+{
+ unsigned int unicast_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ /* Locate the Unicast table entry */
+ uc_nibble = (0xf & uc_nibble);
+ tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */
+ reg_offset = uc_nibble % 4; /* Entry offset within the above register */
+
+ switch (option) {
+ case REJECT_MAC_ADDR:
+ /* Clear accepts frame bit at specified unicast DA table entry */
+ unicast_reg = MV_READ((MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
+ (eth_port_num) + tbl_offset));
+
+		unicast_reg &= ~(0x01 << (8 * reg_offset));
+
+ MV_WRITE(
+ (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
+ (eth_port_num) + tbl_offset), unicast_reg);
+ break;
+
+ case ACCEPT_MAC_ADDR:
+ /* Set accepts frame bit at unicast DA filter table entry */
+ unicast_reg =
+ MV_READ(
+ (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
+ (eth_port_num) + tbl_offset));
+
+ unicast_reg |= (0x01 << (8 * reg_offset));
+
+ MV_WRITE(
+ (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
+ (eth_port_num) + tbl_offset), unicast_reg);
+
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
+ *
+ * DESCRIPTION:
+ * Go through all the DA filter tables (Unicast, Special Multicast &
+ * Other Multicast) and set each entry to 0.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * Multicast and Unicast packets are rejected.
+ *
+ * RETURN:
+ * None.
+ */
+static void eth_port_init_mac_tables(unsigned int eth_port_num)
+{
+ int table_index;
+
+ /* Clear DA filter unicast table (Ex_dFUT) */
+ for (table_index = 0; table_index <= 0xC; table_index += 4)
+ MV_WRITE(
+ (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
+ (eth_port_num) + table_index), 0);
+
+ for (table_index = 0; table_index <= 0xFC; table_index += 4) {
+ /* Clear DA filter special multicast table (Ex_dFSMT) */
+ MV_WRITE(
+ (MV64340_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+ (eth_port_num) + table_index), 0);
+ /* Clear DA filter other multicast table (Ex_dFOMT) */
+ MV_WRITE((MV64340_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+ (eth_port_num) + table_index), 0);
+ }
+}
+
+/*
+ * eth_clear_mib_counters - Clear all MIB counters
+ *
+ * DESCRIPTION:
+ * This function clears all MIB counters of a specific ethernet port.
+ * A read from the MIB counter will reset the counter.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ *	After reading all MIB counters, the counters reset.
+ *
+ * RETURN:
+ *	None.
+ *
+ */
+static void eth_clear_mib_counters(unsigned int eth_port_num)
+{
+ int i;
+
+ /* Perform dummy reads from MIB counters */
+ for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; i += 4)
+ MV_READ(MV64340_ETH_MIB_COUNTERS_BASE(eth_port_num) + i);
+}
+
+
+/*
+ * ethernet_phy_get - Get the ethernet port PHY address.
+ *
+ * DESCRIPTION:
+ * This routine returns the given ethernet port PHY address.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * None.
+ *
+ * RETURN:
+ * PHY address.
+ *
+ */
+static int ethernet_phy_get(unsigned int eth_port_num)
+{
+ unsigned int reg_data;
+
+ reg_data = MV_READ(MV64340_ETH_PHY_ADDR_REG);
+
+ return ((reg_data >> (5 * eth_port_num)) & 0x1f);
+}
+
+/*
+ * ethernet_phy_reset - Reset Ethernet port PHY.
+ *
+ * DESCRIPTION:
+ *	This routine utilizes the SMI interface to reset the ethernet port PHY.
+ *	The routine waits until the link is up again or the link-up wait times out.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ *	The ethernet port PHY renews its link.
+ *
+ * RETURN:
+ *	false if the PHY link does not come back up before the timeout expires.
+ *	true otherwise.
+ *
+ */
+static int ethernet_phy_reset(unsigned int eth_port_num)
+{
+ unsigned int time_out = 50;
+ unsigned int phy_reg_data;
+
+ /* Reset the PHY */
+ eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
+ phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
+ eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
+
+ /* Poll on the PHY LINK */
+ do {
+ eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data);
+
+ if (time_out-- == 0)
+ return 0;
+ } while (!(phy_reg_data & 0x20));
+
+ return 1;
+}
+
+/*
+ * eth_port_reset - Reset Ethernet port
+ *
+ * DESCRIPTION:
+ * This routine resets the chip by aborting any SDMA engine activity and
+ * clearing the MIB counters. The Receiver and the Transmit unit are in
+ * idle state after this command is performed and the port is disabled.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * Channel activity is halted.
+ *
+ * RETURN:
+ * None.
+ *
+ */
+static void eth_port_reset(unsigned int eth_port_num)
+{
+ unsigned int reg_data;
+
+ /* Stop Tx port activity. Check port Tx activity. */
+ reg_data =
+ MV_READ(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port_num));
+
+ if (reg_data & 0xFF) {
+ /* Issue stop command for active channels only */
+ MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG
+ (eth_port_num), (reg_data << 8));
+
+ /* Wait for all Tx activity to terminate. */
+ do {
+ /* Check port cause register that all Tx queues are stopped */
+ reg_data =
+ MV_READ
+ (MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG
+ (eth_port_num));
+ }
+ while (reg_data & 0xFF);
+ }
+
+ /* Stop Rx port activity. Check port Rx activity. */
+ reg_data =
+ MV_READ(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
+ (eth_port_num));
+
+ if (reg_data & 0xFF) {
+ /* Issue stop command for active channels only */
+ MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
+ (eth_port_num), (reg_data << 8));
+
+ /* Wait for all Rx activity to terminate. */
+ do {
+ /* Check port cause register that all Rx queues are stopped */
+ reg_data =
+ MV_READ
+ (MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
+ (eth_port_num));
+ }
+ while (reg_data & 0xFF);
+ }
+
+
+ /* Clear all MIB counters */
+ eth_clear_mib_counters(eth_port_num);
+
+ /* Reset the Enable bit in the Configuration Register */
+ reg_data =
+ MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num));
+ reg_data &= ~ETH_SERIAL_PORT_ENABLE;
+ MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num), reg_data);
+
+ return;
+}
+
+/*
+ * ethernet_set_config_reg - Set specified bits in configuration register.
+ *
+ * DESCRIPTION:
+ * This function sets specified bits in the given ethernet
+ * configuration register.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ * unsigned int value 32 bit value.
+ *
+ * OUTPUT:
+ * The set bits in the value parameter are set in the configuration
+ * register.
+ *
+ * RETURN:
+ * None.
+ *
+ */
+static void ethernet_set_config_reg(unsigned int eth_port_num,
+ unsigned int value)
+{
+ unsigned int eth_config_reg;
+
+ eth_config_reg =
+ MV_READ(MV64340_ETH_PORT_CONFIG_REG(eth_port_num));
+ eth_config_reg |= value;
+ MV_WRITE(MV64340_ETH_PORT_CONFIG_REG(eth_port_num),
+ eth_config_reg);
+}
+
+/*
+ * ethernet_get_config_reg - Get the port configuration register
+ *
+ * DESCRIPTION:
+ * This function returns the configuration register value of the given
+ * ethernet port.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * None.
+ *
+ * RETURN:
+ * Port configuration register value.
+ */
+static unsigned int ethernet_get_config_reg(unsigned int eth_port_num)
+{
+ unsigned int eth_config_reg;
+
+ eth_config_reg = MV_READ(MV64340_ETH_PORT_CONFIG_EXTEND_REG
+ (eth_port_num));
+ return eth_config_reg;
+}
+
+
+/*
+ * eth_port_read_smi_reg - Read PHY registers
+ *
+ * DESCRIPTION:
+ *	This routine utilizes the SMI interface to interact with the PHY in
+ *	order to perform a PHY register read.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ * unsigned int phy_reg PHY register address offset.
+ * unsigned int *value Register value buffer.
+ *
+ * OUTPUT:
+ * Write the value of a specified PHY register into given buffer.
+ *
+ * RETURN:
+ * false if the PHY is busy or read data is not in valid state.
+ * true otherwise.
+ *
+ */
+static int eth_port_read_smi_reg(unsigned int eth_port_num,
+ unsigned int phy_reg, unsigned int *value)
+{
+ int phy_addr = ethernet_phy_get(eth_port_num);
+ unsigned int time_out = PHY_BUSY_TIMEOUT;
+ unsigned int reg_value;
+
+ /* first check that it is not busy */
+ do {
+ reg_value = MV_READ(MV64340_ETH_SMI_REG);
+ if (time_out-- == 0)
+ return 0;
+ } while (reg_value & ETH_SMI_BUSY);
+
+ /* not busy */
+
+ MV_WRITE(MV64340_ETH_SMI_REG,
+ (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
+
+ time_out = PHY_BUSY_TIMEOUT; /* initialize the time out var again */
+
+ do {
+ reg_value = MV_READ(MV64340_ETH_SMI_REG);
+ if (time_out-- == 0)
+ return 0;
+ } while (reg_value & ETH_SMI_READ_VALID);
+
+ /* Wait for the data to update in the SMI register */
+ for (time_out = 0; time_out < PHY_BUSY_TIMEOUT; time_out++);
+
+ reg_value = MV_READ(MV64340_ETH_SMI_REG);
+
+ *value = reg_value & 0xffff;
+
+ return 1;
+}
+
+/*
+ * eth_port_write_smi_reg - Write to PHY registers
+ *
+ * DESCRIPTION:
+ *	This routine utilizes the SMI interface to interact with the PHY in
+ * order to perform writes to PHY registers.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ * unsigned int phy_reg PHY register address offset.
+ * unsigned int value Register value.
+ *
+ * OUTPUT:
+ * Write the given value to the specified PHY register.
+ *
+ * RETURN:
+ * false if the PHY is busy.
+ * true otherwise.
+ *
+ */
+static int eth_port_write_smi_reg(unsigned int eth_port_num,
+ unsigned int phy_reg, unsigned int value)
+{
+ unsigned int time_out = PHY_BUSY_TIMEOUT;
+ unsigned int reg_value;
+ int phy_addr;
+
+ phy_addr = ethernet_phy_get(eth_port_num);
+
+ /* first check that it is not busy */
+ do {
+ reg_value = MV_READ(MV64340_ETH_SMI_REG);
+ if (time_out-- == 0)
+ return 0;
+ } while (reg_value & ETH_SMI_BUSY);
+
+ /* not busy */
+ MV_WRITE(MV64340_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
+ ETH_SMI_OPCODE_WRITE | (value & 0xffff));
+
+ return 1;
+}
+
+/*
+ * eth_port_send - Send an Ethernet packet
+ *
+ * DESCRIPTION:
+ *	This routine sends a given packet described by the p_pkt_info
+ *	parameter. It supports transmitting a packet spanned over multiple
+ *	buffers. The routine updates the 'curr' and 'first' indexes according
+ *	to the packet segment passed to the routine. In case the packet
+ *	segment is the first, the 'first' index is updated. In any case, the
+ *	'curr' index is updated. If the routine runs into a Tx resource error
+ *	it assigns the 'curr' index the value of 'first'. This way the
+ *	function can abort the Tx process of a packet spanning multiple
+ *	descriptors.
+ *
+ * INPUT:
+ *	struct mv64340_private *mp	Ethernet Port Control struct.
+ * struct pkt_info *p_pkt_info User packet buffer.
+ *
+ * OUTPUT:
+ * Tx ring 'curr' and 'first' indexes are updated.
+ *
+ * RETURN:
+ * ETH_QUEUE_FULL in case of Tx resource error.
+ * ETH_ERROR in case the routine can not access Tx desc ring.
+ * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource.
+ * ETH_OK otherwise.
+ *
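+ * EXAMPLE (illustrative):
+ *	Assume 'curr' is 5 when a three-buffer packet is submitted: 'first'
+ *	is latched at 5 and 'curr' advances as the buffers are queued.  If a
+ *	Tx resource error is hit before the LAST buffer is queued, 'curr' is
+ *	assigned the value of 'first' (5), so the ring looks as it did before
+ *	the attempt.
+ *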
+ */
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+/*
+ * Modified to include the first descriptor pointer in case of SG
+ */
+static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private * mp,
+ struct pkt_info * p_pkt_info)
+{
+ int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc;
+ volatile struct eth_tx_desc *current_descriptor;
+ volatile struct eth_tx_desc *first_descriptor;
+ u32 command_status, first_chip_ptr;
+
+ /* Do not process Tx ring in case of Tx ring resource error */
+ if (mp->tx_resource_err)
+ return ETH_QUEUE_FULL;
+
+ /* Get the Tx Desc ring indexes */
+ tx_desc_curr = mp->tx_curr_desc_q;
+ tx_desc_used = mp->tx_used_desc_q;
+
+ current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
+ if (current_descriptor == NULL)
+ return ETH_ERROR;
+
+ tx_next_desc = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
+ command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
+
+ if (command_status & ETH_TX_FIRST_DESC) {
+ tx_first_desc = tx_desc_curr;
+ mp->tx_first_desc_q = tx_first_desc;
+
+ /* fill first descriptor */
+ first_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
+ first_descriptor->l4i_chk = p_pkt_info->l4i_chk;
+ first_descriptor->cmd_sts = command_status;
+ first_descriptor->byte_cnt = p_pkt_info->byte_cnt;
+ first_descriptor->buf_ptr = p_pkt_info->buf_ptr;
+ first_descriptor->next_desc_ptr = mp->tx_desc_dma +
+ tx_next_desc * sizeof(struct eth_tx_desc);
+ wmb();
+ } else {
+ tx_first_desc = mp->tx_first_desc_q;
+ first_descriptor = &mp->p_tx_desc_area[tx_first_desc];
+ if (first_descriptor == NULL) {
+			printk(KERN_ERR "First desc is NULL !!\n");
+ return ETH_ERROR;
+ }
+ if (command_status & ETH_TX_LAST_DESC)
+ current_descriptor->next_desc_ptr = 0x00000000;
+ else {
+ command_status |= ETH_BUFFER_OWNED_BY_DMA;
+ current_descriptor->next_desc_ptr = mp->tx_desc_dma +
+ tx_next_desc * sizeof(struct eth_tx_desc);
+ }
+ }
+
+ if (p_pkt_info->byte_cnt < 8) {
+		printk(KERN_ERR " < 8 problem \n");
+ return ETH_ERROR;
+ }
+
+ current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
+ current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
+ current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
+ current_descriptor->cmd_sts = command_status;
+
+ mp->tx_skb[tx_desc_curr] = (struct sk_buff*) p_pkt_info->return_info;
+
+ wmb();
+
+ /* Set last desc with DMA ownership and interrupt enable. */
+ if (command_status & ETH_TX_LAST_DESC) {
+ current_descriptor->cmd_sts = command_status |
+ ETH_TX_ENABLE_INTERRUPT |
+ ETH_BUFFER_OWNED_BY_DMA;
+
+ if (!(command_status & ETH_TX_FIRST_DESC))
+ first_descriptor->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;
+ wmb();
+
+ first_chip_ptr = MV_READ(MV64340_ETH_CURRENT_SERVED_TX_DESC_PTR(mp->port_num));
+
+ /* Apply send command */
+ if (first_chip_ptr == 0x00000000)
+ MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(mp->port_num), (struct eth_tx_desc *) mp->tx_desc_dma + tx_first_desc);
+
+ ETH_ENABLE_TX_QUEUE(mp->port_num);
+
+ /*
+ * Finish Tx packet. Update first desc in case of Tx resource
+ * error */
+ tx_first_desc = tx_next_desc;
+ mp->tx_first_desc_q = tx_first_desc;
+ } else {
+ if (! (command_status & ETH_TX_FIRST_DESC) ) {
+ current_descriptor->cmd_sts = command_status;
+ wmb();
+ }
+ }
+
+ /* Check for ring index overlap in the Tx desc ring */
+ if (tx_next_desc == tx_desc_used) {
+ mp->tx_resource_err = 1;
+ mp->tx_curr_desc_q = tx_first_desc;
+
+ return ETH_QUEUE_LAST_RESOURCE;
+ }
+
+ mp->tx_curr_desc_q = tx_next_desc;
+ wmb();
+
+ return ETH_OK;
+}
+#else
+static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private * mp,
+ struct pkt_info * p_pkt_info)
+{
+ int tx_desc_curr;
+ int tx_desc_used;
+ volatile struct eth_tx_desc* current_descriptor;
+ unsigned int command_status;
+
+ /* Do not process Tx ring in case of Tx ring resource error */
+ if (mp->tx_resource_err)
+ return ETH_QUEUE_FULL;
+
+ /* Get the Tx Desc ring indexes */
+ tx_desc_curr = mp->tx_curr_desc_q;
+ tx_desc_used = mp->tx_used_desc_q;
+ current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
+
+ if (current_descriptor == NULL)
+ return ETH_ERROR;
+
+ command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
+
+/* XXX Is this for real ?!?!? */
+ /* Buffers with a payload smaller than 8 bytes must be aligned to a
+ * 64-bit boundary. We use the memory allocated for Tx descriptor.
+ * This memory is located in TX_BUF_OFFSET_IN_DESC offset within the
+ * Tx descriptor. */
+ if (p_pkt_info->byte_cnt <= 8) {
+ printk(KERN_ERR
+ "You have failed in the < 8 bytes errata - fixme\n");
+ return ETH_ERROR;
+ }
+ current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
+ current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
+ mp->tx_skb[tx_desc_curr] = (struct sk_buff *) p_pkt_info->return_info;
+
+ mb();
+
+ /* Set last desc with DMA ownership and interrupt enable. */
+ current_descriptor->cmd_sts = command_status |
+ ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
+
+ /* Apply send command */
+ ETH_ENABLE_TX_QUEUE(mp->port_num);
+
+ /* Finish Tx packet. Update first desc in case of Tx resource error */
+ tx_desc_curr = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
+
+ /* Update the current descriptor */
+ mp->tx_curr_desc_q = tx_desc_curr;
+
+ /* Check for ring index overlap in the Tx desc ring */
+ if (tx_desc_curr == tx_desc_used) {
+ mp->tx_resource_err = 1;
+ return ETH_QUEUE_LAST_RESOURCE;
+ }
+
+ return ETH_OK;
+}
+#endif
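+
+/*
+ * Illustrative usage sketch (not part of the driver proper): queueing a
+ * two-fragment packet through eth_port_send() as described above.  The
+ * fragment variables, and the choice to carry the skb in return_info of
+ * the last fragment only, are assumptions made for illustration; return
+ * status checking is omitted for brevity.
+ *
+ *	struct pkt_info pkt_info;
+ *
+ *	pkt_info.byte_cnt = frag0_len;
+ *	pkt_info.buf_ptr = frag0_dma_addr;
+ *	pkt_info.l4i_chk = 0;
+ *	pkt_info.cmd_sts = ETH_TX_FIRST_DESC;
+ *	pkt_info.return_info = NULL;
+ *	eth_port_send(mp, &pkt_info);
+ *
+ *	pkt_info.byte_cnt = frag1_len;
+ *	pkt_info.buf_ptr = frag1_dma_addr;
+ *	pkt_info.cmd_sts = ETH_TX_LAST_DESC;
+ *	pkt_info.return_info = skb;
+ *	eth_port_send(mp, &pkt_info);
+ */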
+
+/*
+ * eth_tx_return_desc - Free all used Tx descriptors
+ *
+ * DESCRIPTION:
+ * This routine returns the transmitted packet information to the caller.
+ *	It uses the 'first' index to support Tx desc return in case a transmit
+ *	of a packet spanned over multiple buffers is still in progress.
+ * In case the Tx queue was in "resource error" condition, where there are
+ * no available Tx resources, the function resets the resource error flag.
+ *
+ * INPUT:
+ *	struct mv64340_private *mp	Ethernet Port Control struct.
+ * struct pkt_info *p_pkt_info User packet buffer.
+ *
+ * OUTPUT:
+ * Tx ring 'first' and 'used' indexes are updated.
+ *
+ * RETURN:
+ * ETH_ERROR in case the routine can not access Tx desc ring.
+ *	ETH_RETRY in case there is transmission in progress.
+ * ETH_END_OF_JOB if the routine has nothing to release.
+ * ETH_OK otherwise.
+ *
+ */
+static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv64340_private * mp,
+ struct pkt_info * p_pkt_info)
+{
+ int tx_desc_used, tx_desc_curr;
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+ int tx_first_desc;
+#endif
+ volatile struct eth_tx_desc *p_tx_desc_used;
+ unsigned int command_status;
+
+ /* Get the Tx Desc ring indexes */
+ tx_desc_curr = mp->tx_curr_desc_q;
+ tx_desc_used = mp->tx_used_desc_q;
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+ tx_first_desc = mp->tx_first_desc_q;
+#endif
+ p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
+
+ /* XXX Sanity check */
+ if (p_tx_desc_used == NULL)
+ return ETH_ERROR;
+
+ command_status = p_tx_desc_used->cmd_sts;
+
+ /* Still transmitting... */
+#ifndef MV64340_CHECKSUM_OFFLOAD_TX
+ if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
+ return ETH_RETRY;
+#endif
+ /* Stop release. About to overlap the current available Tx descriptor */
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+ if (tx_desc_used == tx_first_desc && !mp->tx_resource_err)
+ return ETH_END_OF_JOB;
+#else
+ if (tx_desc_used == tx_desc_curr && !mp->tx_resource_err)
+ return ETH_END_OF_JOB;
+#endif
+
+ /* Pass the packet information to the caller */
+ p_pkt_info->cmd_sts = command_status;
+ p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
+ mp->tx_skb[tx_desc_used] = NULL;
+
+ /* Update the next descriptor to release. */
+ mp->tx_used_desc_q = (tx_desc_used + 1) % MV64340_TX_QUEUE_SIZE;
+
+ /* Any Tx return cancels the Tx resource error status */
+ mp->tx_resource_err = 0;
+
+ return ETH_OK;
+}
+
+/*
+ * eth_port_receive - Get received information from Rx ring.
+ *
+ * DESCRIPTION:
+ * This routine returns the received data to the caller. There is no
+ * data copying during routine operation. All information is returned
+ *	using the packet information struct pointer passed by the caller.
+ * If the routine exhausts Rx ring resources then the resource error flag
+ * is set.
+ *
+ * INPUT:
+ *	struct mv64340_private *mp	Ethernet Port Control struct.
+ * struct pkt_info *p_pkt_info User packet buffer.
+ *
+ * OUTPUT:
+ * Rx ring current and used indexes are updated.
+ *
+ * RETURN:
+ * ETH_ERROR in case the routine can not access Rx desc ring.
+ * ETH_QUEUE_FULL if Rx ring resources are exhausted.
+ * ETH_END_OF_JOB if there is no received data.
+ * ETH_OK otherwise.
+ */
+static ETH_FUNC_RET_STATUS eth_port_receive(struct mv64340_private * mp,
+ struct pkt_info * p_pkt_info)
+{
+ int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+ volatile struct eth_rx_desc * p_rx_desc;
+ unsigned int command_status;
+
+ /* Do not process Rx ring in case of Rx ring resource error */
+ if (mp->rx_resource_err)
+ return ETH_QUEUE_FULL;
+
+	/* Get the Rx Desc ring 'curr' and 'used' indexes */
+ rx_curr_desc = mp->rx_curr_desc_q;
+ rx_used_desc = mp->rx_used_desc_q;
+
+ p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];
+
+ /* The following parameters are used to save readings from memory */
+ command_status = p_rx_desc->cmd_sts;
+
+ /* Nothing to receive... */
+ if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
+ return ETH_END_OF_JOB;
+
+ p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
+ p_pkt_info->cmd_sts = command_status;
+ p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
+ p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
+ p_pkt_info->l4i_chk = p_rx_desc->buf_size;
+
+ /* Clean the return info field to indicate that the packet has been */
+ /* moved to the upper layers */
+ mp->rx_skb[rx_curr_desc] = NULL;
+
+ /* Update current index in data structure */
+ rx_next_curr_desc = (rx_curr_desc + 1) % MV64340_RX_QUEUE_SIZE;
+ mp->rx_curr_desc_q = rx_next_curr_desc;
+
+ /* Rx descriptors exhausted. Set the Rx ring resource error flag */
+ if (rx_next_curr_desc == rx_used_desc)
+ mp->rx_resource_err = 1;
+
+ mb();
+ return ETH_OK;
+}
+
+/*
+ * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
+ *
+ * DESCRIPTION:
+ * This routine returns a Rx buffer back to the Rx ring. It retrieves the
+ *	next 'used' descriptor and attaches the returned buffer to it.
+ * In case the Rx ring was in "resource error" condition, where there are
+ * no available Rx resources, the function resets the resource error flag.
+ *
+ * INPUT:
+ *	struct mv64340_private *mp	Ethernet Port Control struct.
+ * struct pkt_info *p_pkt_info Information on the returned buffer.
+ *
+ * OUTPUT:
+ * New available Rx resource in Rx descriptor ring.
+ *
+ * RETURN:
+ * ETH_ERROR in case the routine can not access Rx desc ring.
+ * ETH_OK otherwise.
+ */
+static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv64340_private * mp,
+ struct pkt_info * p_pkt_info)
+{
+ int used_rx_desc; /* Where to return Rx resource */
+ volatile struct eth_rx_desc* p_used_rx_desc;
+
+ /* Get 'used' Rx descriptor */
+ used_rx_desc = mp->rx_used_desc_q;
+ p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];
+
+ p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
+ p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
+ mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;
+
+ /* Flush the write pipe */
+ mb();
+
+ /* Return the descriptor to DMA ownership */
+ p_used_rx_desc->cmd_sts =
+ ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
+
+ /* Flush descriptor and CPU pipe */
+ mb();
+
+ /* Move the used descriptor pointer to the next descriptor */
+ mp->rx_used_desc_q = (used_rx_desc + 1) % MV64340_RX_QUEUE_SIZE;
+
+ /* Any Rx return cancels the Rx resource error status */
+ mp->rx_resource_err = 0;
+
+ return ETH_OK;
+}
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
new file mode 100644
index 00000000000000..46a057d0c31f4a
--- /dev/null
+++ b/drivers/net/mv643xx_eth.h
@@ -0,0 +1,601 @@
+#ifndef __MV64340_ETH_H__
+#define __MV64340_ETH_H__
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <linux/mv643xx.h>
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+
+/*
+ * The first part is the high level driver of the gigE ethernet ports.
+ */
+
+#define ETH_PORT0_IRQ_NUM 48 /* main high register, bit0 */
+#define ETH_PORT1_IRQ_NUM ETH_PORT0_IRQ_NUM+1 /* main high register, bit1 */
+#define ETH_PORT2_IRQ_NUM ETH_PORT0_IRQ_NUM+2 /* main high register, bit2 */
+
+/* Checksum offload for Tx works */
+#define MV64340_CHECKSUM_OFFLOAD_TX
+#define MV64340_NAPI
+#define MV64340_TX_FAST_REFILL
+#undef MV64340_COAL
+
+/*
+ * Number of RX / TX descriptors on RX / TX rings.
+ * Note that allocating RX descriptors means allocating the RX ring AND a
+ * preallocated RX buffer (skb) for each descriptor.
+ * Allocating TX descriptors only allocates the TX descriptor ring,
+ * with no preallocated TX buffers (skb's are allocated by higher layers).
+ */
+
+/* Default TX ring size is 1000 descriptors */
+#define MV64340_TX_QUEUE_SIZE 1000
+
+/* Default RX ring size is 400 descriptors */
+#define MV64340_RX_QUEUE_SIZE 400
+
+#define MV64340_TX_COAL 100
+#ifdef MV64340_COAL
+#define MV64340_RX_COAL 100
+#endif
+
+
+/*
+ * The second part is the low level driver of the gigE ethernet ports.
+ */
+
+
+/*
+ * Header File for : MV-643xx network interface header
+ *
+ * DESCRIPTION:
+ *	This header file contains macros, typedefs and function declarations
+ *	for the Marvell Gigabit Ethernet Controller.
+ *
+ * DEPENDENCIES:
+ * None.
+ *
+ */
+
+/* Default port configuration value */
+#define PORT_CONFIG_VALUE \
+ ETH_UNICAST_NORMAL_MODE | \
+ ETH_DEFAULT_RX_QUEUE_0 | \
+ ETH_DEFAULT_RX_ARP_QUEUE_0 | \
+ ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP | \
+ ETH_RECEIVE_BC_IF_IP | \
+ ETH_RECEIVE_BC_IF_ARP | \
+ ETH_CAPTURE_TCP_FRAMES_DIS | \
+ ETH_CAPTURE_UDP_FRAMES_DIS | \
+ ETH_DEFAULT_RX_TCP_QUEUE_0 | \
+ ETH_DEFAULT_RX_UDP_QUEUE_0 | \
+ ETH_DEFAULT_RX_BPDU_QUEUE_0
+
+/* Default port extend configuration value */
+#define PORT_CONFIG_EXTEND_VALUE \
+ ETH_SPAN_BPDU_PACKETS_AS_NORMAL | \
+ ETH_PARTITION_DISABLE
+
+
+/* Default sdma control value */
+#define PORT_SDMA_CONFIG_VALUE \
+ ETH_RX_BURST_SIZE_16_64BIT | \
+ GT_ETH_IPG_INT_RX(0) | \
+	ETH_TX_BURST_SIZE_16_64BIT
+
+#define GT_ETH_IPG_INT_RX(value) \
+ ((value & 0x3fff) << 8)
+
+/* Default port serial control value */
+#define PORT_SERIAL_CONTROL_VALUE \
+ ETH_FORCE_LINK_PASS | \
+ ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \
+ ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \
+ ETH_ADV_SYMMETRIC_FLOW_CTRL | \
+ ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
+ ETH_FORCE_BP_MODE_NO_JAM | \
+ BIT9 | \
+ ETH_DO_NOT_FORCE_LINK_FAIL | \
+ ETH_RETRANSMIT_16_ATTEMPTS | \
+ ETH_ENABLE_AUTO_NEG_SPEED_GMII | \
+ ETH_DTE_ADV_0 | \
+ ETH_DISABLE_AUTO_NEG_BYPASS | \
+ ETH_AUTO_NEG_NO_CHANGE | \
+ ETH_MAX_RX_PACKET_9700BYTE | \
+ ETH_CLR_EXT_LOOPBACK | \
+ ETH_SET_FULL_DUPLEX_MODE | \
+ ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
+
+#define RX_BUFFER_MAX_SIZE 0x4000000
+#define TX_BUFFER_MAX_SIZE 0x4000000
+
+/* MAC accept/reject macros */
+#define ACCEPT_MAC_ADDR 0
+#define REJECT_MAC_ADDR 1
+
+/* Buffer offset from buffer pointer */
+#define RX_BUF_OFFSET 0x2
+
+/* Gigabit Ethernet Unit Global Registers */
+
+/* MIB Counters register definitions */
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
+#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
+#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
+#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
+#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
+#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
+#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
+#define ETH_MIB_FRAMES_64_OCTETS 0x20
+#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
+#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
+#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
+#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
+#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
+#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
+#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
+#define ETH_MIB_GOOD_FRAMES_SENT 0x40
+#define ETH_MIB_EXCESSIVE_COLLISION 0x44
+#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
+#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
+#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
+#define ETH_MIB_FC_SENT 0x54
+#define ETH_MIB_GOOD_FC_RECEIVED 0x58
+#define ETH_MIB_BAD_FC_RECEIVED 0x5c
+#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
+#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
+#define ETH_MIB_OVERSIZE_RECEIVED 0x68
+#define ETH_MIB_JABBER_RECEIVED 0x6c
+#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
+#define ETH_MIB_BAD_CRC_EVENT 0x74
+#define ETH_MIB_COLLISION 0x78
+#define ETH_MIB_LATE_COLLISION 0x7c
+
+/* Port serial status reg (PSR) */
+#define ETH_INTERFACE_GMII_MII 0
+#define ETH_INTERFACE_PCM BIT0
+#define ETH_LINK_IS_DOWN 0
+#define ETH_LINK_IS_UP BIT1
+#define ETH_PORT_AT_HALF_DUPLEX 0
+#define ETH_PORT_AT_FULL_DUPLEX BIT2
+#define ETH_RX_FLOW_CTRL_DISABLED 0
+#define ETH_RX_FLOW_CTRL_ENBALED BIT3
+#define ETH_GMII_SPEED_100_10 0
+#define ETH_GMII_SPEED_1000 BIT4
+#define ETH_MII_SPEED_10 0
+#define ETH_MII_SPEED_100 BIT5
+#define ETH_NO_TX 0
+#define ETH_TX_IN_PROGRESS BIT7
+#define ETH_BYPASS_NO_ACTIVE 0
+#define ETH_BYPASS_ACTIVE BIT8
+#define ETH_PORT_NOT_AT_PARTITION_STATE 0
+#define ETH_PORT_AT_PARTITION_STATE BIT9
+#define ETH_PORT_TX_FIFO_NOT_EMPTY 0
+#define ETH_PORT_TX_FIFO_EMPTY BIT10
+
+
+/* These macros describes the Port configuration reg (Px_cR) bits */
+#define ETH_UNICAST_NORMAL_MODE 0
+#define ETH_UNICAST_PROMISCUOUS_MODE BIT0
+#define ETH_DEFAULT_RX_QUEUE_0 0
+#define ETH_DEFAULT_RX_QUEUE_1 BIT1
+#define ETH_DEFAULT_RX_QUEUE_2 BIT2
+#define ETH_DEFAULT_RX_QUEUE_3 (BIT2 | BIT1)
+#define ETH_DEFAULT_RX_QUEUE_4 BIT3
+#define ETH_DEFAULT_RX_QUEUE_5 (BIT3 | BIT1)
+#define ETH_DEFAULT_RX_QUEUE_6 (BIT3 | BIT2)
+#define ETH_DEFAULT_RX_QUEUE_7 (BIT3 | BIT2 | BIT1)
+#define ETH_DEFAULT_RX_ARP_QUEUE_0 0
+#define ETH_DEFAULT_RX_ARP_QUEUE_1 BIT4
+#define ETH_DEFAULT_RX_ARP_QUEUE_2 BIT5
+#define ETH_DEFAULT_RX_ARP_QUEUE_3 (BIT5 | BIT4)
+#define ETH_DEFAULT_RX_ARP_QUEUE_4 BIT6
+#define ETH_DEFAULT_RX_ARP_QUEUE_5 (BIT6 | BIT4)
+#define ETH_DEFAULT_RX_ARP_QUEUE_6 (BIT6 | BIT5)
+#define ETH_DEFAULT_RX_ARP_QUEUE_7 (BIT6 | BIT5 | BIT4)
+#define ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP 0
+#define ETH_REJECT_BC_IF_NOT_IP_OR_ARP BIT7
+#define ETH_RECEIVE_BC_IF_IP 0
+#define ETH_REJECT_BC_IF_IP BIT8
+#define ETH_RECEIVE_BC_IF_ARP 0
+#define ETH_REJECT_BC_IF_ARP BIT9
+#define ETH_TX_AM_NO_UPDATE_ERROR_SUMMARY BIT12
+#define ETH_CAPTURE_TCP_FRAMES_DIS 0
+#define ETH_CAPTURE_TCP_FRAMES_EN BIT14
+#define ETH_CAPTURE_UDP_FRAMES_DIS 0
+#define ETH_CAPTURE_UDP_FRAMES_EN BIT15
+#define ETH_DEFAULT_RX_TCP_QUEUE_0 0
+#define ETH_DEFAULT_RX_TCP_QUEUE_1 BIT16
+#define ETH_DEFAULT_RX_TCP_QUEUE_2 BIT17
+#define ETH_DEFAULT_RX_TCP_QUEUE_3 (BIT17 | BIT16)
+#define ETH_DEFAULT_RX_TCP_QUEUE_4 BIT18
+#define ETH_DEFAULT_RX_TCP_QUEUE_5 (BIT18 | BIT16)
+#define ETH_DEFAULT_RX_TCP_QUEUE_6 (BIT18 | BIT17)
+#define ETH_DEFAULT_RX_TCP_QUEUE_7 (BIT18 | BIT17 | BIT16)
+#define ETH_DEFAULT_RX_UDP_QUEUE_0 0
+#define ETH_DEFAULT_RX_UDP_QUEUE_1 BIT19
+#define ETH_DEFAULT_RX_UDP_QUEUE_2 BIT20
+#define ETH_DEFAULT_RX_UDP_QUEUE_3 (BIT20 | BIT19)
+#define ETH_DEFAULT_RX_UDP_QUEUE_4		BIT21
+#define ETH_DEFAULT_RX_UDP_QUEUE_5 (BIT21 | BIT19)
+#define ETH_DEFAULT_RX_UDP_QUEUE_6 (BIT21 | BIT20)
+#define ETH_DEFAULT_RX_UDP_QUEUE_7 (BIT21 | BIT20 | BIT19)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_0 0
+#define ETH_DEFAULT_RX_BPDU_QUEUE_1 BIT22
+#define ETH_DEFAULT_RX_BPDU_QUEUE_2 BIT23
+#define ETH_DEFAULT_RX_BPDU_QUEUE_3 (BIT23 | BIT22)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_4 BIT24
+#define ETH_DEFAULT_RX_BPDU_QUEUE_5 (BIT24 | BIT22)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_6 (BIT24 | BIT23)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_7 (BIT24 | BIT23 | BIT22)
+
+
+/* These macros describes the Port configuration extend reg (Px_cXR) bits*/
+#define ETH_CLASSIFY_EN BIT0
+#define ETH_SPAN_BPDU_PACKETS_AS_NORMAL 0
+#define ETH_SPAN_BPDU_PACKETS_TO_RX_QUEUE_7 BIT1
+#define ETH_PARTITION_DISABLE 0
+#define ETH_PARTITION_ENABLE BIT2
+
+
+/* Tx/Rx queue command reg (RQCR/TQCR)*/
+#define ETH_QUEUE_0_ENABLE BIT0
+#define ETH_QUEUE_1_ENABLE BIT1
+#define ETH_QUEUE_2_ENABLE BIT2
+#define ETH_QUEUE_3_ENABLE BIT3
+#define ETH_QUEUE_4_ENABLE BIT4
+#define ETH_QUEUE_5_ENABLE BIT5
+#define ETH_QUEUE_6_ENABLE BIT6
+#define ETH_QUEUE_7_ENABLE BIT7
+#define ETH_QUEUE_0_DISABLE BIT8
+#define ETH_QUEUE_1_DISABLE BIT9
+#define ETH_QUEUE_2_DISABLE BIT10
+#define ETH_QUEUE_3_DISABLE BIT11
+#define ETH_QUEUE_4_DISABLE BIT12
+#define ETH_QUEUE_5_DISABLE BIT13
+#define ETH_QUEUE_6_DISABLE BIT14
+#define ETH_QUEUE_7_DISABLE BIT15
+
+
+/* These macros describes the Port Sdma configuration reg (SDCR) bits */
+#define ETH_RIFB BIT0
+#define ETH_RX_BURST_SIZE_1_64BIT 0
+#define ETH_RX_BURST_SIZE_2_64BIT BIT1
+#define ETH_RX_BURST_SIZE_4_64BIT BIT2
+#define ETH_RX_BURST_SIZE_8_64BIT (BIT2 | BIT1)
+#define ETH_RX_BURST_SIZE_16_64BIT BIT3
+#define ETH_BLM_RX_NO_SWAP BIT4
+#define ETH_BLM_RX_BYTE_SWAP 0
+#define ETH_BLM_TX_NO_SWAP BIT5
+#define ETH_BLM_TX_BYTE_SWAP 0
+#define ETH_DESCRIPTORS_BYTE_SWAP BIT6
+#define ETH_DESCRIPTORS_NO_SWAP 0
+#define ETH_TX_BURST_SIZE_1_64BIT 0
+#define ETH_TX_BURST_SIZE_2_64BIT BIT22
+#define ETH_TX_BURST_SIZE_4_64BIT BIT23
+#define ETH_TX_BURST_SIZE_8_64BIT (BIT23 | BIT22)
+#define ETH_TX_BURST_SIZE_16_64BIT BIT24
+
+
+
+/* These macros describes the Port serial control reg (PSCR) bits */
+#define ETH_SERIAL_PORT_DISABLE 0
+#define ETH_SERIAL_PORT_ENABLE BIT0
+#define ETH_FORCE_LINK_PASS BIT1
+#define ETH_DO_NOT_FORCE_LINK_PASS 0
+#define ETH_ENABLE_AUTO_NEG_FOR_DUPLX 0
+#define ETH_DISABLE_AUTO_NEG_FOR_DUPLX BIT2
+#define ETH_ENABLE_AUTO_NEG_FOR_FLOW_CTRL 0
+#define ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL BIT3
+#define ETH_ADV_NO_FLOW_CTRL 0
+#define ETH_ADV_SYMMETRIC_FLOW_CTRL BIT4
+#define ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX 0
+#define ETH_FORCE_FC_MODE_TX_PAUSE_DIS BIT5
+#define ETH_FORCE_BP_MODE_NO_JAM 0
+#define ETH_FORCE_BP_MODE_JAM_TX BIT7
+#define ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR BIT8
+#define ETH_FORCE_LINK_FAIL 0
+#define ETH_DO_NOT_FORCE_LINK_FAIL BIT10
+#define ETH_RETRANSMIT_16_ATTEMPTS 0
+#define ETH_RETRANSMIT_FOREVER BIT11
+#define ETH_DISABLE_AUTO_NEG_SPEED_GMII BIT13
+#define ETH_ENABLE_AUTO_NEG_SPEED_GMII 0
+#define ETH_DTE_ADV_0 0
+#define ETH_DTE_ADV_1 BIT14
+#define ETH_DISABLE_AUTO_NEG_BYPASS 0
+#define ETH_ENABLE_AUTO_NEG_BYPASS BIT15
+#define ETH_AUTO_NEG_NO_CHANGE 0
+#define ETH_RESTART_AUTO_NEG BIT16
+#define ETH_MAX_RX_PACKET_1518BYTE 0
+#define ETH_MAX_RX_PACKET_1522BYTE BIT17
+#define ETH_MAX_RX_PACKET_1552BYTE BIT18
+#define ETH_MAX_RX_PACKET_9022BYTE (BIT18 | BIT17)
+#define ETH_MAX_RX_PACKET_9192BYTE BIT19
+#define ETH_MAX_RX_PACKET_9700BYTE (BIT19 | BIT17)
+#define ETH_SET_EXT_LOOPBACK BIT20
+#define ETH_CLR_EXT_LOOPBACK 0
+#define ETH_SET_FULL_DUPLEX_MODE BIT21
+#define ETH_SET_HALF_DUPLEX_MODE 0
+#define ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX BIT22
+#define ETH_DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX 0
+#define ETH_SET_GMII_SPEED_TO_10_100 0
+#define ETH_SET_GMII_SPEED_TO_1000 BIT23
+#define ETH_SET_MII_SPEED_TO_10 0
+#define ETH_SET_MII_SPEED_TO_100 BIT24
+
+
+/* SMI reg */
+#define ETH_SMI_BUSY			BIT28	/* 0 - ready, 1 - operation in progress */
+#define ETH_SMI_READ_VALID		BIT27	/* 0 - read in progress, 1 - read data valid */
+#define ETH_SMI_OPCODE_WRITE		0	/* opcode: write to a PHY register */
+#define ETH_SMI_OPCODE_READ		BIT26	/* opcode: read from a PHY register */
+
+/* SDMA command status fields macros */
+
+/* Tx & Rx descriptors status */
+#define ETH_ERROR_SUMMARY (BIT0)
+
+/* Tx & Rx descriptors command */
+#define ETH_BUFFER_OWNED_BY_DMA (BIT31)
+
+/* Tx descriptors status */
+#define ETH_LC_ERROR (0 )
+#define ETH_UR_ERROR (BIT1 )
+#define ETH_RL_ERROR (BIT2 )
+#define ETH_LLC_SNAP_FORMAT (BIT9 )
+
+/* Rx descriptors status */
+#define ETH_CRC_ERROR (0 )
+#define ETH_OVERRUN_ERROR (BIT1 )
+#define ETH_MAX_FRAME_LENGTH_ERROR (BIT2 )
+#define ETH_RESOURCE_ERROR ((BIT2 | BIT1))
+#define ETH_VLAN_TAGGED (BIT19)
+#define ETH_BPDU_FRAME (BIT20)
+#define ETH_TCP_FRAME_OVER_IP_V_4 (0 )
+#define ETH_UDP_FRAME_OVER_IP_V_4 (BIT21)
+#define ETH_OTHER_FRAME_TYPE (BIT22)
+#define ETH_LAYER_2_IS_ETH_V_2 (BIT23)
+#define ETH_FRAME_TYPE_IP_V_4 (BIT24)
+#define ETH_FRAME_HEADER_OK (BIT25)
+#define ETH_RX_LAST_DESC (BIT26)
+#define ETH_RX_FIRST_DESC (BIT27)
+#define ETH_UNKNOWN_DESTINATION_ADDR (BIT28)
+#define ETH_RX_ENABLE_INTERRUPT (BIT29)
+#define ETH_LAYER_4_CHECKSUM_OK (BIT30)
+
+/* Rx descriptors byte count */
+#define ETH_FRAME_FRAGMENTED (BIT2)
+
+/* Tx descriptors command */
+#define ETH_LAYER_4_CHECKSUM_FIRST_DESC (BIT10)
+#define ETH_FRAME_SET_TO_VLAN (BIT15)
+#define ETH_TCP_FRAME (0 )
+#define ETH_UDP_FRAME (BIT16)
+#define ETH_GEN_TCP_UDP_CHECKSUM (BIT17)
+#define ETH_GEN_IP_V_4_CHECKSUM (BIT18)
+#define ETH_ZERO_PADDING (BIT19)
+#define ETH_TX_LAST_DESC (BIT20)
+#define ETH_TX_FIRST_DESC (BIT21)
+#define ETH_GEN_CRC (BIT22)
+#define ETH_TX_ENABLE_INTERRUPT (BIT23)
+#define ETH_AUTO_MODE (BIT30)
+
+/* typedefs */
+
+typedef enum _eth_func_ret_status {
+ ETH_OK, /* Returned as expected. */
+ ETH_ERROR, /* Fundamental error. */
+ ETH_RETRY, /* Could not process request. Try later. */
+ ETH_END_OF_JOB, /* Ring has nothing to process. */
+ ETH_QUEUE_FULL, /* Ring resource error. */
+ ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */
+} ETH_FUNC_RET_STATUS;
+
+typedef enum _eth_target {
+ ETH_TARGET_DRAM,
+ ETH_TARGET_DEVICE,
+ ETH_TARGET_CBS,
+ ETH_TARGET_PCI0,
+ ETH_TARGET_PCI1
+} ETH_TARGET;
+
+/* These are for big-endian machines. Little endian needs different
+ * definitions.
+ */
+#if defined(__BIG_ENDIAN)
+struct eth_rx_desc {
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 buf_size; /* Buffer size */
+ u32 cmd_sts; /* Descriptor command status */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+};
+
+struct eth_tx_desc {
+ u16 byte_cnt; /* buffer byte count */
+ u16 l4i_chk; /* CPU provided TCP checksum */
+ u32 cmd_sts; /* Command/status field */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+ u32 buf_ptr; /* pointer to buffer for this descriptor */
+};
+
+#elif defined(__LITTLE_ENDIAN)
+struct eth_rx_desc {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 buf_size; /* Buffer size */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+};
+
+struct eth_tx_desc {
+ u32 cmd_sts; /* Command/status field */
+ u16 l4i_chk; /* CPU provided TCP checksum */
+ u16 byte_cnt; /* buffer byte count */
+ u32 buf_ptr; /* pointer to buffer for this descriptor */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+};
+#else
+#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
+#endif
+
+/* Unified struct for Rx and Tx operations. The user is not required to */
+/* be familiar with either the Tx or the Rx descriptor layout. */
+struct pkt_info {
+ unsigned short byte_cnt; /* Descriptor buffer byte count */
+ unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */
+ unsigned int cmd_sts; /* Descriptor command status */
+ dma_addr_t buf_ptr; /* Descriptor buffer pointer */
+ struct sk_buff * return_info; /* User resource return information */
+};
+
+
+/* Ethernet port specific information */
+
+struct mv64340_private {
+ int port_num; /* User Ethernet port number */
+ u8 port_mac_addr[6]; /* User defined port MAC address. */
+ u32 port_config; /* User port configuration value */
+ u32 port_config_extend; /* User port config extend value */
+ u32 port_sdma_config; /* User port SDMA config value */
+ u32 port_serial_control; /* User port serial control value */
+ u32 port_tx_queue_command; /* Port active Tx queues summary */
+ u32 port_rx_queue_command; /* Port active Rx queues summary */
+
+ int rx_resource_err; /* Rx ring resource error flag */
+ int tx_resource_err; /* Tx ring resource error flag */
+
+	/* Tx/Rx ring management index fields. For driver use */
+
+ /* Next available and first returning Rx resource */
+ int rx_curr_desc_q, rx_used_desc_q;
+
+ /* Next available and first returning Tx resource */
+ int tx_curr_desc_q, tx_used_desc_q;
+#ifdef MV64340_CHECKSUM_OFFLOAD_TX
+ int tx_first_desc_q;
+#endif
+
+#ifdef MV64340_TX_FAST_REFILL
+ u32 tx_clean_threshold;
+#endif
+
+ volatile struct eth_rx_desc * p_rx_desc_area;
+ dma_addr_t rx_desc_dma;
+ unsigned int rx_desc_area_size;
+ struct sk_buff * rx_skb[MV64340_RX_QUEUE_SIZE];
+
+ volatile struct eth_tx_desc * p_tx_desc_area;
+ dma_addr_t tx_desc_dma;
+ unsigned int tx_desc_area_size;
+ struct sk_buff * tx_skb[MV64340_TX_QUEUE_SIZE];
+
+ struct work_struct tx_timeout_task;
+
+ /*
+ * Former struct mv64340_eth_priv members start here
+ */
+ struct net_device_stats stats;
+ spinlock_t lock;
+ /* Size of Tx Ring per queue */
+ unsigned int tx_ring_size;
+	/* Amount of SKBs outstanding on Tx queue */
+ unsigned int tx_ring_skbs;
+ /* Size of Rx Ring per queue */
+ unsigned int rx_ring_size;
+	/* Amount of SKBs allocated to Rx Ring per queue */
+ unsigned int rx_ring_skbs;
+
+ /*
+ * rx_task used to fill RX ring out of bottom half context
+ */
+ struct work_struct rx_task;
+
+ /*
+ * Used in case RX Ring is empty, which can be caused when
+ * system does not have resources (skb's)
+ */
+ struct timer_list timeout;
+ long rx_task_busy __attribute__ ((aligned(SMP_CACHE_BYTES)));
+ unsigned rx_timer_flag;
+
+ u32 rx_int_coal;
+ u32 tx_int_coal;
+};
+
+/* ethernet.h API list */
+
+/* Port operation control routines */
+static void eth_port_init(struct mv64340_private *mp);
+static void eth_port_reset(unsigned int eth_port_num);
+static int eth_port_start(struct mv64340_private *mp);
+
+static void ethernet_set_config_reg(unsigned int eth_port_num,
+ unsigned int value);
+static unsigned int ethernet_get_config_reg(unsigned int eth_port_num);
+
+/* Port MAC address routines */
+static void eth_port_uc_addr_set(unsigned int eth_port_num,
+ unsigned char *p_addr);
+
+/* PHY and MIB routines */
+static int ethernet_phy_reset(unsigned int eth_port_num);
+
+static int eth_port_write_smi_reg(unsigned int eth_port_num,
+ unsigned int phy_reg,
+ unsigned int value);
+
+static int eth_port_read_smi_reg(unsigned int eth_port_num,
+ unsigned int phy_reg,
+ unsigned int *value);
+
+static void eth_clear_mib_counters(unsigned int eth_port_num);
+
+/* Port data flow control routines */
+static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private *mp,
+ struct pkt_info * p_pkt_info);
+static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv64340_private *mp,
+ struct pkt_info * p_pkt_info);
+static ETH_FUNC_RET_STATUS eth_port_receive(struct mv64340_private *mp,
+ struct pkt_info * p_pkt_info);
+static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv64340_private *mp,
+ struct pkt_info * p_pkt_info);
+
+#endif /* __MV64340_ETH_H__ */
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 88c626afec04e7..55b974eae26563 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -750,6 +750,9 @@ static void netdev_error(struct net_device *dev, int intr_status);
static void netdev_rx(struct net_device *dev);
static void netdev_tx_done(struct net_device *dev);
static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void natsemi_poll_controller(struct net_device *dev);
+#endif
static void __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __get_stats(struct net_device *dev);
@@ -920,6 +923,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
dev->do_ioctl = &netdev_ioctl;
dev->tx_timeout = &tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = &natsemi_poll_controller;
+#endif
if (mtu)
dev->mtu = mtu;
@@ -2364,6 +2370,15 @@ static struct net_device_stats *get_stats(struct net_device *dev)
return &np->stats;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void natsemi_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ intr_handler(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
#define HASH_TABLE 0x200
static void __set_rx_mode(struct net_device *dev)
{
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index e686cb44096b8b..0b3bb03d85019b 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -7,7 +7,7 @@
Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
May 20 2002 - Add link status force-mode and TBI mode support.
=========================================================================
- 1. The media can be forced in 5 modes.
+ 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
Command: 'insmod r8169 media = SET_MEDIA'
Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mpbs Half-duplex.
@@ -41,6 +41,7 @@ VERSION 1.2 <2002/11/30>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
+#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
@@ -64,6 +65,14 @@ VERSION 1.2 <2002/11/30>
#define dprintk(fmt, args...) do {} while (0)
#endif /* RTL8169_DEBUG */
+#ifdef CONFIG_R8169_NAPI
+#define rtl8169_rx_skb netif_receive_skb
+#define rtl8169_rx_quota(count, quota) min(count, quota)
+#else
+#define rtl8169_rx_skb netif_rx
+#define rtl8169_rx_quota(count, quota) count
+#endif
+
/* media options */
#define MAX_UNITS 8
static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
@@ -90,15 +99,16 @@ static int multicast_filter_limit = 32;
#define RxPacketMaxSize 0x0800 /* Maximum size supported is 16K-1 */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
+#define R8169_NAPI_WEIGHT 64
#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
-#define NUM_RX_DESC 64 /* Number of Rx descriptor registers */
+#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
#define RX_BUF_SIZE 1536 /* Rx Buffer size */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RTL_MIN_IO_SIZE 0x80
#define RTL8169_TX_TIMEOUT (6*HZ)
-#define RTL8169_PHY_TIMEOUT (HZ)
+#define RTL8169_PHY_TIMEOUT (10*HZ)
/* write/read MMIO register */
#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
@@ -194,7 +204,7 @@ enum RTL8169_register_content {
SWInt = 0x0100,
TxDescUnavail = 0x80,
RxFIFOOver = 0x40,
- RxUnderrun = 0x20,
+ LinkChg = 0x20,
RxOverflow = 0x10,
TxErr = 0x08,
TxOK = 0x04,
@@ -233,6 +243,14 @@ enum RTL8169_register_content {
TxInterFrameGapShift = 24,
TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+ /* TBICSR p.28 */
+ TBIReset = 0x80000000,
+ TBILoopback = 0x40000000,
+ TBINwEnable = 0x20000000,
+ TBINwRestart = 0x10000000,
+ TBILinkOk = 0x02000000,
+ TBINwComplete = 0x01000000,
+
/* CPlusCmd p.31 */
RxVlan = (1 << 6),
RxChkSum = (1 << 5),
@@ -306,10 +324,10 @@ struct RxDesc {
};
struct rtl8169_private {
- void *mmio_addr; /* memory map physical address */
+ void *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev; /* Index of PCI device */
struct net_device_stats stats; /* statistics of net device */
- spinlock_t lock; /* spin lock flag */
+ spinlock_t lock; /* spin lock flag */
int chipset;
int mac_version;
int phy_version;
@@ -317,15 +335,23 @@ struct rtl8169_private {
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_rx;
u32 dirty_tx;
- struct TxDesc *TxDescArray; /* Index of 256-alignment Tx Descriptor buffer */
- struct RxDesc *RxDescArray; /* Index of 256-alignment Rx Descriptor buffer */
+ struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
+ struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
dma_addr_t TxPhyAddr;
dma_addr_t RxPhyAddr;
struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
- struct sk_buff *Tx_skbuff[NUM_TX_DESC]; /* Index of Transmit data buffer */
+ struct sk_buff *Tx_skbuff[NUM_TX_DESC]; /* Tx data buffers */
struct timer_list timer;
- unsigned long phy_link_down_cnt;
u16 cp_cmd;
+ u16 intr_mask;
+ int phy_auto_nego_reg;
+ int phy_1000_ctrl_reg;
+
+ int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
+ void (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*phy_reset_enable)(void *);
+ unsigned int (*phy_reset_pending)(void *);
+ unsigned int (*link_ok)(void *);
};
MODULE_AUTHOR("Realtek");
@@ -344,9 +370,14 @@ static int rtl8169_close(struct net_device *dev);
static void rtl8169_set_rx_mode(struct net_device *dev);
static void rtl8169_tx_timeout(struct net_device *dev);
static struct net_device_stats *rtl8169_get_stats(struct net_device *netdev);
+#ifdef CONFIG_R8169_NAPI
+static int rtl8169_poll(struct net_device *dev, int *budget);
+#endif
static const u16 rtl8169_intr_mask =
- RxUnderrun | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
+ LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
+static const u16 rtl8169_napi_event =
+ RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
static const unsigned int rtl8169_rx_config =
(RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
@@ -364,11 +395,9 @@ static void mdio_write(void *ioaddr, int RegAddr, int value)
for (i = 2000; i > 0; i--) {
// Check if the RTL8169 has completed writing to the specified MII register
- if (!(RTL_R32(PHYAR) & 0x80000000)) {
+ if (!(RTL_R32(PHYAR) & 0x80000000))
break;
- } else {
- udelay(100);
- }
+ udelay(100);
}
}
@@ -390,18 +419,264 @@ static int mdio_read(void *ioaddr, int RegAddr)
return value;
}
+static unsigned int rtl8169_tbi_reset_pending(void *ioaddr)
+{
+ return RTL_R32(TBICSR) & TBIReset;
+}
+
+static unsigned int rtl8169_xmii_reset_pending(void *ioaddr)
+{
+ return mdio_read(ioaddr, 0) & 0x8000;
+}
+
+static unsigned int rtl8169_tbi_link_ok(void *ioaddr)
+{
+ return RTL_R32(TBICSR) & TBILinkOk;
+}
+
+static unsigned int rtl8169_xmii_link_ok(void *ioaddr)
+{
+ return RTL_R8(PHYstatus) & LinkStatus;
+}
+
+static void rtl8169_tbi_reset_enable(void *ioaddr)
+{
+ RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
+}
+
+static void rtl8169_xmii_reset_enable(void *ioaddr)
+{
+ unsigned int val;
+
+ val = (mdio_read(ioaddr, PHY_CTRL_REG) | 0x8000) & 0xffff;
+ mdio_write(ioaddr, PHY_CTRL_REG, val);
+}
+
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp, void *ioaddr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ if (tp->link_ok(ioaddr)) {
+ netif_carrier_on(dev);
+ printk(KERN_INFO PFX "%s: link up\n", dev->name);
+ } else
+ netif_carrier_off(dev);
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
+{
+ struct {
+ u16 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 media;
+ } link_settings[] = {
+ { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half },
+ { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full },
+ { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half },
+ { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full },
+ { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full },
+ /* Make TBI happy */
+ { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
+ }, *p;
+ unsigned char option;
+
+ option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
+
+ if ((option != 0xff) && !idx)
+ printk(KERN_WARNING PFX "media option is deprecated.\n");
+
+ for (p = link_settings; p->media != 0xff; p++) {
+ if (p->media == option)
+ break;
+ }
+ *autoneg = p->autoneg;
+ *speed = p->speed;
+ *duplex = p->duplex;
+}
+
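rtl8169_link_option() walks a small table keyed by the legacy media= module option; the last entry (media 0xff, autoneg enabled) doubles as the sentinel, so anything unrecognized, including the "no option given" case, falls back to autonegotiation. A compilable sketch of that sentinel-terminated lookup, with illustrative option values only (the real _10_Half etc. constants differ):

#include <stdio.h>

struct link_setting {
	unsigned short speed;
	unsigned char duplex;     /* 1 = full */
	unsigned char autoneg;    /* 1 = autonegotiate */
	unsigned char media;      /* legacy option value, 0xff = sentinel */
};

static const struct link_setting table[] = {
	{   10, 0, 0, 0x01 },     /* forced 10 half (illustrative code) */
	{  100, 1, 0, 0x08 },     /* forced 100 full (illustrative code) */
	{ 1000, 1, 1, 0xff },     /* sentinel: autoneg default */
};

int main(void)
{
	unsigned char option = 0x42;              /* unknown/unset option */
	const struct link_setting *p;

	for (p = table; p->media != 0xff; p++)    /* stop at the sentinel */
		if (p->media == option)
			break;
	/* 'p' now points at the match, or at the autoneg default. */
	printf("speed %u, %s duplex, autoneg %s\n", p->speed,
	       p->duplex ? "full" : "half", p->autoneg ? "on" : "off");
	return 0;
}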
static void rtl8169_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
strcpy(info->driver, RTL8169_DRIVER_NAME);
strcpy(info->version, RTL8169_VERSION );
strcpy(info->bus_info, pci_name(tp->pci_dev));
}
+static int rtl8169_set_speed_tbi(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ int ret = 0;
+ u32 reg;
+
+ reg = RTL_R32(TBICSR);
+ if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
+ (duplex == DUPLEX_FULL)) {
+ RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
+ } else if (autoneg == AUTONEG_ENABLE)
+ RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
+ else {
+ printk(KERN_WARNING PFX
+ "%s: incorrect speed setting refused in TBI mode\n",
+ dev->name);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtl8169_set_speed_xmii(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ int auto_nego, giga_ctrl;
+
+ auto_nego = mdio_read(ioaddr, PHY_AUTO_NEGO_REG);
+ auto_nego &= ~(PHY_Cap_10_Half | PHY_Cap_10_Full |
+ PHY_Cap_100_Half | PHY_Cap_100_Full);
+ giga_ctrl = mdio_read(ioaddr, PHY_1000_CTRL_REG);
+ giga_ctrl &= ~(PHY_Cap_1000_Full | PHY_Cap_Null);
+
+ if (autoneg == AUTONEG_ENABLE) {
+ auto_nego |= (PHY_Cap_10_Half | PHY_Cap_10_Full |
+ PHY_Cap_100_Half | PHY_Cap_100_Full);
+ giga_ctrl |= PHY_Cap_1000_Full;
+ } else {
+ if (speed == SPEED_10)
+ auto_nego |= PHY_Cap_10_Half | PHY_Cap_10_Full;
+ else if (speed == SPEED_100)
+ auto_nego |= PHY_Cap_100_Half | PHY_Cap_100_Full;
+ else if (speed == SPEED_1000)
+ giga_ctrl |= PHY_Cap_1000_Full;
+
+ if (duplex == DUPLEX_HALF)
+ auto_nego &= ~(PHY_Cap_10_Full | PHY_Cap_100_Full);
+ }
+
+ tp->phy_auto_nego_reg = auto_nego;
+ tp->phy_1000_ctrl_reg = giga_ctrl;
+
+ mdio_write(ioaddr, PHY_AUTO_NEGO_REG, auto_nego);
+ mdio_write(ioaddr, PHY_1000_CTRL_REG, giga_ctrl);
+ mdio_write(ioaddr, PHY_CTRL_REG, PHY_Enable_Auto_Nego |
+ PHY_Restart_Auto_Nego);
+ return 0;
+}
+
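rtl8169_set_speed_xmii() translates the requested (autoneg, speed, duplex) triple into the 10/100 advertisement register and the 1000BASE-T control register before restarting autonegotiation. A user-space sketch of the bit-building step; the bit values here are hypothetical stand-ins for the PHY_Cap_* constants:

#include <stdio.h>

#define ADV_10_HALF    0x01
#define ADV_10_FULL    0x02
#define ADV_100_HALF   0x04
#define ADV_100_FULL   0x08
#define CTRL_1000_FULL 0x10

static void build_adv(int autoneg, int speed, int full_duplex,
		      unsigned *adv, unsigned *giga)
{
	*adv = 0;
	*giga = 0;
	if (autoneg) {
		/* Advertise everything the chip can do. */
		*adv = ADV_10_HALF | ADV_10_FULL | ADV_100_HALF | ADV_100_FULL;
		*giga = CTRL_1000_FULL;
		return;
	}
	if (speed == 10)
		*adv = ADV_10_HALF | ADV_10_FULL;
	else if (speed == 100)
		*adv = ADV_100_HALF | ADV_100_FULL;
	else if (speed == 1000)
		*giga = CTRL_1000_FULL;
	if (!full_duplex)
		*adv &= ~(ADV_10_FULL | ADV_100_FULL);   /* drop the full-duplex bits */
}

int main(void)
{
	unsigned adv, giga;

	build_adv(0, 100, 0, &adv, &giga);   /* forced 100 Mb/s half duplex */
	printf("adv=0x%02x giga=0x%02x\n", adv, giga);
	return 0;
}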
+static int rtl8169_set_speed(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int ret;
+
+ ret = tp->set_speed(dev, autoneg, speed, duplex);
+
+ if (netif_running(dev) && (tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full))
+ mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
+
+ return ret;
+}
+
+static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return ret;
+}
+
+static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ u32 status;
+
+ cmd->supported =
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_INTERNAL;
+
+ status = RTL_R32(TBICSR);
+ cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
+ cmd->autoneg = !!(status & TBINwEnable);
+
+ cmd->speed = SPEED_1000;
+ cmd->duplex = DUPLEX_FULL; /* Always set */
+}
+
+static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ u8 status;
+
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP;
+
+ cmd->autoneg = 1;
+ cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+
+ if (tp->phy_auto_nego_reg & PHY_Cap_10_Half)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (tp->phy_auto_nego_reg & PHY_Cap_10_Full)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (tp->phy_auto_nego_reg & PHY_Cap_100_Half)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (tp->phy_auto_nego_reg & PHY_Cap_100_Full)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ if (tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full)
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+
+ status = RTL_R8(PHYstatus);
+
+ if (status & _1000bpsF)
+ cmd->speed = SPEED_1000;
+ else if (status & _100bps)
+ cmd->speed = SPEED_100;
+ else if (status & _10bps)
+ cmd->speed = SPEED_10;
+
+ cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+}
+
+static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ tp->get_settings(dev, cmd);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+ return 0;
+}
+
+
static struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_settings = rtl8169_get_settings,
+ .set_settings = rtl8169_set_settings,
};
static void rtl8169_write_gmii_reg_bit(void *ioaddr, int reg, int bitnum,
@@ -500,7 +775,7 @@ static void rtl8169_print_phy_version(struct rtl8169_private *tp)
static void rtl8169_hw_phy_config(struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
struct {
u16 regs[5]; /* Beware of bit-sign propagation */
@@ -566,61 +841,47 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
}
-static void rtl8169_hw_phy_reset(struct net_device *dev)
-{
- struct rtl8169_private *tp = dev->priv;
- void *ioaddr = tp->mmio_addr;
- int i, val;
-
- printk(KERN_WARNING PFX "%s: Reset RTL8169s PHY\n", dev->name);
-
- val = (mdio_read(ioaddr, 0) | 0x8000) & 0xffff;
- mdio_write(ioaddr, 0, val);
-
- for (i = 50; i >= 0; i--) {
- if (!(mdio_read(ioaddr, 0) & 0x8000))
- break;
- udelay(100); /* Gross */
- }
-
- if (i < 0) {
- printk(KERN_WARNING PFX "%s: no PHY Reset ack. Giving up.\n",
- dev->name);
- }
-}
-
static void rtl8169_phy_timer(unsigned long __opaque)
{
struct net_device *dev = (struct net_device *)__opaque;
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
struct timer_list *timer = &tp->timer;
void *ioaddr = tp->mmio_addr;
+ unsigned long timeout = RTL8169_PHY_TIMEOUT;
assert(tp->mac_version > RTL_GIGA_MAC_VER_B);
assert(tp->phy_version < RTL_GIGA_PHY_VER_G);
- if (RTL_R8(PHYstatus) & LinkStatus)
- tp->phy_link_down_cnt = 0;
- else {
- tp->phy_link_down_cnt++;
- if (tp->phy_link_down_cnt >= 12) {
- int reg;
+ if (!(tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full))
+ return;
- // If link on 1000, perform phy reset.
- reg = mdio_read(ioaddr, PHY_1000_CTRL_REG);
- if (reg & PHY_Cap_1000_Full)
- rtl8169_hw_phy_reset(dev);
+ spin_lock_irq(&tp->lock);
- tp->phy_link_down_cnt = 0;
- }
+ if (tp->phy_reset_pending(ioaddr)) {
+ /*
+	 * A busy loop could burn quite a few cycles on a modern CPU.
+ * Let's delay the execution of the timer for a few ticks.
+ */
+ timeout = HZ/10;
+ goto out_mod_timer;
}
- mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
+ if (tp->link_ok(ioaddr))
+ goto out_unlock;
+
+ printk(KERN_WARNING PFX "%s: PHY reset until link up\n", dev->name);
+
+ tp->phy_reset_enable(ioaddr);
+
+out_mod_timer:
+ mod_timer(timer, jiffies + timeout);
+out_unlock:
+ spin_unlock_irq(&tp->lock);
}
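The rewritten timer no longer counts "link down" samples. It only runs while gigabit is advertised, re-arms quickly (HZ/10) if a PHY reset is still pending, stops quietly once the link is up, and otherwise kicks another reset and re-arms at the slow rate. A small plain-C sketch of that decision ladder, with hypothetical return codes:

#include <stdio.h>

enum timer_action { REARM_FAST, STOP, RESET_AND_REARM_SLOW };

/* Model of the PHY watchdog body: decide what the timer should do next. */
static enum timer_action phy_watchdog(int reset_pending, int link_ok)
{
	if (reset_pending)
		return REARM_FAST;           /* check again in ~HZ/10 */
	if (link_ok)
		return STOP;                 /* nothing to do until reconfigured */
	return RESET_AND_REARM_SLOW;         /* reset the PHY, retry in ~10*HZ */
}

int main(void)
{
	printf("%d %d %d\n",
	       phy_watchdog(1, 0), phy_watchdog(0, 1), phy_watchdog(0, 0));
	return 0;
}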
static inline void rtl8169_delete_timer(struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
struct timer_list *timer = &tp->timer;
if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
@@ -628,21 +889,17 @@ static inline void rtl8169_delete_timer(struct net_device *dev)
return;
del_timer_sync(timer);
-
- tp->phy_link_down_cnt = 0;
}
static inline void rtl8169_request_timer(struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
struct timer_list *timer = &tp->timer;
if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
(tp->phy_version >= RTL_GIGA_PHY_VER_G))
return;
- tp->phy_link_down_cnt = 0;
-
init_timer(timer);
timer->expires = jiffies + RTL8169_PHY_TIMEOUT;
timer->data = (unsigned long)(dev);
@@ -681,7 +938,7 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
// enable device (incl. PCI PM wakeup and hotplug setup)
rc = pci_enable_device(pdev);
if (rc) {
- printk(KERN_ERR PFX "%s: unable to enable device\n", pdev->slot_name);
+ printk(KERN_ERR PFX "%s: enable failure\n", pdev->slot_name);
goto err_out;
}
@@ -693,7 +950,8 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
} else {
- printk(KERN_ERR PFX "Cannot find PowerManagement capability, aborting.\n");
+ printk(KERN_ERR PFX
+ "Cannot find PowerManagement capability, aborting.\n");
goto err_out_free_res;
}
@@ -718,7 +976,8 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
rc = pci_request_regions(pdev, MODULENAME);
if (rc) {
- printk(KERN_ERR PFX "%s: Could not request regions.\n", pdev->slot_name);
+ printk(KERN_ERR PFX "%s: could not request regions.\n",
+ pdev->slot_name);
goto err_out_disable;
}
@@ -800,8 +1059,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void *ioaddr = NULL;
static int board_idx = -1;
static int printed_version = 0;
+ u8 autoneg, duplex;
+ u16 speed;
int i, rc;
- int option = -1, Cap10_100 = 0, Cap1000 = 0;
assert(pdev != NULL);
assert(ent != NULL);
@@ -822,6 +1082,22 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
assert(dev != NULL);
assert(tp != NULL);
+ if (RTL_R8(PHYstatus) & TBI_Enable) {
+ tp->set_speed = rtl8169_set_speed_tbi;
+ tp->get_settings = rtl8169_gset_tbi;
+ tp->phy_reset_enable = rtl8169_tbi_reset_enable;
+ tp->phy_reset_pending = rtl8169_tbi_reset_pending;
+ tp->link_ok = rtl8169_tbi_link_ok;
+
+ tp->phy_1000_ctrl_reg = PHY_Cap_1000_Full; /* Implied by TBI */
+ } else {
+ tp->set_speed = rtl8169_set_speed_xmii;
+ tp->get_settings = rtl8169_gset_xmii;
+ tp->phy_reset_enable = rtl8169_xmii_reset_enable;
+ tp->phy_reset_pending = rtl8169_xmii_reset_pending;
+ tp->link_ok = rtl8169_xmii_link_ok;
+ }
+
// Get MAC address. FIXME: read EEPROM
for (i = 0; i < MAC_ADDR_LEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
@@ -836,9 +1112,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
dev->irq = pdev->irq;
dev->base_addr = (unsigned long) ioaddr;
-// dev->do_ioctl = mii_ioctl;
-
- tp = dev->priv; // private data //
+#ifdef CONFIG_R8169_NAPI
+ dev->poll = rtl8169_poll;
+ dev->weight = R8169_NAPI_WEIGHT;
+ printk(KERN_INFO PFX "NAPI enabled\n");
+#endif
+ tp->intr_mask = 0xffff;
tp->pci_dev = pdev;
tp->mmio_addr = ioaddr;
@@ -885,95 +1164,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
}
- // if TBI is not endbled
- if (!(RTL_R8(PHYstatus) & TBI_Enable)) {
- int val = mdio_read(ioaddr, PHY_AUTO_NEGO_REG);
-
- option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
- // Force RTL8169 in 10/100/1000 Full/Half mode.
- if (option > 0) {
- printk(KERN_INFO "%s: Force-mode Enabled.\n",
- dev->name);
- Cap10_100 = 0, Cap1000 = 0;
- switch (option) {
- case _10_Half:
- Cap10_100 = PHY_Cap_10_Half_Or_Less;
- Cap1000 = PHY_Cap_Null;
- break;
- case _10_Full:
- Cap10_100 = PHY_Cap_10_Full_Or_Less;
- Cap1000 = PHY_Cap_Null;
- break;
- case _100_Half:
- Cap10_100 = PHY_Cap_100_Half_Or_Less;
- Cap1000 = PHY_Cap_Null;
- break;
- case _100_Full:
- Cap10_100 = PHY_Cap_100_Full_Or_Less;
- Cap1000 = PHY_Cap_Null;
- break;
- case _1000_Full:
- Cap10_100 = PHY_Cap_100_Full_Or_Less;
- Cap1000 = PHY_Cap_1000_Full;
- break;
- default:
- break;
- }
- mdio_write(ioaddr, PHY_AUTO_NEGO_REG, Cap10_100 | (val & 0x1F)); //leave PHY_AUTO_NEGO_REG bit4:0 unchanged
- mdio_write(ioaddr, PHY_1000_CTRL_REG, Cap1000);
- } else {
- printk(KERN_INFO "%s: Auto-negotiation Enabled.\n",
- dev->name);
-
- // enable 10/100 Full/Half Mode, leave PHY_AUTO_NEGO_REG bit4:0 unchanged
- mdio_write(ioaddr, PHY_AUTO_NEGO_REG,
- PHY_Cap_100_Full_Or_Less | (val & 0x1f));
+ rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
- // enable 1000 Full Mode
- mdio_write(ioaddr, PHY_1000_CTRL_REG,
- PHY_Cap_1000_Full);
-
- }
-
- // Enable auto-negotiation and restart auto-nigotiation
- mdio_write(ioaddr, PHY_CTRL_REG,
- PHY_Enable_Auto_Nego | PHY_Restart_Auto_Nego);
- udelay(100);
-
- // wait for auto-negotiation process
- for (i = 10000; i > 0; i--) {
- //check if auto-negotiation complete
- if (mdio_read(ioaddr, PHY_STAT_REG) &
- PHY_Auto_Neco_Comp) {
- udelay(100);
- option = RTL_R8(PHYstatus);
- if (option & _1000bpsF) {
- printk(KERN_INFO
- "%s: 1000Mbps Full-duplex operation.\n",
- dev->name);
- } else {
- printk(KERN_INFO
- "%s: %sMbps %s-duplex operation.\n",
- dev->name,
- (option & _100bps) ? "100" :
- "10",
- (option & FullDup) ? "Full" :
- "Half");
- }
- break;
- } else {
- udelay(100);
- }
- } // end for-loop to wait for auto-negotiation process
-
- } else {
- udelay(100);
- printk(KERN_INFO
- "%s: 1000Mbps Full-duplex operation, TBI Link %s!\n",
- dev->name,
- (RTL_R32(TBICSR) & TBILinkOK) ? "OK" : "Failed");
-
- }
+ rtl8169_set_speed(dev, autoneg, speed, duplex);
+
+ if (RTL_R8(PHYstatus) & TBI_Enable)
+ printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
return 0;
}
@@ -982,7 +1178,7 @@ static void __devexit
rtl8169_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
assert(dev != NULL);
assert(tp != NULL);
@@ -1001,7 +1197,7 @@ rtl8169_remove_one(struct pci_dev *pdev)
static int rtl8169_suspend(struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
@@ -1042,7 +1238,7 @@ static int rtl8169_resume(struct pci_dev *pdev)
static int
rtl8169_open(struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
int retval;
@@ -1074,6 +1270,8 @@ rtl8169_open(struct net_device *dev)
rtl8169_hw_start(dev);
rtl8169_request_timer(dev);
+
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
return retval;
@@ -1091,7 +1289,7 @@ err_free_irq:
static void
rtl8169_hw_start(struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 i;
@@ -1102,8 +1300,7 @@ rtl8169_hw_start(struct net_device *dev)
for (i = 1000; i > 0; i--) {
if ((RTL_R8(ChipCmd) & CmdReset) == 0)
break;
- else
- udelay(10);
+ udelay(10);
}
RTL_W8(Cfg9346, Cfg9346_Unlock);
@@ -1114,8 +1311,8 @@ rtl8169_hw_start(struct net_device *dev)
RTL_W16(RxMaxSize, RxPacketMaxSize);
// Set Rx Config register
- i = rtl8169_rx_config | (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].
- RxConfigMask);
+ i = rtl8169_rx_config |
+ (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
RTL_W32(RxConfig, i);
/* Set DMA burst size and Interframe Gap Time */
@@ -1126,7 +1323,8 @@ rtl8169_hw_start(struct net_device *dev)
RTL_W16(CPlusCmd, tp->cp_cmd);
if (tp->mac_version == RTL_GIGA_MAC_VER_D) {
- dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0: bit-3 and bit-14 MUST be 1\n");
+ dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
+ "Bit-3 and bit-14 MUST be 1\n");
tp->cp_cmd |= (1 << 14) | PCIMulRW;
RTL_W16(CPlusCmd, tp->cp_cmd);
}
@@ -1151,7 +1349,6 @@ rtl8169_hw_start(struct net_device *dev)
RTL_W16(IntrMask, rtl8169_intr_mask);
netif_start_queue(dev);
-
}
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
@@ -1248,7 +1445,7 @@ static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
static int rtl8169_init_ring(struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
tp->cur_rx = tp->dirty_rx = 0;
tp->cur_tx = tp->dirty_tx = 0;
@@ -1302,10 +1499,11 @@ rtl8169_tx_clear(struct rtl8169_private *tp)
static void
rtl8169_tx_timeout(struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u8 tmp8;
+ printk(KERN_INFO "%s: TX Timeout\n", dev->name);
/* disable Tx, if not already */
tmp8 = RTL_R8(ChipCmd);
if (tmp8 & CmdTxEnb)
@@ -1328,9 +1526,9 @@ rtl8169_tx_timeout(struct net_device *dev)
static int
rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
- int entry = tp->cur_tx % NUM_TX_DESC;
+ unsigned int entry = tp->cur_tx % NUM_TX_DESC;
u32 len = skb->len;
if (unlikely(skb->len < ETH_ZLEN)) {
@@ -1340,10 +1538,9 @@ rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = ETH_ZLEN;
}
- spin_lock_irq(&tp->lock);
-
if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
dma_addr_t mapping;
+ u32 status;
mapping = pci_map_single(tp->pci_dev, skb->data, len,
PCI_DMA_TODEVICE);
@@ -1351,24 +1548,30 @@ rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
tp->Tx_skbuff[entry] = skb;
tp->TxDescArray[entry].addr = cpu_to_le64(mapping);
- tp->TxDescArray[entry].status = cpu_to_le32(OWNbit | FSbit |
- LSbit | len | (EORbit * !((entry + 1) % NUM_TX_DESC)));
+ /* anti gcc 2.95.3 bugware */
+ status = OWNbit | FSbit | LSbit | len |
+ (EORbit * !((entry + 1) % NUM_TX_DESC));
+ tp->TxDescArray[entry].status = cpu_to_le32(status);
RTL_W8(TxPoll, 0x40); //set polling bit
dev->trans_start = jiffies;
tp->cur_tx++;
+ smp_wmb();
} else
goto err_drop;
-
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
+ u32 dirty = tp->dirty_tx;
+
netif_stop_queue(dev);
+ smp_rmb();
+ if (dirty != tp->dirty_tx)
+ netif_wake_queue(dev);
}
-out:
- spin_unlock_irq(&tp->lock);
+out:
return 0;
err_drop:
@@ -1382,17 +1585,18 @@ static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
void *ioaddr)
{
- unsigned long dirty_tx, tx_left;
+ unsigned int dirty_tx, tx_left;
assert(dev != NULL);
assert(tp != NULL);
assert(ioaddr != NULL);
dirty_tx = tp->dirty_tx;
+ smp_rmb();
tx_left = tp->cur_tx - dirty_tx;
while (tx_left > 0) {
- int entry = dirty_tx % NUM_TX_DESC;
+ unsigned int entry = dirty_tx % NUM_TX_DESC;
struct sk_buff *skb = tp->Tx_skbuff[entry];
u32 status;
@@ -1415,6 +1619,7 @@ rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
if (tp->dirty_tx != dirty_tx) {
tp->dirty_tx = dirty_tx;
+ smp_wmb();
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
}
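With the per-xmit spinlock gone, the transmit path now relies on ordered index updates: the xmit side stops the queue when the ring looks full, then re-reads dirty_tx after smp_rmb() to catch a completion that raced with the stop, while the completion side publishes dirty_tx with smp_wmb() before waking the queue. A sequential sketch of the re-check logic (the barriers only matter under real concurrency, so this just shows the control flow, not the driver's code):

#include <stdio.h>

#define RING_SIZE 64

/* Hypothetical single-threaded model of the "stop, re-check, wake" test. */
static int queue_should_stay_stopped(unsigned cur_tx, unsigned dirty_tx_then,
				     unsigned dirty_tx_now)
{
	if (cur_tx - dirty_tx_then != RING_SIZE)
		return 0;                   /* ring not full, queue never stopped */
	/* In the driver an smp_rmb() sits here before re-reading dirty_tx. */
	if (dirty_tx_now != dirty_tx_then)
		return 0;                   /* a completion raced in: wake */
	return 1;                           /* genuinely full: stay stopped */
}

int main(void)
{
	printf("%d\n", queue_should_stay_stopped(128, 64, 64));  /* stays stopped */
	printf("%d\n", queue_should_stay_stopped(128, 64, 70));  /* wakes */
	return 0;
}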
@@ -1442,11 +1647,11 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
return ret;
}
-static void
+static int
rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
void *ioaddr)
{
- unsigned long cur_rx, rx_left;
+ unsigned int cur_rx, rx_left, count;
int delta;
assert(dev != NULL);
@@ -1455,9 +1660,10 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
cur_rx = tp->cur_rx;
rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
+ rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);
while (rx_left > 0) {
- int entry = cur_rx % NUM_RX_DESC;
+ unsigned int entry = cur_rx % NUM_RX_DESC;
u32 status;
rmb();
@@ -1494,7 +1700,7 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
+ rtl8169_rx_skb(skb);
dev->last_rx = jiffies;
tp->stats.rx_bytes += pkt_size;
@@ -1505,13 +1711,15 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
rx_left--;
}
+ count = cur_rx - tp->cur_rx;
tp->cur_rx = cur_rx;
delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
- if (delta > 0)
- tp->dirty_rx += delta;
- else if (delta < 0)
+ if (delta < 0) {
printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
+ delta = 0;
+ }
+ tp->dirty_rx += delta;
/*
* FIXME: until there is periodic timer to try and refill the ring,
@@ -1522,6 +1730,8 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
*/
if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
+
+ return count;
}
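The receive routine now returns how many packets it actually handled, so the NAPI poll can account against its budget, and the refill bookkeeping clamps a failed allocation (negative delta) to zero rather than corrupting dirty_rx. A sketch of that accounting in plain C; the refill helper here is illustrative, not the driver's function:

#include <stdio.h>

#define NUM_RX_DESC 256

/* Hypothetical refill: returns buffers attached, or -1 if allocation failed. */
static int rx_fill(unsigned dirty, unsigned cur, int alloc_ok)
{
	if (!alloc_ok)
		return -1;
	return (int)(cur - dirty);           /* all missing buffers refilled */
}

int main(void)
{
	unsigned cur_rx = 300, dirty_rx = 44;
	int delta = rx_fill(dirty_rx, cur_rx, 0 /* allocation failed */);

	if (delta < 0)
		delta = 0;                   /* never move dirty_rx backwards */
	dirty_rx += delta;

	if (dirty_rx + NUM_RX_DESC == cur_rx)
		printf("Rx buffers exhausted\n");
	else
		printf("dirty_rx=%u cur_rx=%u\n", dirty_rx, cur_rx);
	return 0;
}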
/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
@@ -1529,7 +1739,7 @@ static irqreturn_t
rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_instance;
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
int boguscnt = max_interrupt_work;
void *ioaddr = tp->mmio_addr;
int status = 0;
@@ -1543,26 +1753,37 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
break;
handled = 1;
-/*
- if (status & RxUnderrun)
- link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit;
-*/
+
+ status &= tp->intr_mask;
RTL_W16(IntrStatus,
(status & RxFIFOOver) ? (status | RxOverflow) : status);
if (!(status & rtl8169_intr_mask))
break;
+ if (status & LinkChg)
+ rtl8169_check_link_status(dev, tp, ioaddr);
+
+#ifdef CONFIG_R8169_NAPI
+ RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
+ tp->intr_mask = ~rtl8169_napi_event;
+
+ if (likely(netif_rx_schedule_prep(dev)))
+ __netif_rx_schedule(dev);
+ else {
+ printk(KERN_INFO "%s: interrupt %x taken in poll\n",
+ dev->name, status);
+ }
+ break;
+#else
// Rx interrupt
- if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) {
+ if (status & (RxOK | RxOverflow | RxFIFOOver)) {
rtl8169_rx_interrupt(dev, tp, ioaddr);
}
// Tx interrupt
- if (status & (TxOK | TxErr)) {
- spin_lock(&tp->lock);
+ if (status & (TxOK | TxErr))
rtl8169_tx_interrupt(dev, tp, ioaddr);
- spin_unlock(&tp->lock);
- }
+#endif
boguscnt--;
} while (boguscnt > 0);
@@ -1576,10 +1797,40 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
return IRQ_RETVAL(handled);
}
+#ifdef CONFIG_R8169_NAPI
+static int rtl8169_poll(struct net_device *dev, int *budget)
+{
+ unsigned int work_done, work_to_do = min(*budget, dev->quota);
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+
+ work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
+ rtl8169_tx_interrupt(dev, tp, ioaddr);
+
+ *budget -= work_done;
+ dev->quota -= work_done;
+
+ if ((work_done < work_to_do) || !netif_running(dev)) {
+ netif_rx_complete(dev);
+ tp->intr_mask = 0xffff;
+ /*
+ * 20040426: the barrier is not strictly required but the
+ * behavior of the irq handler could be less predictable
+ * without it. Btw, the lack of flush for the posted pci
+ * write is safe - FR
+ */
+ smp_wmb();
+ RTL_W16(IntrMask, rtl8169_intr_mask);
+ }
+
+ return (work_done >= work_to_do);
+}
+#endif
+
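rtl8169_poll() follows the 2.6-era NAPI contract: consume at most min(*budget, dev->quota) packets, decrement both counters by the work done, return nonzero if more work remains, and re-enable the device interrupt only once the ring is drained. A user-space model of that budget exchange (hypothetical helper, not kernel code):

#include <stdio.h>

/* Model of one ->poll() pass; 'quota' is the per-device share for this pass. */
static int poll_once(int *budget, int quota, int pending, int *irq_enabled)
{
	int work_to_do = *budget < quota ? *budget : quota;
	int work_done = pending < work_to_do ? pending : work_to_do;

	*budget -= work_done;

	if (work_done < work_to_do) {
		*irq_enabled = 1;      /* ring drained: unmask the device IRQ */
		return 0;              /* tell the stack this device is done */
	}
	return 1;                      /* quota exhausted: stay on the poll list */
}

int main(void)
{
	int budget = 300, irq_enabled = 0;

	/* First pass: 200 packets pending, quota 64 -> more work remains. */
	printf("again=%d budget=%d\n",
	       poll_once(&budget, 64, 200, &irq_enabled), budget);
	/* Second pass: only 10 pending -> finish and re-enable interrupts. */
	printf("again=%d irq_enabled=%d\n",
	       poll_once(&budget, 64, 10, &irq_enabled), irq_enabled);
	return 0;
}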
static int
rtl8169_close(struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
void *ioaddr = tp->mmio_addr;
@@ -1621,7 +1872,7 @@ rtl8169_close(struct net_device *dev)
static void
rtl8169_set_rx_mode(struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
u32 mc_filter[2]; /* Multicast hash filter */
@@ -1655,10 +1906,8 @@ rtl8169_set_rx_mode(struct net_device *dev)
spin_lock_irqsave(&tp->lock, flags);
- tmp =
- rtl8169_rx_config | rx_mode | (RTL_R32(RxConfig) &
- rtl_chip_info[tp->chipset].
- RxConfigMask);
+ tmp = rtl8169_rx_config | rx_mode |
+ (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
RTL_W32(RxConfig, tmp);
RTL_W32(MAR0 + 0, mc_filter[0]);
@@ -1675,7 +1924,7 @@ rtl8169_set_rx_mode(struct net_device *dev)
*/
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
{
- struct rtl8169_private *tp = dev->priv;
+ struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
diff --git a/drivers/net/sk98lin/h/skdrv2nd.h b/drivers/net/sk98lin/h/skdrv2nd.h
index 0790e7b7a5c144..c80ac3c552e895 100644
--- a/drivers/net/sk98lin/h/skdrv2nd.h
+++ b/drivers/net/sk98lin/h/skdrv2nd.h
@@ -53,60 +53,6 @@
#include "h/skrlmt.h"
#include "h/skgedrv.h"
-#define SK_PCI_ISCOMPLIANT(result, pdev) { \
- result = SK_FALSE; /* default */ \
- /* 3Com (0x10b7) */ \
- if (pdev->vendor == 0x10b7) { \
- /* Gigabit Ethernet Adapter (0x1700) */ \
- if ((pdev->device == 0x1700) || \
- (pdev->device == 0x80eb)) { \
- result = SK_TRUE; \
- } \
- /* SysKonnect (0x1148) */ \
- } else if (pdev->vendor == 0x1148) { \
- /* SK-98xx Gigabit Ethernet Server Adapter (0x4300) */ \
- /* SK-98xx V2.0 Gigabit Ethernet Adapter (0x4320) */ \
- if ((pdev->device == 0x4300) || \
- (pdev->device == 0x4320)) { \
- result = SK_TRUE; \
- } \
- /* D-Link (0x1186) */ \
- } else if (pdev->vendor == 0x1186) { \
- /* Gigabit Ethernet Adapter (0x4c00) */ \
- if ((pdev->device == 0x4c00)) { \
- result = SK_TRUE; \
- } \
- /* Marvell (0x11ab) */ \
- } else if (pdev->vendor == 0x11ab) { \
- /* Gigabit Ethernet Adapter (0x4320) */ \
- /* Gigabit Ethernet Adapter (0x4360) */ \
- /* Gigabit Ethernet Adapter (0x4361) */ \
- /* Belkin (0x5005) */ \
- if ((pdev->device == 0x4320) || \
- (pdev->device == 0x4360) || \
- (pdev->device == 0x4361) || \
- (pdev->device == 0x5005)) { \
- result = SK_TRUE; \
- } \
- /* CNet (0x1371) */ \
- } else if (pdev->vendor == 0x1371) { \
- /* GigaCard Network Adapter (0x434e) */ \
- if ((pdev->device == 0x434e)) { \
- result = SK_TRUE; \
- } \
- /* Linksys (0x1737) */ \
- } else if (pdev->vendor == 0x1737) { \
- /* Gigabit Network Adapter (0x1032) */ \
- /* Gigabit Network Adapter (0x1064) */ \
- if ((pdev->device == 0x1032) || \
- (pdev->device == 0x1064)) { \
- result = SK_TRUE; \
- } \
- } else { \
- result = SK_FALSE; \
- } \
-}
-
extern SK_MBUF *SkDrvAllocRlmtMbuf(SK_AC*, SK_IOC, unsigned);
extern void SkDrvFreeRlmtMbuf(SK_AC*, SK_IOC, SK_MBUF*);
diff --git a/drivers/net/sk98lin/skaddr.c b/drivers/net/sk98lin/skaddr.c
index 5a7ce8ef873433..abf8576030ad51 100644
--- a/drivers/net/sk98lin/skaddr.c
+++ b/drivers/net/sk98lin/skaddr.c
@@ -892,7 +892,7 @@ SK_AC *pAC, /* adapter context */
SK_IOC IoC, /* I/O context */
SK_U32 PortNumber) /* Port Number */
{
- int ReturnCode;
+ int ReturnCode = 0;
#if (!defined(SK_SLIM) || defined(DEBUG))
if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
return (SK_ADDR_ILLEGAL_PORT);
@@ -1424,7 +1424,7 @@ SK_IOC IoC, /* I/O context */
SK_U32 PortNumber, /* port whose promiscuous mode changes */
int NewPromMode) /* new promiscuous mode */
{
- int ReturnCode;
+ int ReturnCode = 0;
#if (!defined(SK_SLIM) || defined(DEBUG))
if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
return (SK_ADDR_ILLEGAL_PORT);
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 503fdff54bc164..7488071bfcbc06 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -239,7 +239,7 @@ static int SkDrvDeInitAdapter(SK_AC *pAC, int devNbr);
#ifdef CONFIG_PROC_FS
static const char SK_Root_Dir_entry[] = "sk98lin";
-static struct proc_dir_entry *pSkRootDir = NULL;
+static struct proc_dir_entry *pSkRootDir;
extern struct file_operations sk_proc_fops;
#endif
@@ -255,303 +255,13 @@ static void DumpLong(char*, int);
#endif
/* global variables *********************************************************/
-static const char *BootString = BOOT_STRING;
struct SK_NET_DEVICE *SkGeRootDev = NULL;
-static int probed __initdata = 0;
static SK_BOOL DoPrintInterfaceChange = SK_TRUE;
/* local variables **********************************************************/
static uintptr_t TxQueueAddr[SK_MAX_MACS][2] = {{0x680, 0x600},{0x780, 0x700}};
static uintptr_t RxQueueAddr[SK_MAX_MACS] = {0x400, 0x480};
-
-#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *pSkRootDir;
-#endif
-
-
-
-/*****************************************************************************
- *
- * skge_probe - find all SK-98xx adapters
- *
- * Description:
- * This function scans the PCI bus for SK-98xx adapters. Resources for
- * each adapter are allocated and the adapter is brought into Init 1
- * state.
- *
- * Returns:
- * 0, if everything is ok
- * !=0, on error
- */
-static int __init skge_probe (void)
-{
- int boards_found = 0;
- int vendor_flag = SK_FALSE;
- SK_AC *pAC;
- DEV_NET *pNet = NULL;
- struct pci_dev *pdev = NULL;
- struct SK_NET_DEVICE *dev = NULL;
- SK_BOOL DeviceFound = SK_FALSE;
- SK_BOOL BootStringCount = SK_FALSE;
- int retval;
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *pProcFile;
-#endif
-
- if (probed)
- return -ENODEV;
- probed++;
-
-
- while((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
-
- if (pci_enable_device(pdev)) {
- continue;
- }
- dev = NULL;
- pNet = NULL;
-
- /* Don't handle Yukon2 cards at the moment */
- /* 12-feb-2004 ---- mlindner@syskonnect.de */
- if (pdev->vendor == 0x11ab) {
- if ( (pdev->device == 0x4360) || (pdev->device == 0x4361) )
- continue;
- }
-
- SK_PCI_ISCOMPLIANT(vendor_flag, pdev);
- if (!vendor_flag)
- continue;
-
- /* Configure DMA attributes. */
- if (pci_set_dma_mask(pdev, (u64) 0xffffffffffffffffULL) &&
- pci_set_dma_mask(pdev, (u64) 0xffffffff))
- continue;
-
-
- if ((dev = alloc_etherdev(sizeof(DEV_NET))) == NULL) {
- printk(KERN_ERR "Unable to allocate etherdev "
- "structure!\n");
- break;
- }
-
- pNet = dev->priv;
- pNet->pAC = kmalloc(sizeof(SK_AC), GFP_KERNEL);
- if (pNet->pAC == NULL){
- free_netdev(dev);
- printk(KERN_ERR "Unable to allocate adapter "
- "structure!\n");
- break;
- }
-
- /* Print message */
- if (!BootStringCount) {
- /* set display flag to TRUE so that */
- /* we only display this string ONCE */
- BootStringCount = SK_TRUE;
- printk("%s\n", BootString);
- }
-
- memset(pNet->pAC, 0, sizeof(SK_AC));
- pAC = pNet->pAC;
- pAC->PciDev = pdev;
- pAC->PciDevId = pdev->device;
- pAC->dev[0] = dev;
- pAC->dev[1] = dev;
- sprintf(pAC->Name, "SysKonnect SK-98xx");
- pAC->CheckQueue = SK_FALSE;
-
- pNet->Mtu = 1500;
- pNet->Up = 0;
- dev->irq = pdev->irq;
- retval = SkGeInitPCI(pAC);
- if (retval) {
- printk("SKGE: PCI setup failed: %i\n", retval);
- free_netdev(dev);
- continue;
- }
-
- SET_MODULE_OWNER(dev);
- dev->open = &SkGeOpen;
- dev->stop = &SkGeClose;
- dev->hard_start_xmit = &SkGeXmit;
- dev->get_stats = &SkGeStats;
- dev->last_stats = &SkGeStats;
- dev->set_multicast_list = &SkGeSetRxMode;
- dev->set_mac_address = &SkGeSetMacAddr;
- dev->do_ioctl = &SkGeIoctl;
- dev->change_mtu = &SkGeChangeMtu;
- dev->flags &= ~IFF_RUNNING;
- SET_NETDEV_DEV(dev, &pdev->dev);
-
-#ifdef SK_ZEROCOPY
-#ifdef USE_SK_TX_CHECKSUM
-
- if (pAC->ChipsetType) {
- /* Use only if yukon hardware */
- /* SK and ZEROCOPY - fly baby... */
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- }
-#endif
-#endif
-
- pAC->Index = boards_found;
-
- if (SkGeBoardInit(dev, pAC)) {
- free_netdev(dev);
- continue;
- }
-
- /* Register net device */
- if (register_netdev(dev)) {
- printk(KERN_ERR "SKGE: Could not register device.\n");
- FreeResources(dev);
- free_netdev(dev);
- continue;
- }
-
- /* Print adapter specific string from vpd */
- ProductStr(pAC);
- printk("%s: %s\n", dev->name, pAC->DeviceStr);
-
- /* Print configuration settings */
- printk(" PrefPort:%c RlmtMode:%s\n",
- 'A' + pAC->Rlmt.Net[0].Port[pAC->Rlmt.Net[0].PrefPort]->PortNumber,
- (pAC->RlmtMode==0) ? "Check Link State" :
- ((pAC->RlmtMode==1) ? "Check Link State" :
- ((pAC->RlmtMode==3) ? "Check Local Port" :
- ((pAC->RlmtMode==7) ? "Check Segmentation" :
- ((pAC->RlmtMode==17) ? "Dual Check Link State" :"Error")))));
-
- SkGeYellowLED(pAC, pAC->IoBase, 1);
-
-
- memcpy((caddr_t) &dev->dev_addr,
- (caddr_t) &pAC->Addr.Net[0].CurrentMacAddress, 6);
-
- /* First adapter... Create proc and print message */
-#ifdef CONFIG_PROC_FS
- if (!DeviceFound) {
- DeviceFound = SK_TRUE;
- SK_MEMCPY(&SK_Root_Dir_entry, BootString,
- sizeof(SK_Root_Dir_entry) - 1);
-
- /*Create proc (directory)*/
- if(!pSkRootDir) {
- pSkRootDir = proc_mkdir(SK_Root_Dir_entry, proc_net);
- if (!pSkRootDir) {
- printk(KERN_WARNING "%s: Unable to create /proc/net/%s",
- dev->name, SK_Root_Dir_entry);
- } else {
- pSkRootDir->owner = THIS_MODULE;
- }
- }
- }
-
- /* Create proc file */
- if (pSkRootDir &&
- (pProcFile = create_proc_entry(dev->name, S_IRUGO,
- pSkRootDir))) {
- pProcFile->proc_fops = &sk_proc_fops;
- pProcFile->data = dev;
- }
-
-#endif
-
- pNet->PortNr = 0;
- pNet->NetNr = 0;
-
- boards_found++;
-
- /* More then one port found */
- if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
- if ((dev = alloc_etherdev(sizeof(DEV_NET))) == 0) {
- printk(KERN_ERR "Unable to allocate etherdev "
- "structure!\n");
- break;
- }
-
- pAC->dev[1] = dev;
- pNet = dev->priv;
- pNet->PortNr = 1;
- pNet->NetNr = 1;
- pNet->pAC = pAC;
- pNet->Mtu = 1500;
- pNet->Up = 0;
-
- dev->open = &SkGeOpen;
- dev->stop = &SkGeClose;
- dev->hard_start_xmit = &SkGeXmit;
- dev->get_stats = &SkGeStats;
- dev->last_stats = &SkGeStats;
- dev->set_multicast_list = &SkGeSetRxMode;
- dev->set_mac_address = &SkGeSetMacAddr;
- dev->do_ioctl = &SkGeIoctl;
- dev->change_mtu = &SkGeChangeMtu;
- dev->flags &= ~IFF_RUNNING;
-
-#ifdef SK_ZEROCOPY
-#ifdef USE_SK_TX_CHECKSUM
- if (pAC->ChipsetType) {
- /* SG and ZEROCOPY - fly baby... */
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- }
-#endif
-#endif
-
- if (register_netdev(dev)) {
- printk(KERN_ERR "SKGE: Could not register device.\n");
- free_netdev(dev);
- pAC->dev[1] = pAC->dev[0];
- } else {
-#ifdef CONFIG_PROC_FS
- if (pSkRootDir
- && (pProcFile = create_proc_entry(dev->name,
- S_IRUGO, pSkRootDir))) {
- pProcFile->proc_fops = &sk_proc_fops;
- pProcFile->data = dev;
- }
-#endif
-
- memcpy((caddr_t) &dev->dev_addr,
- (caddr_t) &pAC->Addr.Net[1].CurrentMacAddress, 6);
-
- printk("%s: %s\n", dev->name, pAC->DeviceStr);
- printk(" PrefPort:B RlmtMode:Dual Check Link State\n");
- }
- }
-
- /* Save the hardware revision */
- pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) +
- (pAC->GIni.GIPciHwRev & 0x0F);
-
- /* Set driver globals */
- pAC->Pnmi.pDriverFileName = DRIVER_FILE_NAME;
- pAC->Pnmi.pDriverReleaseDate = DRIVER_REL_DATE;
-
- SK_MEMSET(&(pAC->PnmiBackup), 0, sizeof(SK_PNMI_STRUCT_DATA));
- SK_MEMCPY(&(pAC->PnmiBackup), &(pAC->PnmiStruct),
- sizeof(SK_PNMI_STRUCT_DATA));
-
- /*
- * This is bollocks, but we need to tell the net-init
- * code that it shall go for the next device.
- */
-#ifndef MODULE
- dev->base_addr = 0;
-#endif
- }
-
- /*
- * If we're at this point we're going through skge_probe() for
- * the first time. Return success (0) if we've initialized 1
- * or more boards. Otherwise, return failure (-ENODEV).
- */
-
- return boards_found;
-} /* skge_probe */
-
-
/*****************************************************************************
*
* SkGeInitPCI - Init the PCI resources
@@ -666,9 +376,6 @@ MODULE_PARM(Role_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s");
MODULE_PARM(ConType, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s");
MODULE_PARM(PrefPort, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s");
MODULE_PARM(RlmtMode, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s");
-/* not used, just there because every driver should have them: */
-MODULE_PARM(options, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "i");
-MODULE_PARM(debug, "i");
/* used for interrupt moderation */
MODULE_PARM(IntsPerSec, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "i");
MODULE_PARM(Moderation, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s");
@@ -755,123 +462,12 @@ static char *RlmtMode[SK_MAX_CARD_PARAM] = RLMT_MODE;
static char *RlmtMode[SK_MAX_CARD_PARAM] = {"", };
#endif
-static int debug = 0; /* not used */
-static int options[SK_MAX_CARD_PARAM] = {0, }; /* not used */
-
static int IntsPerSec[SK_MAX_CARD_PARAM];
static char *Moderation[SK_MAX_CARD_PARAM];
static char *ModerationMask[SK_MAX_CARD_PARAM];
static char *AutoSizing[SK_MAX_CARD_PARAM];
static char *Stats[SK_MAX_CARD_PARAM];
-
-/*****************************************************************************
- *
- * skge_init_module - module initialization function
- *
- * Description:
- * Very simple, only call skge_probe and return approriate result.
- *
- * Returns:
- * 0, if everything is ok
- * !=0, on error
- */
-static int __init skge_init_module(void)
-{
- int cards;
- SkGeRootDev = NULL;
-
- /* just to avoid warnings ... */
- debug = 0;
- options[0] = 0;
-
- cards = skge_probe();
- if (cards == 0) {
- printk("sk98lin: No adapter found.\n");
- }
- return cards ? 0 : -ENODEV;
-} /* skge_init_module */
-
-
-/*****************************************************************************
- *
- * skge_cleanup_module - module unload function
- *
- * Description:
- * Disable adapter if it is still running, free resources,
- * free device struct.
- *
- * Returns: N/A
- */
-static void __exit skge_cleanup_module(void)
-{
-DEV_NET *pNet;
-SK_AC *pAC;
-struct SK_NET_DEVICE *next;
-unsigned long Flags;
-SK_EVPARA EvPara;
-
- while (SkGeRootDev) {
- pNet = (DEV_NET*) SkGeRootDev->priv;
- pAC = pNet->pAC;
- next = pAC->Next;
-
- netif_stop_queue(SkGeRootDev);
- SkGeYellowLED(pAC, pAC->IoBase, 0);
-
- if(pAC->BoardLevel == SK_INIT_RUN) {
- /* board is still alive */
- spin_lock_irqsave(&pAC->SlowPathLock, Flags);
- EvPara.Para32[0] = 0;
- EvPara.Para32[1] = -1;
- SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
- EvPara.Para32[0] = 1;
- EvPara.Para32[1] = -1;
- SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
- SkEventDispatcher(pAC, pAC->IoBase);
- /* disable interrupts */
- SK_OUT32(pAC->IoBase, B0_IMSK, 0);
- SkGeDeInit(pAC, pAC->IoBase);
- spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
- pAC->BoardLevel = SK_INIT_DATA;
- /* We do NOT check here, if IRQ was pending, of course*/
- }
-
- if(pAC->BoardLevel == SK_INIT_IO) {
- /* board is still alive */
- SkGeDeInit(pAC, pAC->IoBase);
- pAC->BoardLevel = SK_INIT_DATA;
- }
-
- if ((pAC->GIni.GIMacsFound == 2) && pAC->RlmtNets == 2){
- unregister_netdev(pAC->dev[1]);
- free_netdev(pAC->dev[1]);
- }
-
- FreeResources(SkGeRootDev);
-
- SkGeRootDev->get_stats = NULL;
- /*
- * otherwise unregister_netdev calls get_stats with
- * invalid IO ... :-(
- */
- unregister_netdev(SkGeRootDev);
- free_netdev(SkGeRootDev);
- kfree(pAC);
- SkGeRootDev = next;
- }
-
-#ifdef CONFIG_PROC_FS
- /* clear proc-dir */
- remove_proc_entry(pSkRootDir->name, proc_net);
-#endif
-
-} /* skge_cleanup_module */
-
-module_init(skge_init_module);
-module_exit(skge_cleanup_module);
-
-
/*****************************************************************************
*
* SkGeBoardInit - do level 0 and 1 initialization
@@ -3094,8 +2690,7 @@ SK_EVPARA EvPara;
SkEventDispatcher(pAC, pAC->IoBase);
for (i=0; i<pAC->GIni.GIMacsFound; i++) {
- spin_lock_irqsave(
- &pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock, Flags);
+ spin_lock(&pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock);
netif_stop_queue(pAC->dev[i]);
}
@@ -4774,12 +4369,10 @@ SK_BOOL DualNet;
spin_lock_irqsave(
&pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
Flags);
- spin_lock_irqsave(
- &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags);
+ spin_lock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_SOFT_RST);
SkGeStopPort(pAC, IoC, ToPort, SK_STOP_ALL, SK_SOFT_RST);
- spin_unlock_irqrestore(
- &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags);
+ spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
spin_unlock_irqrestore(
&pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
Flags);
@@ -4792,8 +4385,7 @@ SK_BOOL DualNet;
spin_lock_irqsave(
&pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
Flags);
- spin_lock_irqsave(
- &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags);
+ spin_lock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
pAC->ActivePort = ToPort;
#if 0
SetQueueSizes(pAC);
@@ -4808,8 +4400,7 @@ SK_BOOL DualNet;
pAC,
pAC->ActivePort,
DualNet)) {
- spin_unlock_irqrestore(
- &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags);
+ spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
spin_unlock_irqrestore(
&pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
Flags);
@@ -4835,8 +4426,7 @@ SK_BOOL DualNet;
SkGePollTxD(pAC, IoC, ToPort, SK_TRUE);
ClearAndStartRx(pAC, FromPort);
ClearAndStartRx(pAC, ToPort);
- spin_unlock_irqrestore(
- &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags);
+ spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
spin_unlock_irqrestore(
&pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
Flags);
@@ -5311,8 +4901,316 @@ int l;
#endif
-/*******************************************************************************
- *
- * End of file
- *
- ******************************************************************************/
+static int __devinit skge_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ SK_AC *pAC;
+ DEV_NET *pNet = NULL;
+ struct net_device *dev = NULL;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *pProcFile;
+#endif
+ static int boards_found = 0;
+ int error = -ENODEV;
+
+ if (pci_enable_device(pdev))
+ goto out;
+
+ /* Configure DMA attributes. */
+ if (pci_set_dma_mask(pdev, (u64) 0xffffffffffffffffULL) &&
+ pci_set_dma_mask(pdev, (u64) 0xffffffff))
+ goto out_disable_device;
+
+
+ if ((dev = alloc_etherdev(sizeof(DEV_NET))) == NULL) {
+ printk(KERN_ERR "Unable to allocate etherdev "
+ "structure!\n");
+ goto out_disable_device;
+ }
+
+ pNet = dev->priv;
+ pNet->pAC = kmalloc(sizeof(SK_AC), GFP_KERNEL);
+ if (!pNet->pAC) {
+ printk(KERN_ERR "Unable to allocate adapter "
+ "structure!\n");
+ goto out_free_netdev;
+ }
+
+ memset(pNet->pAC, 0, sizeof(SK_AC));
+ pAC = pNet->pAC;
+ pAC->PciDev = pdev;
+ pAC->PciDevId = pdev->device;
+ pAC->dev[0] = dev;
+ pAC->dev[1] = dev;
+ sprintf(pAC->Name, "SysKonnect SK-98xx");
+ pAC->CheckQueue = SK_FALSE;
+
+ pNet->Mtu = 1500;
+ pNet->Up = 0;
+ dev->irq = pdev->irq;
+ error = SkGeInitPCI(pAC);
+ if (error) {
+ printk("SKGE: PCI setup failed: %i\n", error);
+ goto out_free_netdev;
+ }
+
+ SET_MODULE_OWNER(dev);
+ dev->open = &SkGeOpen;
+ dev->stop = &SkGeClose;
+ dev->hard_start_xmit = &SkGeXmit;
+ dev->get_stats = &SkGeStats;
+ dev->last_stats = &SkGeStats;
+ dev->set_multicast_list = &SkGeSetRxMode;
+ dev->set_mac_address = &SkGeSetMacAddr;
+ dev->do_ioctl = &SkGeIoctl;
+ dev->change_mtu = &SkGeChangeMtu;
+ dev->flags &= ~IFF_RUNNING;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#ifdef SK_ZEROCOPY
+#ifdef USE_SK_TX_CHECKSUM
+ if (pAC->ChipsetType) {
+ /* Use only if yukon hardware */
+ /* SK and ZEROCOPY - fly baby... */
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ }
+#endif
+#endif
+
+ pAC->Index = boards_found++;
+
+ if (SkGeBoardInit(dev, pAC))
+ goto out_free_netdev;
+
+ /* Register net device */
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "SKGE: Could not register device.\n");
+ goto out_free_resources;
+ }
+
+ /* Print adapter specific string from vpd */
+ ProductStr(pAC);
+ printk("%s: %s\n", dev->name, pAC->DeviceStr);
+
+ /* Print configuration settings */
+ printk(" PrefPort:%c RlmtMode:%s\n",
+ 'A' + pAC->Rlmt.Net[0].Port[pAC->Rlmt.Net[0].PrefPort]->PortNumber,
+ (pAC->RlmtMode==0) ? "Check Link State" :
+ ((pAC->RlmtMode==1) ? "Check Link State" :
+ ((pAC->RlmtMode==3) ? "Check Local Port" :
+ ((pAC->RlmtMode==7) ? "Check Segmentation" :
+ ((pAC->RlmtMode==17) ? "Dual Check Link State" :"Error")))));
+
+ SkGeYellowLED(pAC, pAC->IoBase, 1);
+
+
+ memcpy(&dev->dev_addr, &pAC->Addr.Net[0].CurrentMacAddress, 6);
+
+#ifdef CONFIG_PROC_FS
+ pProcFile = create_proc_entry(dev->name, S_IRUGO, pSkRootDir);
+ if (pProcFile) {
+ pProcFile->proc_fops = &sk_proc_fops;
+ pProcFile->data = dev;
+ pProcFile->owner = THIS_MODULE;
+ }
+#endif
+
+ pNet->PortNr = 0;
+ pNet->NetNr = 0;
+
+ boards_found++;
+
+	/* More than one port found */
+ if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
+ if ((dev = alloc_etherdev(sizeof(DEV_NET))) == 0) {
+ printk(KERN_ERR "Unable to allocate etherdev "
+ "structure!\n");
+ goto out;
+ }
+
+ pAC->dev[1] = dev;
+ pNet = dev->priv;
+ pNet->PortNr = 1;
+ pNet->NetNr = 1;
+ pNet->pAC = pAC;
+ pNet->Mtu = 1500;
+ pNet->Up = 0;
+
+ dev->open = &SkGeOpen;
+ dev->stop = &SkGeClose;
+ dev->hard_start_xmit = &SkGeXmit;
+ dev->get_stats = &SkGeStats;
+ dev->last_stats = &SkGeStats;
+ dev->set_multicast_list = &SkGeSetRxMode;
+ dev->set_mac_address = &SkGeSetMacAddr;
+ dev->do_ioctl = &SkGeIoctl;
+ dev->change_mtu = &SkGeChangeMtu;
+ dev->flags &= ~IFF_RUNNING;
+
+#ifdef SK_ZEROCOPY
+#ifdef USE_SK_TX_CHECKSUM
+ if (pAC->ChipsetType) {
+ /* SG and ZEROCOPY - fly baby... */
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ }
+#endif
+#endif
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "SKGE: Could not register device.\n");
+ free_netdev(dev);
+ pAC->dev[1] = pAC->dev[0];
+ } else {
+#ifdef CONFIG_PROC_FS
+ pProcFile = create_proc_entry(dev->name, S_IRUGO,
+ pSkRootDir);
+ if (pProcFile) {
+ pProcFile->proc_fops = &sk_proc_fops;
+ pProcFile->data = dev;
+ pProcFile->owner = THIS_MODULE;
+ }
+#endif
+
+ memcpy(&dev->dev_addr,
+ &pAC->Addr.Net[1].CurrentMacAddress, 6);
+
+ printk("%s: %s\n", dev->name, pAC->DeviceStr);
+ printk(" PrefPort:B RlmtMode:Dual Check Link State\n");
+ }
+ }
+
+ /* Save the hardware revision */
+ pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) +
+ (pAC->GIni.GIPciHwRev & 0x0F);
+
+ /* Set driver globals */
+ pAC->Pnmi.pDriverFileName = DRIVER_FILE_NAME;
+ pAC->Pnmi.pDriverReleaseDate = DRIVER_REL_DATE;
+
+ memset(&pAC->PnmiBackup, 0, sizeof(SK_PNMI_STRUCT_DATA));
+ memcpy(&pAC->PnmiBackup, &pAC->PnmiStruct, sizeof(SK_PNMI_STRUCT_DATA));
+
+ pci_set_drvdata(pdev, dev);
+ return 0;
+
+ out_free_resources:
+ FreeResources(dev);
+ out_free_netdev:
+ free_netdev(dev);
+ out_disable_device:
+ pci_disable_device(pdev);
+ out:
+ return error;
+}
+
+static void __devexit skge_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ DEV_NET *pNet = (DEV_NET *) dev->priv;
+ SK_AC *pAC = pNet->pAC;
+ int have_second_mac = 0;
+
+ if ((pAC->GIni.GIMacsFound == 2) && pAC->RlmtNets == 2)
+ have_second_mac = 1;
+
+ unregister_netdev(dev);
+ if (have_second_mac)
+ unregister_netdev(pAC->dev[1]);
+
+ SkGeYellowLED(pAC, pAC->IoBase, 0);
+
+ if (pAC->BoardLevel == SK_INIT_RUN) {
+ SK_EVPARA EvPara;
+ unsigned long Flags;
+
+ /* board is still alive */
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ EvPara.Para32[0] = 0;
+ EvPara.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
+ EvPara.Para32[0] = 1;
+ EvPara.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
+ SkEventDispatcher(pAC, pAC->IoBase);
+ /* disable interrupts */
+ SK_OUT32(pAC->IoBase, B0_IMSK, 0);
+ SkGeDeInit(pAC, pAC->IoBase);
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+ pAC->BoardLevel = SK_INIT_DATA;
+ /* We do NOT check here, if IRQ was pending, of course*/
+ }
+
+ if (pAC->BoardLevel == SK_INIT_IO) {
+ /* board is still alive */
+ SkGeDeInit(pAC, pAC->IoBase);
+ pAC->BoardLevel = SK_INIT_DATA;
+ }
+
+ FreeResources(dev);
+ free_netdev(dev);
+ if (have_second_mac)
+ free_netdev(pAC->dev[1]);
+ kfree(pAC);
+}
+
+static struct pci_device_id skge_pci_tbl[] = {
+ { PCI_VENDOR_ID_3COM, 0x1700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+#if 0 /* don't handle Yukon2 cards at the moment -- mlindner@syskonnect.de */
+ { PCI_VENDOR_ID_MARVELL, 0x4360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_MARVELL, 0x4361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+#endif
+ { PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0, }
+};
+
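The hand-rolled SK_PCI_ISCOMPLIANT vendor/device checks are gone; the PCI core now matches devices against skge_pci_tbl and calls skge_probe_one() for each hit. The matching itself is a table walk with PCI_ANY_ID wildcards, roughly as in this simplified user-space sketch (real matching also covers subsystem IDs and class masks; the wildcard entry below is illustrative, not taken from the table above):

#include <stdio.h>

#define ANY_ID 0xffffffffu

struct id { unsigned vendor, device; };

static const struct id tbl[] = {
	{ 0x1148, 0x4300 },        /* SysKonnect SK-98xx */
	{ 0x1148, 0x4320 },        /* SK-98xx V2.0 */
	{ 0x11ab, ANY_ID },        /* illustrative wildcard entry */
	{ 0, 0 }                   /* terminator */
};

static const struct id *match(unsigned vendor, unsigned device)
{
	const struct id *p;

	for (p = tbl; p->vendor; p++)
		if ((p->vendor == ANY_ID || p->vendor == vendor) &&
		    (p->device == ANY_ID || p->device == device))
			return p;
	return NULL;
}

int main(void)
{
	printf("%s\n", match(0x1148, 0x4320) ? "probe" : "skip");
	printf("%s\n", match(0x10ec, 0x8169) ? "probe" : "skip");
	return 0;
}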
+static struct pci_driver skge_driver = {
+ .name = "skge",
+ .id_table = skge_pci_tbl,
+ .probe = skge_probe_one,
+ .remove = __devexit_p(skge_remove_one),
+};
+
+static int __init skge_init(void)
+{
+ int error;
+
+ memcpy(&SK_Root_Dir_entry, BOOT_STRING, sizeof(SK_Root_Dir_entry) - 1);
+
+#ifdef CONFIG_PROC_FS
+ pSkRootDir = proc_mkdir(SK_Root_Dir_entry, proc_net);
+ if (!pSkRootDir) {
+ printk(KERN_WARNING "Unable to create /proc/net/%s",
+ SK_Root_Dir_entry);
+ return -ENOMEM;
+ }
+ pSkRootDir->owner = THIS_MODULE;
+#endif
+
+ error = pci_module_init(&skge_driver);
+ if (error) {
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(pSkRootDir->name, proc_net);
+#endif
+ }
+
+ return error;
+}
+
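skge_init() acquires two resources in order, the /proc/net/sk98lin directory and the PCI driver registration, and unwinds the first if the second fails; skge_exit() releases them in reverse order. The same unwind shape in a tiny user-space sketch with hypothetical resources:

#include <stdio.h>

static int make_proc_dir(void)   { puts("proc dir created");  return 0; }
static void remove_proc_dir(void){ puts("proc dir removed"); }
static int register_driver(void) { puts("driver register failed"); return -1; }

/* Acquire A then B; on B's failure, release A before reporting the error. */
static int init(void)
{
	int err = make_proc_dir();
	if (err)
		return err;

	err = register_driver();
	if (err)
		remove_proc_dir();     /* unwind the earlier step */
	return err;
}

int main(void)
{
	printf("init -> %d\n", init());
	return 0;
}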
+static void __exit skge_exit(void)
+{
+ pci_unregister_driver(&skge_driver);
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(pSkRootDir->name, proc_net);
+#endif
+}
+
+module_init(skge_init);
+module_exit(skge_exit);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 6c92063013595a..e1c6f3c799a020 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -125,11 +125,16 @@
LK1.1.19 (Roger Luethi)
- Increase Tx threshold for unspecified errors
+ LK1.2.0-2.6 (Roger Luethi)
+ - Massive clean-up
+ - Rewrite PHY, media handling (remove options, full_duplex, backoff)
+ - Fix Tx engine race for good
+
*/
#define DRV_NAME "via-rhine"
-#define DRV_VERSION "1.1.20-2.6"
-#define DRV_RELDATE "May-23-2004"
+#define DRV_VERSION "1.2.0-2.6"
+#define DRV_RELDATE "June-10-2004"
/* A few user-configurable values.
@@ -142,22 +147,10 @@ static int max_interrupt_work = 20;
Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
-/* Select a backoff algorithm (Ethernet capture effect) */
-static int backoff;
-
-/* Used to pass the media type, etc.
- Both 'options[]' and 'full_duplex[]' should exist for driver
- interoperability.
- The media type is usually passed in 'options[]'.
- The default is autonegotiation for speed and duplex.
- This should rarely be overridden.
- Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
- Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
- Use option values 0x20 and 0x200 for forcing full duplex operation.
-*/
-#define MAX_UNITS 8 /* More are supported, limit only on options */
-static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+/*
+ * In case you are looking for 'options[]' or 'full_duplex[]', they
+ * are gone. Use ethtool(8) instead.
+ */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
The Rhine has a 64 element 8390-like hash table. */
@@ -210,9 +203,6 @@ static const int multicast_filter_limit = 32;
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
-static char shortname[] = DRV_NAME;
-
-
/* This driver was written to use PCI memory space. Some early versions
of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
@@ -239,15 +229,9 @@ MODULE_LICENSE("GPL");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
-MODULE_PARM(backoff, "i");
-MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
-MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
-MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm");
-MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
-MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");
/*
Theory of Operation
@@ -350,24 +334,24 @@ The chip does not pad to minimum transmit length.
enum rhine_revs {
VT86C100A = 0x00,
+ VTunknown0 = 0x20,
VT6102 = 0x40,
VT8231 = 0x50, /* Integrated MAC */
VT8233 = 0x60, /* Integrated MAC */
VT8235 = 0x74, /* Integrated MAC */
VT8237 = 0x78, /* Integrated MAC */
- VTunknown0 = 0x7C,
+ VTunknown1 = 0x7C,
VT6105 = 0x80,
VT6105_B0 = 0x83,
VT6105L = 0x8A,
VT6107 = 0x8C,
- VTunknown1 = 0x8E,
+ VTunknown2 = 0x8E,
VT6105M = 0x90,
};
enum rhine_quirks {
rqWOL = 0x0001, /* Wake-On-LAN support */
rqForceReset = 0x0002,
- rqDavicomPhy = 0x0020,
rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
rqRhineI = 0x0100, /* See comment below */
@@ -395,6 +379,7 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
/* Offsets to the device registers. */
enum register_offsets {
StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
+ ChipCmd1=0x09,
IntrStatus=0x0C, IntrEnable=0x0E,
MulticastFilter0=0x10, MulticastFilter1=0x14,
RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
@@ -403,8 +388,8 @@ enum register_offsets {
ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
StickyHW=0x83, IntrStatus2=0x84,
- WOLcrSet=0xA0, WOLcrClr=0xA4, WOLcrClr1=0xA6,
- WOLcgClr=0xA7,
+ WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
+ WOLcrClr1=0xA6, WOLcgClr=0xA7,
PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};
@@ -436,6 +421,15 @@ enum intr_status_bits {
IntrTxErrSummary=0x082218,
};
+/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
+enum wol_bits {
+ WOLucast = 0x10,
+ WOLmagic = 0x20,
+ WOLbmcast = 0x30,
+ WOLlnkon = 0x40,
+ WOLlnkoff = 0x80,
+};
+
/* The Rx and Tx buffer descriptors. */
struct rx_desc {
s32 rx_status;
@@ -464,13 +458,12 @@ enum desc_status_bits {
/* Bits in ChipCmd. */
enum chip_cmd_bits {
- CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
- CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
- CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
- CmdNoTxPoll=0x0800, CmdReset=0x8000,
+ CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
+ CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
+ Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
+ Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
-#define MAX_MII_CNT 4
struct rhine_private {
/* Descriptor rings */
struct rx_desc *rx_ring;
@@ -493,7 +486,6 @@ struct rhine_private {
struct pci_dev *pdev;
struct net_device_stats stats;
- struct timer_list timer; /* Media monitoring timer. */
spinlock_t lock;
/* Frequently used values: keep some adjacent for cache effect. */
@@ -502,23 +494,16 @@ struct rhine_private {
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
- u16 chip_cmd; /* Current setting for ChipCmd */
+ u8 wolopts;
- /* These values are keep track of the transceiver/media in use. */
u8 tx_thresh, rx_thresh;
- /* MII transceiver section. */
- unsigned char phys[MAX_MII_CNT]; /* MII device addresses. */
- unsigned int mii_cnt; /* number of MIIs found, but only the first one is used */
- u16 mii_status; /* last read MII status */
struct mii_if_info mii_if;
};
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
-static void rhine_check_duplex(struct net_device *dev);
-static void rhine_timer(unsigned long data);
static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
@@ -530,6 +515,16 @@ static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
+static void rhine_shutdown (struct device *gdev);
+
+#define RHINE_WAIT_FOR(condition) do { \
+ int i=1024; \
+ while (!(condition) && --i) \
+ ; \
+ if (debug > 1 && i < 512) \
+ printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \
+ DRV_NAME, 1024-i, __func__, __LINE__); \
+} while(0)
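For reference, RHINE_WAIT_FOR() replaces the driver's old open-coded "boguscnt" busy-wait loops. A hand-expanded equivalent of one typical call site, shown purely for illustration (it assumes the enclosing function's ioaddr plus the driver's debug and DRV_NAME; the 1024-iteration budget and the 512-cycle reporting threshold come from the macro body above):

	/* expansion of RHINE_WAIT_FOR(!(readb(ioaddr + ChipCmd1) & Cmd1Reset)) */
	{
		int i = 1024;

		while ((readb(ioaddr + ChipCmd1) & Cmd1Reset) && --i)
			;	/* spin until the reset bit clears or the budget runs out */
		if (debug > 1 && i < 512)
			printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",
			       DRV_NAME, 1024 - i, __func__, __LINE__);
	}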
static inline u32 get_intr_status(struct net_device *dev)
{
@@ -546,12 +541,13 @@ static inline u32 get_intr_status(struct net_device *dev)
/*
* Get power related registers into sane state.
- * Returns content of power-event (WOL) registers.
+ * Notify user about past WOL event.
*/
static void rhine_power_init(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
+ u16 wolstat;
if (rp->quirks & rqWOL) {
/* Make sure chip is in power state D0 */
@@ -566,63 +562,109 @@ static void rhine_power_init(struct net_device *dev)
if (rp->quirks & rq6patterns)
writeb(0x03, ioaddr + WOLcrClr1);
+ /* Save power-event status bits */
+ wolstat = readb(ioaddr + PwrcsrSet);
+ if (rp->quirks & rq6patterns)
+ wolstat |= (readb(ioaddr + PwrcsrSet1) & 0x03) << 8;
+
/* Clear power-event status bits */
writeb(0xFF, ioaddr + PwrcsrClr);
if (rp->quirks & rq6patterns)
writeb(0x03, ioaddr + PwrcsrClr1);
+
+ if (wolstat) {
+ char *reason;
+ switch (wolstat) {
+ case WOLmagic:
+ reason = "Magic packet";
+ break;
+ case WOLlnkon:
+ reason = "Link went up";
+ break;
+ case WOLlnkoff:
+ reason = "Link went down";
+ break;
+ case WOLucast:
+ reason = "Unicast packet";
+ break;
+ case WOLbmcast:
+ reason = "Multicast/broadcast packet";
+ break;
+ default:
+ reason = "Unknown";
+ }
+ printk("%s: Woke system up. Reason: %s.\n",
+ DRV_NAME, reason);
+ }
}
}
-static void wait_for_reset(struct net_device *dev, u32 quirks, char *name)
+static void rhine_chip_reset(struct net_device *dev)
{
long ioaddr = dev->base_addr;
- int boguscnt = 20;
+ struct rhine_private *rp = netdev_priv(dev);
+ writeb(Cmd1Reset, ioaddr + ChipCmd1);
IOSYNC;
- if (readw(ioaddr + ChipCmd) & CmdReset) {
+ if (readb(ioaddr + ChipCmd1) & Cmd1Reset) {
printk(KERN_INFO "%s: Reset not complete yet. "
- "Trying harder.\n", name);
+ "Trying harder.\n", DRV_NAME);
- /* Rhine-II needs to be forced sometimes */
- if (quirks & rqForceReset)
+ /* Force reset */
+ if (rp->quirks & rqForceReset)
writeb(0x40, ioaddr + MiscCmd);
- /* VT86C100A may need long delay after reset (dlink) */
- /* Seen on Rhine-II as well (rl) */
- while ((readw(ioaddr + ChipCmd) & CmdReset) && --boguscnt)
- udelay(5);
-
+ /* Reset can take somewhat longer (rare) */
+ RHINE_WAIT_FOR(!(readb(ioaddr + ChipCmd1) & Cmd1Reset));
}
if (debug > 1)
- printk(KERN_INFO "%s: Reset %s.\n", name,
- boguscnt ? "succeeded" : "failed");
+ printk(KERN_INFO "%s: Reset %s.\n", dev->name,
+ (readb(ioaddr + ChipCmd1) & Cmd1Reset) ?
+ "failed" : "succeeded");
}
#ifdef USE_MMIO
-static void __devinit enable_mmio(long ioaddr, u32 quirks)
+static void __devinit enable_mmio(long pioaddr, u32 quirks)
{
int n;
if (quirks & rqRhineI) {
/* More recent docs say that this bit is reserved ... */
- n = inb(ioaddr + ConfigA) | 0x20;
- outb(n, ioaddr + ConfigA);
+ n = inb(pioaddr + ConfigA) | 0x20;
+ outb(n, pioaddr + ConfigA);
} else {
- n = inb(ioaddr + ConfigD) | 0x80;
- outb(n, ioaddr + ConfigD);
+ n = inb(pioaddr + ConfigD) | 0x80;
+ outb(n, pioaddr + ConfigD);
}
}
#endif
-static void __devinit reload_eeprom(long ioaddr)
+/*
+ * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
+ * (plus 0x6C for Rhine-I/II)
+ */
+static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
- int i;
- outb(0x20, ioaddr + MACRegEEcsr);
- /* Typically 2 cycles to reload. */
- for (i = 0; i < 150; i++)
- if (! (inb(ioaddr + MACRegEEcsr) & 0x20))
- break;
+ long ioaddr = dev->base_addr;
+ struct rhine_private *rp = netdev_priv(dev);
+
+ outb(0x20, pioaddr + MACRegEEcsr);
+ RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
+
+#ifdef USE_MMIO
+ /*
+ * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
+ * MMIO. If reloading EEPROM was done first this could be avoided, but
+ * it is not known if that still works with the "win98-reboot" problem.
+ */
+ enable_mmio(pioaddr, rp->quirks);
+#endif
+
+ /* Turn off EEPROM-controlled wake-up (magic packet) */
+ if (rp->quirks & rqWOL)
+ writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
+
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -634,23 +676,34 @@ static void rhine_poll(struct net_device *dev)
}
#endif
+static void rhine_hw_init(struct net_device *dev, long pioaddr)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ rhine_chip_reset(dev);
+
+ /* Rhine-I needs extra time to recuperate before EEPROM reload */
+ if (rp->quirks & rqRhineI)
+ msleep(5);
+
+ /* Reload EEPROM controlled bytes cleared by soft reset */
+ rhine_reload_eeprom(pioaddr, dev);
+}
+
static int __devinit rhine_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev;
struct rhine_private *rp;
- int i, option, rc;
+ int i, rc;
u8 pci_rev;
u32 quirks;
- static int card_idx = -1;
- long ioaddr;
+ long pioaddr;
long memaddr;
- int io_size;
- int phy, phy_idx = 0;
-#ifdef USE_MMIO
- long ioaddr0;
-#endif
- const char *name;
+ long ioaddr;
+ int io_size, phy_id;
+ const char *name, *mname;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -659,26 +712,47 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
printk(version);
#endif
- card_idx++;
- option = card_idx < MAX_UNITS ? options[card_idx] : 0;
pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
io_size = 256;
- if (pci_rev < VT6102) {
- quirks = rqRhineI | rqDavicomPhy;
+ phy_id = 0;
+ quirks = 0;
+ name = "Rhine";
+ mname = "unknown";
+ if (pci_rev < VTunknown0) {
+ quirks = rqRhineI;
io_size = 128;
- name = "VT86C100A Rhine";
+ mname = "VT86C100A";
}
- else {
+ else if (pci_rev >= VT6102) {
quirks = rqWOL | rqForceReset;
if (pci_rev < VT6105) {
name = "Rhine II";
quirks |= rqStatusWBRace; /* Rhine-II exclusive */
+ if (pci_rev < VT8231)
+ mname = "VT6102";
+ else if (pci_rev < VT8233)
+ mname = "VT8231";
+ else if (pci_rev < VT8235)
+ mname = "VT8233";
+ else if (pci_rev < VT8237)
+ mname = "VT8235";
+ else if (pci_rev < VTunknown1)
+ mname = "VT8237";
}
else {
name = "Rhine III";
+ phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
if (pci_rev >= VT6105_B0)
quirks |= rq6patterns;
+ if (pci_rev < VT6105L)
+ mname = "VT6105";
+ else if (pci_rev < VT6107)
+ mname = "VT6105L";
+ else if (pci_rev < VT6105M)
+ mname = "VT6107";
+ else if (pci_rev >= VT6105M)
+ mname = "Management Adapter VT6105M";
}
}
@@ -702,28 +776,26 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
goto err_out;
}
- ioaddr = pci_resource_start(pdev, 0);
+ pioaddr = pci_resource_start(pdev, 0);
memaddr = pci_resource_start(pdev, 1);
pci_set_master(pdev);
- dev = alloc_etherdev(sizeof(*rp));
- if (dev == NULL) {
+ dev = alloc_etherdev(sizeof(struct rhine_private));
+ if (!dev) {
rc = -ENOMEM;
- printk(KERN_ERR "init_ethernet failed for card #%d\n",
- card_idx);
+ printk(KERN_ERR "alloc_etherdev failed\n");
goto err_out;
}
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- rc = pci_request_regions(pdev, shortname);
+ rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out_free_netdev;
#ifdef USE_MMIO
- ioaddr0 = ioaddr;
- enable_mmio(ioaddr0, quirks);
+ enable_mmio(pioaddr, quirks);
ioaddr = (long) ioremap(memaddr, io_size);
if (!ioaddr) {
@@ -737,7 +809,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
i = 0;
while (mmio_verify_registers[i]) {
int reg = mmio_verify_registers[i++];
- unsigned char a = inb(ioaddr0+reg);
+ unsigned char a = inb(pioaddr+reg);
unsigned char b = readb(ioaddr+reg);
if (a != b) {
rc = -EIO;
@@ -746,51 +818,30 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
goto err_out_unmap;
}
}
+#else
+ ioaddr = pioaddr;
#endif /* USE_MMIO */
+
dev->base_addr = ioaddr;
rp = netdev_priv(dev);
rp->quirks = quirks;
+ /* Get chip registers into a sane state */
rhine_power_init(dev);
-
- /* Reset the chip to erase previous misconfiguration. */
- writew(CmdReset, ioaddr + ChipCmd);
-
- wait_for_reset(dev, quirks, shortname);
-
- /* Reload the station address from the EEPROM. */
-#ifdef USE_MMIO
- reload_eeprom(ioaddr0);
- /* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO.
- If reload_eeprom() was done first this could be avoided, but it is
- not known if that still works with the "win98-reboot" problem. */
- enable_mmio(ioaddr0, quirks);
-#else
- reload_eeprom(ioaddr);
-#endif
+ rhine_hw_init(dev, pioaddr);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
if (!is_valid_ether_addr(dev->dev_addr)) {
rc = -EIO;
- printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);
+ printk(KERN_ERR "Invalid MAC address\n");
goto err_out_unmap;
}
- if (quirks & rqWOL) {
- /*
- * for 3065D, EEPROM reloaded will cause bit 0 in MAC_REG_CFGA
- * turned on. it makes MAC receive magic packet
- * automatically. So, we turn it off. (D-Link)
- */
- writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
- }
-
- /* Select backoff algorithm */
- if (backoff)
- writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff),
- ioaddr + ConfigD);
+ /* For Rhine-I/II, phy_id is loaded from EEPROM */
+ if (!phy_id)
+ phy_id = readb(ioaddr + 0x6C);
dev->irq = pdev->irq;
@@ -802,9 +853,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
rp->mii_if.phy_id_mask = 0x1f;
rp->mii_if.reg_num_mask = 0x1f;
- if (dev->mem_start)
- option = dev->mem_start;
-
/* The chip-specific entries in the device structure. */
dev->open = rhine_open;
dev->hard_start_xmit = rhine_start_tx;
@@ -826,22 +874,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if (rc)
goto err_out_unmap;
- /* The lower four bits are the media type. */
- if (option > 0) {
- if (option & 0x220)
- rp->mii_if.full_duplex = 1;
- }
- if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
- rp->mii_if.full_duplex = 1;
-
- if (rp->mii_if.full_duplex) {
- printk(KERN_INFO "%s: Set to forced full duplex, "
- "autonegotiation disabled.\n", dev->name);
- rp->mii_if.force_media = 1;
- }
-
- printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
- dev->name, name,
+ printk(KERN_INFO "%s: VIA %s (%s) at 0x%lx, ",
+ dev->name, name, mname,
#ifdef USE_MMIO
memaddr
#else
@@ -855,17 +889,15 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
- rp->phys[0] = 1; /* Standard for this chip. */
- for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) {
- int mii_status = mdio_read(dev, phy, 1);
+ {
+ int mii_status = mdio_read(dev, phy_id, 1);
if (mii_status != 0xffff && mii_status != 0x0000) {
- rp->phys[phy_idx++] = phy;
- rp->mii_if.advertising = mdio_read(dev, phy, 4);
+ rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
printk(KERN_INFO "%s: MII PHY found at address "
"%d, status 0x%4.4x advertising %4.4x "
- "Link %4.4x.\n", dev->name, phy,
+ "Link %4.4x.\n", dev->name, phy_id,
mii_status, rp->mii_if.advertising,
- mdio_read(dev, phy, 5));
+ mdio_read(dev, phy_id, 5));
/* set IFF_RUNNING */
if (mii_status & BMSR_LSTATUS)
@@ -873,27 +905,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
else
netif_carrier_off(dev);
- break;
- }
- }
- rp->mii_cnt = phy_idx;
- rp->mii_if.phy_id = rp->phys[0];
-
- /* Allow forcing the media type. */
- if (option > 0) {
- if (option & 0x220)
- rp->mii_if.full_duplex = 1;
- if (option & 0x330) {
- printk(KERN_INFO " Forcing %dMbs %s-duplex "
- "operation.\n",
- (option & 0x300 ? 100 : 10),
- (option & 0x220 ? "full" : "half"));
- if (rp->mii_cnt)
- mdio_write(dev, rp->phys[0], MII_BMCR,
- ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
- ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
}
}
+ rp->mii_if.phy_id = phy_id;
return 0;
@@ -1065,6 +1079,21 @@ static void free_tbufs(struct net_device* dev)
}
}
+static void rhine_check_media(struct net_device *dev, unsigned int init_media)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ mii_check_media(&rp->mii_if, debug, init_media);
+
+ if (rp->mii_if.full_duplex)
+ writeb(readb(ioaddr + ChipCmd1) | Cmd1FDuplex,
+ ioaddr + ChipCmd1);
+ else
+ writeb(readb(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
+ ioaddr + ChipCmd1);
+}
+
static void init_registers(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1080,7 +1109,6 @@ static void init_registers(struct net_device *dev)
writeb(0x20, ioaddr + TxConfig);
rp->tx_thresh = 0x20;
rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
- rp->mii_if.full_duplex = 0;
writel(rp->rx_ring_dma, ioaddr + RxRingPtr);
writel(rp->tx_ring_dma, ioaddr + TxRingPtr);
@@ -1094,17 +1122,44 @@ static void init_registers(struct net_device *dev)
IntrPCIErr | IntrStatsMax | IntrLinkChange,
ioaddr + IntrEnable);
- rp->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
- if (rp->mii_if.force_media)
- rp->chip_cmd |= CmdFDuplex;
- writew(rp->chip_cmd, ioaddr + ChipCmd);
+ writew(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
+ ioaddr + ChipCmd);
+ rhine_check_media(dev, 1);
+}
+
+/* Enable MII link status auto-polling (required for IntrLinkChange) */
+static void rhine_enable_linkmon(long ioaddr)
+{
+ writeb(0, ioaddr + MIICmd);
+ writeb(MII_BMSR, ioaddr + MIIRegAddr);
+ writeb(0x80, ioaddr + MIICmd);
+
+ RHINE_WAIT_FOR((readb(ioaddr + MIIRegAddr) & 0x20));
+
+ writeb(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
+}
+
+/* Disable MII link status auto-polling (required for MDIO access) */
+static void rhine_disable_linkmon(long ioaddr, u32 quirks)
+{
+ writeb(0, ioaddr + MIICmd);
+
+ if (quirks & rqRhineI) {
+ writeb(0x01, ioaddr + MIIRegAddr); // MII_BMSR
+
+ /* Can be called from ISR. Evil. */
+ mdelay(1);
+
+ /* 0x80 must be set immediately before turning it off */
+ writeb(0x80, ioaddr + MIICmd);
- rhine_check_duplex(dev);
+ RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x20);
- /* The LED outputs of various MII xcvrs should be configured. */
- /* For NS or Mison phys, turn on bit 1 in register 0x17 */
- mdio_write(dev, rp->phys[0], 0x17, mdio_read(dev, rp->phys[0], 0x17) |
- 0x0001);
+ /* Heh. Now clear 0x80 again. */
+ writeb(0, ioaddr + MIICmd);
+ }
+ else
+ RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x80);
}
/* Read and write over the MII Management Data I/O (MDIO) interface. */
@@ -1112,156 +1167,72 @@ static void init_registers(struct net_device *dev)
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
long ioaddr = dev->base_addr;
- int boguscnt = 1024;
+ struct rhine_private *rp = netdev_priv(dev);
+ int result;
+
+ rhine_disable_linkmon(ioaddr, rp->quirks);
- /* Wait for a previous command to complete. */
- while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
- ;
- writeb(0x00, ioaddr + MIICmd);
+ writeb(0, ioaddr + MIICmd);
writeb(phy_id, ioaddr + MIIPhyAddr);
writeb(regnum, ioaddr + MIIRegAddr);
writeb(0x40, ioaddr + MIICmd); /* Trigger read */
- boguscnt = 1024;
- while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
- ;
- return readw(ioaddr + MIIData);
+ RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x40));
+ result = readw(ioaddr + MIIData);
+
+ rhine_enable_linkmon(ioaddr);
+ return result;
}
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
- int boguscnt = 1024;
- if (phy_id == rp->phys[0]) {
- switch (regnum) {
- case MII_BMCR: /* Is user forcing speed/duplex? */
- if (value & 0x9000) /* Autonegotiation. */
- rp->mii_if.force_media = 0;
- else
- rp->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
- break;
- case MII_ADVERTISE:
- rp->mii_if.advertising = value;
- break;
- }
- }
+ rhine_disable_linkmon(ioaddr, rp->quirks);
- /* Wait for a previous command to complete. */
- while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
- ;
- writeb(0x00, ioaddr + MIICmd);
+ writeb(0, ioaddr + MIICmd);
writeb(phy_id, ioaddr + MIIPhyAddr);
writeb(regnum, ioaddr + MIIRegAddr);
writew(value, ioaddr + MIIData);
- writeb(0x20, ioaddr + MIICmd); /* Trigger write. */
-}
+ writeb(0x20, ioaddr + MIICmd); /* Trigger write */
+ RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x20));
+ rhine_enable_linkmon(ioaddr);
+}
static int rhine_open(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
- int i;
-
- /* Reset the chip. */
- writew(CmdReset, ioaddr + ChipCmd);
+ int rc;
- i = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
+ rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
dev);
- if (i)
- return i;
+ if (rc)
+ return rc;
if (debug > 1)
printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
dev->name, rp->pdev->irq);
- i = alloc_ring(dev);
- if (i)
- return i;
+ rc = alloc_ring(dev);
+ if (rc)
+ return rc;
alloc_rbufs(dev);
alloc_tbufs(dev);
- wait_for_reset(dev, rp->quirks, dev->name);
+ rhine_chip_reset(dev);
init_registers(dev);
if (debug > 2)
printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
"MII status: %4.4x.\n",
dev->name, readw(ioaddr + ChipCmd),
- mdio_read(dev, rp->phys[0], MII_BMSR));
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
netif_start_queue(dev);
- /* Set the timer to check for link beat. */
- init_timer(&rp->timer);
- rp->timer.expires = jiffies + 2 * HZ/100;
- rp->timer.data = (unsigned long)dev;
- rp->timer.function = &rhine_timer; /* timer handler */
- add_timer(&rp->timer);
-
return 0;
}
-static void rhine_check_duplex(struct net_device *dev)
-{
- struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- int mii_lpa = mdio_read(dev, rp->phys[0], MII_LPA);
- int negotiated = mii_lpa & rp->mii_if.advertising;
- int duplex;
-
- if (rp->mii_if.force_media || mii_lpa == 0xffff)
- return;
- duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
- if (rp->mii_if.full_duplex != duplex) {
- rp->mii_if.full_duplex = duplex;
- if (debug)
- printk(KERN_INFO "%s: Setting %s-duplex based on "
- "MII #%d link partner capability of %4.4x.\n",
- dev->name, duplex ? "full" : "half",
- rp->phys[0], mii_lpa);
- if (duplex)
- rp->chip_cmd |= CmdFDuplex;
- else
- rp->chip_cmd &= ~CmdFDuplex;
- writew(rp->chip_cmd, ioaddr + ChipCmd);
- }
-}
-
-
-static void rhine_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- int next_tick = 10*HZ;
- int mii_status;
-
- if (debug > 3) {
- printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
- dev->name, readw(ioaddr + IntrStatus));
- }
-
- spin_lock_irq (&rp->lock);
-
- rhine_check_duplex(dev);
-
- /* make IFF_RUNNING follow the MII status bit "Link established" */
- mii_status = mdio_read(dev, rp->phys[0], MII_BMSR);
- if ((mii_status & BMSR_LSTATUS) != (rp->mii_status & BMSR_LSTATUS)) {
- if (mii_status & BMSR_LSTATUS)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- }
- rp->mii_status = mii_status;
-
- spin_unlock_irq(&rp->lock);
-
- rp->timer.expires = jiffies + next_tick;
- add_timer(&rp->timer);
-}
-
-
static void rhine_tx_timeout(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1270,16 +1241,13 @@ static void rhine_tx_timeout(struct net_device *dev)
printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
"%4.4x, resetting...\n",
dev->name, readw(ioaddr + IntrStatus),
- mdio_read(dev, rp->phys[0], MII_BMSR));
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
/* protect against concurrent rx interrupts */
disable_irq(rp->pdev->irq);
spin_lock(&rp->lock);
- /* Reset the chip. */
- writew(CmdReset, ioaddr + ChipCmd);
-
/* clear all descriptors */
free_tbufs(dev);
free_rbufs(dev);
@@ -1287,7 +1255,7 @@ static void rhine_tx_timeout(struct net_device *dev)
alloc_rbufs(dev);
/* Reinitialize the hardware. */
- wait_for_reset(dev, rp->quirks, dev->name);
+ rhine_chip_reset(dev);
init_registers(dev);
spin_unlock(&rp->lock);
@@ -1301,8 +1269,8 @@ static void rhine_tx_timeout(struct net_device *dev)
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
unsigned entry;
- u32 intr_status;
/* Caution: the write order is important here, set the field
with the "ownership" bits last. */
@@ -1353,14 +1321,9 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
/* Non-x86 Todo: explicitly flush cache lines here. */
- /*
- * Wake the potentially-idle transmit channel unless errors are
- * pending (the ISR must sort them out first).
- */
- intr_status = get_intr_status(dev);
- if ((intr_status & IntrTxErrSummary) == 0) {
- writew(CmdTxDemand | rp->chip_cmd, dev->base_addr + ChipCmd);
- }
+ /* Wake the potentially-idle transmit channel */
+ writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand,
+ ioaddr + ChipCmd1);
IOSYNC;
if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
@@ -1408,11 +1371,10 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
if (intr_status & IntrTxErrSummary) {
- int cnt = 20;
/* Avoid scavenging before Tx engine turned off */
- while ((readw(ioaddr+ChipCmd) & CmdTxOn) && --cnt)
- udelay(5);
- if (debug > 2 && !cnt)
+ RHINE_WAIT_FOR(!(readb(ioaddr+ChipCmd) & CmdTxOn));
+ if (debug > 2 &&
+ readb(ioaddr+ChipCmd) & CmdTxOn)
printk(KERN_WARNING "%s: "
"rhine_interrupt() Tx engine"
"still on.\n", dev->name);
@@ -1572,10 +1534,6 @@ static void rhine_rx(struct net_device *dev)
rp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- /* *_IP_COPYSUM isn't defined anywhere and
- eth_copy_and_sum is memcpy for all archs so
- this is kind of pointless right now
- ... or? */
eth_copy_and_sum(skb,
rp->rx_skbuff[entry]->tail,
pkt_len, 0);
@@ -1627,10 +1585,6 @@ static void rhine_rx(struct net_device *dev)
}
rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
}
-
- /* Pre-emptively restart Rx engine. */
- writew(readw(dev->base_addr + ChipCmd) | CmdRxOn | CmdRxDemand,
- dev->base_addr + ChipCmd);
}
/*
@@ -1664,7 +1618,10 @@ static void rhine_restart_tx(struct net_device *dev) {
writel(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
ioaddr + TxRingPtr);
- writew(CmdTxDemand | rp->chip_cmd, ioaddr + ChipCmd);
+ writeb(readb(ioaddr + ChipCmd) | CmdTxOn,
+ ioaddr + ChipCmd);
+ writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand,
+ ioaddr + ChipCmd1);
IOSYNC;
}
else {
@@ -1684,20 +1641,8 @@ static void rhine_error(struct net_device *dev, int intr_status)
spin_lock(&rp->lock);
- if (intr_status & (IntrLinkChange)) {
- if (readb(ioaddr + MIIStatus) & 0x02) {
- /* Link failed, restart autonegotiation. */
- if (rp->quirks & rqRhineI)
- mdio_write(dev, rp->phys[0], MII_BMCR, 0x3300);
- } else
- rhine_check_duplex(dev);
- if (debug)
- printk(KERN_ERR "%s: MII status changed: "
- "Autonegotiation advertising %4.4x partner "
- "%4.4x.\n", dev->name,
- mdio_read(dev, rp->phys[0], MII_ADVERTISE),
- mdio_read(dev, rp->phys[0], MII_LPA));
- }
+ if (intr_status & IntrLinkChange)
+ rhine_check_media(dev, 0);
if (intr_status & IntrStatsMax) {
rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
@@ -1790,7 +1735,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
- mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
writel(mc_filter[0], ioaddr + MulticastFilter0);
writel(mc_filter[1], ioaddr + MulticastFilter1);
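For context: the loop above hashes each multicast address with the kernel's shared ether_crc() helper and uses the top six bits of the CRC to select one of 64 filter bits, spread across the two 32-bit MulticastFilter registers. A small sketch of just that mapping (example_hash_mc_addr is a hypothetical name, not part of the driver):

	/* Set the Rhine filter bit corresponding to one multicast address. */
	static void example_hash_mc_addr(u32 mc_filter[2], unsigned char *addr)
	{
		int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	/* top 6 bits: 0..63 */

		mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);	/* pick word, then bit */
	}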
@@ -1856,6 +1801,39 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value)
debug = value;
}
+static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ if (!(rp->quirks & rqWOL))
+ return;
+
+ spin_lock_irq(&rp->lock);
+ wol->supported = WAKE_PHY | WAKE_MAGIC |
+ WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
+ wol->wolopts = rp->wolopts;
+ spin_unlock_irq(&rp->lock);
+}
+
+static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ u32 support = WAKE_PHY | WAKE_MAGIC |
+ WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
+
+ if (!(rp->quirks & rqWOL))
+ return -EINVAL;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ spin_lock_irq(&rp->lock);
+ rp->wolopts = wol->wolopts;
+ spin_unlock_irq(&rp->lock);
+
+ return 0;
+}
+
static struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_settings = netdev_get_settings,
@@ -1864,6 +1842,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
.get_link = netdev_get_link,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
+ .get_wol = rhine_get_wol,
+ .set_wol = rhine_set_wol,
.get_sg = ethtool_op_get_sg,
.get_tx_csum = ethtool_op_get_tx_csum,
};
@@ -1888,8 +1868,6 @@ static int rhine_close(struct net_device *dev)
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
- del_timer_sync(&rp->timer);
-
spin_lock_irq(&rp->lock);
netif_stop_queue(dev);
@@ -1936,12 +1914,51 @@ static void __devexit rhine_remove_one(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
+static void rhine_shutdown (struct device *gendev)
+{
+ struct pci_dev *pdev = to_pci_dev(gendev);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+
+ long ioaddr = dev->base_addr;
+
+ rhine_power_init(dev);
+
+ /* Make sure we use pattern 0, 1 and not 4, 5 */
+ if (rp->quirks & rq6patterns)
+ writeb(0x04, ioaddr + 0xA7);
+
+ if (rp->wolopts & WAKE_MAGIC)
+ writeb(WOLmagic, ioaddr + WOLcrSet);
+
+ if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
+ writeb(WOLbmcast, ioaddr + WOLcgSet);
+
+ if (rp->wolopts & WAKE_PHY)
+ writeb(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
+
+ if (rp->wolopts & WAKE_UCAST)
+ writeb(WOLucast, ioaddr + WOLcrSet);
+
+ /* Enable legacy WOL (for old motherboards) */
+ writeb(0x01, ioaddr + PwcfgSet);
+ writeb(readb(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
+
+ /* Hit power state D3 (sleep) */
+ writeb(readb(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+
+ /* TODO: Check use of pci_enable_wake() */
+
+}
static struct pci_driver rhine_driver = {
- .name = "via-rhine",
+ .name = DRV_NAME,
.id_table = rhine_pci_tbl,
.probe = rhine_init_one,
.remove = __devexit_p(rhine_remove_one),
+ .driver = {
+ .shutdown = rhine_shutdown,
+ }
};
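Taken together, the new ethtool hooks and the shutdown handler split the work: rhine_get_wol()/rhine_set_wol() only validate and record the requested wake-up options in rp->wolopts, while rhine_shutdown() above programs WOLcrSet/WOLcgSet from those bits when the system goes down. A minimal user-space sketch of requesting magic-packet wake-up through the same interface (hypothetical function name, interface name assumed to be "eth0"; ethtool(8) does the equivalent with "ethtool -s eth0 wol g"):

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int enable_wol_magic(const char *ifname)
	{
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL, .wolopts = WAKE_MAGIC };
		struct ifreq ifr;
		int fd, ret;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&wol;	/* handed to the driver's set_wol hook */
		ret = ioctl(fd, SIOCETHTOOL, &ifr);
		close(fd);
		return ret;			/* 0 on success */
	}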
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index f053d2a9e4f2cc..af599f8b535b51 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -78,6 +78,8 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
+#include <linux/crc16.h>
+#include <linux/crc32.h>
#include "via-velocity.h"
@@ -226,7 +228,10 @@ VELOCITY_PARAM(wol_opts, "Wake On Lan options");
VELOCITY_PARAM(int_works, "Number of packets per interrupt services");
-static int velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int rx_copybreak = 200;
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
+
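rx_copybreak is a new tunable: received frames shorter than this many bytes are copied into a freshly allocated skb so the original full-sized receive buffer can stay mapped and be handed straight back to the NIC (see velocity_rx_copy() further down; the default of 200 bytes is set above). The decision itself is just a size compare, sketched here in isolation (hypothetical helper name):

	/* Illustration only: copy small frames, pass large ones up as-is. */
	static inline int example_should_copy(int pkt_size)
	{
		return pkt_size < rx_copybreak;
	}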
static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info);
static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
static void velocity_print_info(struct velocity_info *vptr);
@@ -238,10 +243,8 @@ static void velocity_set_multi(struct net_device *dev);
static struct net_device_stats *velocity_get_stats(struct net_device *dev);
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int velocity_close(struct net_device *dev);
-static int velocity_rx_srv(struct velocity_info *vptr, int status);
static int velocity_receive_frame(struct velocity_info *, int idx);
static int velocity_alloc_rx_buf(struct velocity_info *, int idx);
-static void velocity_init_registers(struct velocity_info *vptr, enum velocity_init_type type);
static void velocity_free_rd_ring(struct velocity_info *vptr);
static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
static int velocity_soft_reset(struct velocity_info *vptr);
@@ -254,12 +257,8 @@ static void enable_flow_control_ability(struct velocity_info *vptr);
static void enable_mii_autopoll(struct mac_regs * regs);
static int velocity_mii_read(struct mac_regs *, u8 byIdx, u16 * pdata);
static int velocity_mii_write(struct mac_regs *, u8 byMiiAddr, u16 data);
-static int velocity_set_wol(struct velocity_info *vptr);
-static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context);
-static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context);
static u32 mii_check_media_mode(struct mac_regs * regs);
static u32 check_connection_type(struct mac_regs * regs);
-static void velocity_init_cam_filter(struct velocity_info *vptr);
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);
#ifdef CONFIG_PM
@@ -269,8 +268,9 @@ static int velocity_resume(struct pci_dev *pdev);
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr);
static struct notifier_block velocity_inetaddr_notifier = {
- notifier_call:velocity_netdev_event,
+ .notifier_call = velocity_netdev_event,
};
+static int velocity_notifier_registered;
#endif /* CONFIG_PM */
@@ -289,8 +289,9 @@ static struct velocity_info_tbl chip_info_table[] = {
*/
static struct pci_device_id velocity_id_table[] __devinitdata = {
- {0x1106, 0x3119, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &chip_info_table[0]},
- {0,}
+ {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) chip_info_table},
+ {0, }
};
MODULE_DEVICE_TABLE(pci, velocity_id_table);
@@ -463,6 +464,12 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
}
}
+static inline void velocity_give_rx_desc(struct rx_desc *rd)
+{
+ *(u32 *)&rd->rdesc0 = 0;
+ rd->rdesc0.owner = cpu_to_le32(OWNED_BY_NIC);
+}
+
/**
* velocity_rx_reset - handle a receive reset
* @vptr: velocity we are resetting
@@ -477,13 +484,13 @@ static void velocity_rx_reset(struct velocity_info *vptr)
struct mac_regs * regs = vptr->mac_regs;
int i;
- vptr->rd_used = vptr->rd_curr = 0;
+ vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;
/*
* Init state, all RD entries belong to the NIC
*/
for (i = 0; i < vptr->options.numrx; ++i)
- vptr->rd_ring[i].rdesc0.owner = cpu_to_le32(OWNED_BY_NIC);
+ velocity_give_rx_desc(vptr->rd_ring + i);
writew(vptr->options.numrx, &regs->RBRDU);
writel(vptr->rd_pool_dma, &regs->RDBaseLo);
@@ -776,6 +783,12 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
pci_set_power_state(pdev, 3);
out:
+#ifdef CONFIG_PM
+ if (ret == 0 && !velocity_notifier_registered) {
+ velocity_notifier_registered = 1;
+ register_inetaddr_notifier(&velocity_inetaddr_notifier);
+ }
+#endif
return ret;
err_iounmap:
@@ -966,6 +979,60 @@ static void velocity_free_rings(struct velocity_info *vptr)
pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
}
+static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
+{
+ struct mac_regs *regs = vptr->mac_regs;
+ int avail, dirty, unusable;
+
+ /*
+ * The number of RDs handed back must be a multiple of 4, per the hardware spec
+ * (programming guide rev 1.20, p.13)
+ */
+ if (vptr->rd_filled < 4)
+ return;
+
+ wmb();
+
+ unusable = vptr->rd_filled | 0x0003;
+ dirty = vptr->rd_dirty - unusable + 1;
+ for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
+ dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
+ velocity_give_rx_desc(vptr->rd_ring + dirty);
+ }
+
+ writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
+ vptr->rd_filled = unusable;
+}
+
+static int velocity_rx_refill(struct velocity_info *vptr)
+{
+ int dirty = vptr->rd_dirty, done = 0, ret = 0;
+
+ do {
+ struct rx_desc *rd = vptr->rd_ring + dirty;
+
+ /* Fine for an all zero Rx desc at init time as well */
+ if (rd->rdesc0.owner == cpu_to_le32(OWNED_BY_NIC))
+ break;
+
+ if (!vptr->rd_info[dirty].skb) {
+ ret = velocity_alloc_rx_buf(vptr, dirty);
+ if (ret < 0)
+ break;
+ }
+ done++;
+ dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
+ } while (dirty != vptr->rd_curr);
+
+ if (done) {
+ vptr->rd_dirty = dirty;
+ vptr->rd_filled += done;
+ velocity_give_many_rx_descs(vptr);
+ }
+
+ return ret;
+}
+
/**
* velocity_init_rd_ring - set up receive ring
* @vptr: velocity to configure
@@ -976,9 +1043,7 @@ static void velocity_free_rings(struct velocity_info *vptr)
static int velocity_init_rd_ring(struct velocity_info *vptr)
{
- int i, ret = -ENOMEM;
- struct rx_desc *rd;
- struct velocity_rd_info *rd_info;
+ int ret = -ENOMEM;
unsigned int rsize = sizeof(struct velocity_rd_info) *
vptr->options.numrx;
@@ -987,22 +1052,14 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
goto out;
memset(vptr->rd_info, 0, rsize);
- /* Init the RD ring entries */
- for (i = 0; i < vptr->options.numrx; i++) {
- rd = &(vptr->rd_ring[i]);
- rd_info = &(vptr->rd_info[i]);
+ vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
- ret = velocity_alloc_rx_buf(vptr, i);
- if (ret < 0) {
- VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
- "%s: failed to allocate RX buffer.\n",
- vptr->dev->name);
- velocity_free_rd_ring(vptr);
- goto out;
- }
- rd->rdesc0.owner = OWNED_BY_NIC;
+ ret = velocity_rx_refill(vptr);
+ if (ret < 0) {
+ VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
+ "%s: failed to allocate RX buffer.\n", vptr->dev->name);
+ velocity_free_rd_ring(vptr);
}
- vptr->rd_used = vptr->rd_curr = 0;
out:
return ret;
}
@@ -1025,7 +1082,7 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
for (i = 0; i < vptr->options.numrx; i++) {
struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
- if (!rd_info->skb_dma)
+ if (!rd_info->skb)
continue;
pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
PCI_DMA_FROMDEVICE);
@@ -1146,22 +1203,14 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
static int velocity_rx_srv(struct velocity_info *vptr, int status)
{
- struct rx_desc *rd;
struct net_device_stats *stats = &vptr->stats;
- struct mac_regs * regs = vptr->mac_regs;
int rd_curr = vptr->rd_curr;
int works = 0;
while (1) {
+ struct rx_desc *rd = vptr->rd_ring + rd_curr;
- rd = &(vptr->rd_ring[rd_curr]);
-
- if ((vptr->rd_info[rd_curr]).skb == NULL) {
- if (velocity_alloc_rx_buf(vptr, rd_curr) < 0)
- break;
- }
-
- if (works++ > 15)
+ if (!vptr->rd_info[rd_curr].skb || (works++ > 15))
break;
if (rd->rdesc0.owner == OWNED_BY_NIC)
@@ -1169,17 +1218,10 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
/*
* Don't drop CE or RL error frame although RXOK is off
- * FIXME: need to handle copybreak
*/
if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
- if (velocity_receive_frame(vptr, rd_curr) == 0) {
- if (velocity_alloc_rx_buf(vptr, rd_curr) < 0) {
- VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not allocate rx buf\n", vptr->dev->name);
- break;
- }
- } else {
+ if (velocity_receive_frame(vptr, rd_curr) < 0)
stats->rx_dropped++;
- }
} else {
if (rd->rdesc0.RSR & RSR_CRC)
stats->rx_crc_errors++;
@@ -1191,25 +1233,18 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
rd->inten = 1;
- if (++vptr->rd_used >= 4) {
- int i, rd_prev = rd_curr;
- for (i = 0; i < 4; i++) {
- if (--rd_prev < 0)
- rd_prev = vptr->options.numrx - 1;
-
- rd = &(vptr->rd_ring[rd_prev]);
- rd->rdesc0.owner = OWNED_BY_NIC;
- }
- writew(4, &(regs->RBRDU));
- vptr->rd_used -= 4;
- }
-
vptr->dev->last_rx = jiffies;
rd_curr++;
if (rd_curr >= vptr->options.numrx)
rd_curr = 0;
}
+
+ if (velocity_rx_refill(vptr) < 0) {
+ VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
+ "%s: rx buf allocation failure\n", vptr->dev->name);
+ }
+
vptr->rd_curr = rd_curr;
VAR_USED(stats);
return works;
@@ -1242,6 +1277,65 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
}
/**
+ * velocity_rx_copy - in place Rx copy for small packets
+ * @rx_skb: network layer packet buffer candidate
+ * @pkt_size: received data size
+ * @rd: receive packet descriptor
+ * @dev: network device
+ *
+ * Replace the current skb that is scheduled for Rx processing by a
+ * shorter, immediatly allocated skb, if the received packet is small
+ * enough. This function returns a negative value if the received
+ * packet is too big or if memory is exhausted.
+ */
+static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+ struct velocity_info *vptr)
+{
+ int ret = -1;
+
+ if (pkt_size < rx_copybreak) {
+ struct sk_buff *new_skb;
+
+ new_skb = dev_alloc_skb(pkt_size + 2);
+ if (new_skb) {
+ new_skb->dev = vptr->dev;
+ new_skb->ip_summed = rx_skb[0]->ip_summed;
+
+ if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
+ skb_reserve(new_skb, 2);
+
+ memcpy(new_skb->data, rx_skb[0]->tail, pkt_size);
+ *rx_skb = new_skb;
+ ret = 0;
+ }
+
+ }
+ return ret;
+}
+
+/**
+ * velocity_iph_realign - IP header alignment
+ * @vptr: velocity we are handling
+ * @skb: network layer packet buffer
+ * @pkt_size: received data size
+ *
+ * Align the IP header on a 2-byte boundary. This behavior can be
+ * configured by the user.
+ */
+static inline void velocity_iph_realign(struct velocity_info *vptr,
+ struct sk_buff *skb, int pkt_size)
+{
+ /* FIXME - memmove ? */
+ if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
+ int i;
+
+ for (i = pkt_size; i >= 0; i--)
+ *(skb->data + i + 2) = *(skb->data + i);
+ skb_reserve(skb, 2);
+ }
+}
+
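The FIXME above asks whether memmove() could do this job. A minimal sketch of that alternative under the same assumptions (shift the frame up by two bytes so the IP header behind the 14-byte Ethernet header becomes 4-byte aligned; the open-coded loop copies indices 0..pkt_size inclusive, hence the "+ 1"):

	static inline void example_iph_realign(struct velocity_info *vptr,
					       struct sk_buff *skb, int pkt_size)
	{
		if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
			/* source and destination overlap: memmove(), not memcpy() */
			memmove(skb->data + 2, skb->data, pkt_size + 1);
			skb_reserve(skb, 2);
		}
	}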
+/**
* velocity_receive_frame - received packet processor
* @vptr: velocity we are handling
* @idx: ring index
@@ -1252,9 +1346,11 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
+ void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
struct net_device_stats *stats = &vptr->stats;
struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
struct rx_desc *rd = &(vptr->rd_ring[idx]);
+ int pkt_len = rd->rdesc0.len;
struct sk_buff *skb;
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
@@ -1269,22 +1365,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
skb = rd_info->skb;
skb->dev = vptr->dev;
- pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- rd_info->skb_dma = (dma_addr_t) NULL;
- rd_info->skb = NULL;
-
- /* FIXME - memmove ? */
- if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
- int i;
- for (i = rd->rdesc0.len + 4; i >= 0; i--)
- *(skb->data + i + 2) = *(skb->data + i);
- skb->data += 2;
- skb->tail += 2;
- }
-
- skb_put(skb, (rd->rdesc0.len - 4));
- skb->protocol = eth_type_trans(skb, skb->dev);
+ pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
+ vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
/*
* Drop frame not meeting IEEE 802.3
@@ -1297,13 +1379,23 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
}
}
+ pci_action = pci_dma_sync_single_for_device;
+
velocity_rx_csum(rd, skb);
-
- /*
- * FIXME: need rx_copybreak handling
- */
- stats->rx_bytes += skb->len;
+ if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
+ velocity_iph_realign(vptr, skb, pkt_len);
+ pci_action = pci_unmap_single;
+ rd_info->skb = NULL;
+ }
+
+ pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, pkt_len - 4);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ stats->rx_bytes += pkt_len;
netif_rx(skb);
return 0;
@@ -1963,32 +2055,6 @@ static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs)
/**
- * ether_crc - ethernet CRC function
- *
- * Compute an ethernet CRC hash of the data block provided. This
- * is not performance optimised but is not needed in performance
- * critical code paths.
- *
- * FIXME: could we use shared code here ?
- */
-
-static inline u32 ether_crc(int length, unsigned char *data)
-{
- static unsigned const ethernet_polynomial = 0x04c11db7U;
-
- int crc = -1;
-
- while (--length >= 0) {
- unsigned char current_octet = *data++;
- int bit;
- for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
- crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
- }
- }
- return crc;
-}
-
-/**
* velocity_set_multi - filter list change callback
* @dev: network device
*
@@ -2123,13 +2189,13 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*/
static struct pci_driver velocity_driver = {
- name:VELOCITY_NAME,
- id_table:velocity_id_table,
- probe:velocity_found1,
- remove:velocity_remove1,
+ .name = VELOCITY_NAME,
+ .id_table = velocity_id_table,
+ .probe = velocity_found1,
+ .remove = __devexit_p(velocity_remove1),
#ifdef CONFIG_PM
- suspend:velocity_suspend,
- resume:velocity_resume,
+ .suspend = velocity_suspend,
+ .resume = velocity_resume,
#endif
};
@@ -2147,9 +2213,6 @@ static int __init velocity_init_module(void)
int ret;
ret = pci_module_init(&velocity_driver);
-#ifdef CONFIG_PM
- register_inetaddr_notifier(&velocity_inetaddr_notifier);
-#endif
return ret;
}
@@ -2165,7 +2228,10 @@ static int __init velocity_init_module(void)
static void __exit velocity_cleanup_module(void)
{
#ifdef CONFIG_PM
- unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
+ if (velocity_notifier_registered) {
+ unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
+ velocity_notifier_registered = 0;
+ }
#endif
pci_unregister_driver(&velocity_driver);
}
@@ -2992,172 +3058,6 @@ static void velocity_restore_context(struct velocity_info *vptr, struct velocity
}
-static int velocity_suspend(struct pci_dev *pdev, u32 state)
-{
- struct velocity_info *vptr = pci_get_drvdata(pdev);
- unsigned long flags;
-
- if(!netif_running(vptr->dev))
- return 0;
-
- netif_device_detach(vptr->dev);
-
- spin_lock_irqsave(&vptr->lock, flags);
- pci_save_state(pdev, vptr->pci_state);
-#ifdef ETHTOOL_GWOL
- if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
- velocity_get_ip(vptr);
- velocity_save_context(vptr, &vptr->context);
- velocity_shutdown(vptr);
- velocity_set_wol(vptr);
- pci_enable_wake(pdev, 3, 1);
- pci_set_power_state(pdev, 3);
- } else {
- velocity_save_context(vptr, &vptr->context);
- velocity_shutdown(vptr);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, state);
- }
-#else
- pci_set_power_state(pdev, state);
-#endif
- spin_unlock_irqrestore(&vptr->lock, flags);
- return 0;
-}
-
-static int velocity_resume(struct pci_dev *pdev)
-{
- struct velocity_info *vptr = pci_get_drvdata(pdev);
- unsigned long flags;
- int i;
-
- if(!netif_running(vptr->dev))
- return 0;
-
- pci_set_power_state(pdev, 0);
- pci_enable_wake(pdev, 0, 0);
- pci_restore_state(pdev, vptr->pci_state);
-
- mac_wol_reset(vptr->mac_regs);
-
- spin_lock_irqsave(&vptr->lock, flags);
- velocity_restore_context(vptr, &vptr->context);
- velocity_init_registers(vptr, VELOCITY_INIT_WOL);
- mac_disable_int(vptr->mac_regs);
-
- velocity_tx_srv(vptr, 0);
-
- for (i = 0; i < vptr->num_txq; i++) {
- if (vptr->td_used[i]) {
- mac_tx_queue_wake(vptr->mac_regs, i);
- }
- }
-
- mac_enable_int(vptr->mac_regs);
- spin_unlock_irqrestore(&vptr->lock, flags);
- netif_device_attach(vptr->dev);
-
- return 0;
-}
-
-static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
-{
- struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
- struct net_device *dev;
- struct velocity_info *vptr;
-
- if (ifa) {
- dev = ifa->ifa_dev->dev;
- vptr = dev->priv;
- velocity_get_ip(vptr);
- }
- return NOTIFY_DONE;
-}
-#endif
-
-/*
- * Purpose: Functions to set WOL.
- */
-
-const static unsigned short crc16_tab[256] = {
- 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
- 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
- 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
- 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
- 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
- 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
- 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
- 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
- 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
- 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
- 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
- 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
- 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
- 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
- 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
- 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
- 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
- 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
- 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
- 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
- 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
- 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
- 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
- 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
- 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
- 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
- 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
- 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
- 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
- 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
- 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
- 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
-};
-
-
-static u32 mask_pattern[2][4] = {
- {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
- {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */
-};
-
-/**
- * ether_crc16 - compute ethernet CRC
- * @len: buffer length
- * @cp: buffer
- * @crc16: initial CRC
- *
- * Compute a CRC value for a block of data.
- * FIXME: can we use generic functions ?
- */
-
-static u16 ether_crc16(int len, u8 * cp, u16 crc16)
-{
- while (len--)
- crc16 = (crc16 >> 8) ^ crc16_tab[(crc16 ^ *cp++) & 0xff];
- return (crc16);
-}
-
-/**
- * bit_reverse - 16bit reverse
- * @data: 16bit data t reverse
- *
- * Reverse the order of a 16bit value and return the reversed bits
- */
-
-static u16 bit_reverse(u16 data)
-{
- u32 new = 0x00000000;
- int ii;
-
-
- for (ii = 0; ii < 16; ii++) {
- new |= ((u32) (data & 1) << (31 - ii));
- data >>= 1;
- }
-
- return (u16) (new >> 16);
-}
-
/**
* wol_calc_crc - WOL CRC
* @pattern: data pattern
@@ -3166,7 +3066,7 @@ static u16 bit_reverse(u16 data)
* Compute the wake on lan crc hashes for the packet header
* we are interested in.
*/
-
+
u16 wol_calc_crc(int size, u8 * pattern, u8 *mask_pattern)
{
u16 crc = 0xFFFF;
@@ -3186,12 +3086,12 @@ u16 wol_calc_crc(int size, u8 * pattern, u8 *mask_pattern)
continue;
}
mask >>= 1;
- crc = ether_crc16(1, &(pattern[i * 8 + j]), crc);
+ crc = crc16(crc, &(pattern[i * 8 + j]), 1);
}
}
/* Finally, invert the result once to get the correct data */
crc = ~crc;
- return bit_reverse(crc);
+ return bitreverse(crc) >> 16;
}
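wol_calc_crc() now relies on the generic crc16() and bitreverse() helpers instead of the private table and bit_reverse() routine this patch deletes. Since bitreverse() reverses a full 32-bit word, a 16-bit CRC placed in the low half comes back in the high half, which is why the result is shifted right by 16. A tiny illustration of that identity (hypothetical helper, equivalent to the removed bit_reverse()):

	static inline u16 example_bitrev16(u16 x)
	{
		return (u16)(bitreverse((u32)x) >> 16);	/* 16-bit reversal via the 32-bit helper */
	}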
/**
@@ -3203,13 +3103,18 @@ u16 wol_calc_crc(int size, u8 * pattern, u8 *mask_pattern)
*
* FIXME: check static buffer is safe here
*/
-
+
static int velocity_set_wol(struct velocity_info *vptr)
{
struct mac_regs * regs = vptr->mac_regs;
static u8 buf[256];
int i;
+ static u32 mask_pattern[2][4] = {
+ {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
+ {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */
+ };
+
writew(0xFFFF, &regs->WOLCRClr);
writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
@@ -3236,7 +3141,8 @@ static int velocity_set_wol(struct velocity_info *vptr)
memcpy(arp->ar_tip, vptr->ip_addr, 4);
- crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf, (u8 *) & mask_pattern[0][0]);
+ crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
+ (u8 *) & mask_pattern[0][0]);
writew(crc, &regs->PatternCRC[0]);
writew(WOLCR_ARP_EN, &regs->WOLCRSet);
@@ -3275,3 +3181,85 @@ static int velocity_set_wol(struct velocity_info *vptr)
return 0;
}
+static int velocity_suspend(struct pci_dev *pdev, u32 state)
+{
+ struct velocity_info *vptr = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if(!netif_running(vptr->dev))
+ return 0;
+
+ netif_device_detach(vptr->dev);
+
+ spin_lock_irqsave(&vptr->lock, flags);
+ pci_save_state(pdev, vptr->pci_state);
+#ifdef ETHTOOL_GWOL
+ if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
+ velocity_get_ip(vptr);
+ velocity_save_context(vptr, &vptr->context);
+ velocity_shutdown(vptr);
+ velocity_set_wol(vptr);
+ pci_enable_wake(pdev, 3, 1);
+ pci_set_power_state(pdev, 3);
+ } else {
+ velocity_save_context(vptr, &vptr->context);
+ velocity_shutdown(vptr);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, state);
+ }
+#else
+ pci_set_power_state(pdev, state);
+#endif
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ return 0;
+}
+
+static int velocity_resume(struct pci_dev *pdev)
+{
+ struct velocity_info *vptr = pci_get_drvdata(pdev);
+ unsigned long flags;
+ int i;
+
+ if(!netif_running(vptr->dev))
+ return 0;
+
+ pci_set_power_state(pdev, 0);
+ pci_enable_wake(pdev, 0, 0);
+ pci_restore_state(pdev, vptr->pci_state);
+
+ mac_wol_reset(vptr->mac_regs);
+
+ spin_lock_irqsave(&vptr->lock, flags);
+ velocity_restore_context(vptr, &vptr->context);
+ velocity_init_registers(vptr, VELOCITY_INIT_WOL);
+ mac_disable_int(vptr->mac_regs);
+
+ velocity_tx_srv(vptr, 0);
+
+ for (i = 0; i < vptr->num_txq; i++) {
+ if (vptr->td_used[i]) {
+ mac_tx_queue_wake(vptr->mac_regs, i);
+ }
+ }
+
+ mac_enable_int(vptr->mac_regs);
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ netif_device_attach(vptr->dev);
+
+ return 0;
+}
+
+static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct net_device *dev;
+ struct velocity_info *vptr;
+
+ if (ifa) {
+ dev = ifa->ifa_dev->dev;
+ vptr = dev->priv;
+ velocity_get_ip(vptr);
+ }
+ return NOTIFY_DONE;
+}
+#endif
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 2175b86968a92e..630a466209807e 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -37,7 +37,6 @@
#define OPTION_DEFAULT { [0 ... MAX_UNITS-1] = -1}
#define REV_ID_VT6110 (0)
-#define DEVICE_ID (0x3119)
#define BYTE_REG_BITS_ON(x,p) do { writeb(readb((p))|(x),(p));} while (0)
#define WORD_REG_BITS_ON(x,p) do { writew(readw((p))|(x),(p));} while (0)
@@ -1772,7 +1771,8 @@ struct velocity_info {
struct velocity_td_info *td_infos[TX_QUEUE_NO];
int rd_curr;
- int rd_used;
+ int rd_dirty;
+ u32 rd_filled;
struct rx_desc *rd_ring;
struct velocity_rd_info *rd_info; /* It's an array */
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 20bd0dfef9da0a..915a8af77b45f9 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -1,4 +1,4 @@
-/* airport.c 0.13e
+/* airport.c
*
* A driver for "Hermes" chipset based Apple Airport wireless
* card.
@@ -11,6 +11,9 @@
* 0.06 : fix possible hang on powerup, add sleep support
*/
+#define DRIVER_NAME "airport"
+#define PFX DRIVER_NAME ": "
+
#include <linux/config.h>
#include <linux/module.h>
@@ -50,7 +53,7 @@ static int
airport_suspend(struct macio_dev *mdev, u32 state)
{
struct net_device *dev = dev_get_drvdata(&mdev->ofdev.dev);
- struct orinoco_private *priv = dev->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
unsigned long flags;
int err;
@@ -84,7 +87,7 @@ static int
airport_resume(struct macio_dev *mdev)
{
struct net_device *dev = dev_get_drvdata(&mdev->ofdev.dev);
- struct orinoco_private *priv = dev->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
unsigned long flags;
int err;
@@ -126,7 +129,7 @@ static int
airport_detach(struct macio_dev *mdev)
{
struct net_device *dev = dev_get_drvdata(&mdev->ofdev.dev);
- struct orinoco_private *priv = dev->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
struct airport *card = priv->card;
if (card->ndev_registered)
@@ -194,24 +197,24 @@ airport_attach(struct macio_dev *mdev, const struct of_match *match)
hermes_t *hw;
if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1) {
- printk(KERN_ERR "airport: wrong interrupt/addresses in OF tree\n");
+ printk(KERN_ERR PFX "wrong interrupt/addresses in OF tree\n");
return -ENODEV;
}
/* Allocate space for private device-specific data */
dev = alloc_orinocodev(sizeof(*card), airport_hard_reset);
if (! dev) {
- printk(KERN_ERR "airport: can't allocate device datas\n");
+ printk(KERN_ERR PFX "can't allocate device datas\n");
return -ENODEV;
}
- priv = dev->priv;
+ priv = netdev_priv(dev);
card = priv->card;
hw = &priv->hw;
card->mdev = mdev;
if (macio_request_resource(mdev, 0, "airport")) {
- printk(KERN_ERR "airport: can't request IO resource !\n");
+ printk(KERN_ERR PFX "can't request IO resource !\n");
free_netdev(dev);
return -EBUSY;
}
@@ -224,11 +227,11 @@ airport_attach(struct macio_dev *mdev, const struct of_match *match)
/* Setup interrupts & base address */
dev->irq = macio_irq(mdev, 0);
phys_addr = macio_resource_start(mdev, 0); /* Physical address */
- printk(KERN_DEBUG "Airport at physical address %lx\n", phys_addr);
+ printk(KERN_DEBUG PFX "Airport at physical address %lx\n", phys_addr);
dev->base_addr = phys_addr;
card->vaddr = ioremap(phys_addr, AIRPORT_IO_LEN);
if (!card->vaddr) {
- printk("airport: ioremap() failed\n");
+ printk(PFX "ioremap() failed\n");
goto failed;
}
@@ -244,17 +247,17 @@ airport_attach(struct macio_dev *mdev, const struct of_match *match)
hermes_init(hw);
if (request_irq(dev->irq, orinoco_interrupt, 0, "Airport", dev)) {
- printk(KERN_ERR "airport: Couldn't get IRQ %d\n", dev->irq);
+ printk(KERN_ERR PFX "Couldn't get IRQ %d\n", dev->irq);
goto failed;
}
card->irq_requested = 1;
/* Tell the stack we exist */
if (register_netdev(dev) != 0) {
- printk(KERN_ERR "airport: register_netdev() failed\n");
+ printk(KERN_ERR PFX "register_netdev() failed\n");
goto failed;
}
- printk(KERN_DEBUG "airport: card registered for interface %s\n", dev->name);
+ printk(KERN_DEBUG PFX "card registered for interface %s\n", dev->name);
card->ndev_registered = 1;
return 0;
failed:
@@ -263,7 +266,8 @@ airport_attach(struct macio_dev *mdev, const struct of_match *match)
} /* airport_attach */
-static char version[] __initdata = "airport.c 0.13e (Benjamin Herrenschmidt <benh@kernel.crashing.org>)";
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Benjamin Herrenschmidt <benh@kernel.crashing.org>)";
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("Driver for the Apple Airport wireless card.");
MODULE_LICENSE("Dual MPL/GPL");
@@ -280,7 +284,7 @@ static struct of_match airport_match[] =
static struct macio_driver airport_driver =
{
- .name = "airport",
+ .name = DRIVER_NAME,
.match_table = airport_match,
.probe = airport_attach,
.remove = airport_detach,
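Throughout this series, direct dev->priv dereferences are replaced with netdev_priv(dev), the accessor that returns the driver-private area allocated together with the struct net_device. A minimal sketch of the pattern as the airport driver uses it (illustrative only; the comment about sizing is an assumption based on the sizeof(*card) argument passed to alloc_orinocodev()):

	struct net_device *dev;
	struct orinoco_private *priv;
	struct airport *card;

	/* alloc_orinocodev() presumably reserves the orinoco private data
	 * plus the card-specific struct airport behind the net_device */
	dev = alloc_orinocodev(sizeof(struct airport), airport_hard_reset);
	priv = netdev_priv(dev);	/* preferred over dev->priv */
	card = priv->card;		/* card-specific data hangs off priv */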
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
index d5ec3dedb5da78..7300901185d9d9 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/hermes.c
@@ -13,8 +13,8 @@
* (wvlan_hcf.c) library, and the NetBSD wireless driver (in no
* particular order).
*
- * Copyright (C) 2000, David Gibson, Linuxcare Australia <hermes@gibson.dropbear.id.au>
- * Copyright (C) 2001, David Gibson, IBM <hermes@gibson.dropbear.id.au>
+ * Copyright (C) 2000, David Gibson, Linuxcare Australia.
+ * (C) Copyright David Gibson, IBM Corp. 2001-2003.
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
@@ -53,10 +53,9 @@
#include "hermes.h"
MODULE_DESCRIPTION("Low-level driver helper for Lucent Hermes chipset and Prism II HFA384x wireless MAC controller");
-MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
-#ifdef MODULE_LICENSE
+MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>"
+ " & David Gibson <hermes@gibson.dropbear.id.au>");
MODULE_LICENSE("Dual MPL/GPL");
-#endif
 /* These are maximum timeouts. Most often, card will react much faster */
#define CMD_BUSY_TIMEOUT (100) /* In iterations of ~1us */
@@ -226,7 +225,7 @@ int hermes_init(hermes_t *hw)
*
* Callable from any context, but locking is your problem. */
int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
- hermes_response_t *resp)
+ struct hermes_response *resp)
{
int err;
int k;
@@ -392,7 +391,6 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
return -EIO;
}
-
return 0;
}
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
index 126923ffddef08..429d5724069d28 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/hermes.h
@@ -12,7 +12,8 @@
* project, the Linux wvlan_cs driver, Lucent's HCF-Light
* (wvlan_hcf.c) library, and the NetBSD wireless driver.
*
- * Copyright (C) 2000, David Gibson, Linuxcare Australia <hermes@gibson.dropbear.id.au>
+ * Copyright (C) 2000, David Gibson, Linuxcare Australia.
+ * (C) Copyright David Gibson, IBM Corp. 2001-2003.
*
* Portions taken from hfa384x.h, Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
*
@@ -124,29 +125,52 @@
/*
* Command codes
*/
-/*--- Controller Commands --------------------------*/
+/*--- Controller Commands ----------------------------*/
#define HERMES_CMD_INIT (0x0000)
#define HERMES_CMD_ENABLE (0x0001)
#define HERMES_CMD_DISABLE (0x0002)
#define HERMES_CMD_DIAG (0x0003)
-/*--- Buffer Mgmt Commands --------------------------*/
+/*--- Buffer Mgmt Commands ---------------------------*/
#define HERMES_CMD_ALLOC (0x000A)
#define HERMES_CMD_TX (0x000B)
-#define HERMES_CMD_CLRPRST (0x0012)
-/*--- Regulate Commands --------------------------*/
+/*--- Regulate Commands ------------------------------*/
#define HERMES_CMD_NOTIFY (0x0010)
#define HERMES_CMD_INQUIRE (0x0011)
-/*--- Configure Commands --------------------------*/
+/*--- Configure Commands -----------------------------*/
#define HERMES_CMD_ACCESS (0x0021)
#define HERMES_CMD_DOWNLD (0x0022)
+/*--- Serial I/O Commands ----------------------------*/
+#define HERMES_CMD_READMIF (0x0030)
+#define HERMES_CMD_WRITEMIF (0x0031)
+
/*--- Debugging Commands -----------------------------*/
-#define HERMES_CMD_MONITOR (0x0038)
-#define HERMES_MONITOR_ENABLE (0x000b)
-#define HERMES_MONITOR_DISABLE (0x000f)
+#define HERMES_CMD_TEST (0x0038)
+
+
+/* Test command arguments */
+#define HERMES_TEST_SET_CHANNEL 0x0800
+#define HERMES_TEST_MONITOR 0x0b00
+#define HERMES_TEST_STOP 0x0f00
+
+/* Authentication algorithms */
+#define HERMES_AUTH_OPEN 1
+#define HERMES_AUTH_SHARED_KEY 2
+
+/* WEP settings */
+#define HERMES_WEP_PRIVACY_INVOKED 0x0001
+#define HERMES_WEP_EXCL_UNENCRYPTED 0x0002
+#define HERMES_WEP_HOST_ENCRYPT 0x0010
+#define HERMES_WEP_HOST_DECRYPT 0x0080
+
+/* Symbol hostscan options */
+#define HERMES_HOSTSCAN_SYMBOL_5SEC 0x0001
+#define HERMES_HOSTSCAN_SYMBOL_ONCE 0x0002
+#define HERMES_HOSTSCAN_SYMBOL_PASSIVE 0x0040
+#define HERMES_HOSTSCAN_SYMBOL_BCAST 0x0080
/*
* Frame structures and constants
@@ -157,16 +181,6 @@
#define HERMES_802_3_OFFSET (14+32)
#define HERMES_802_2_OFFSET (14+32+14)
-struct hermes_rx_descriptor {
- u16 status;
- u32 time;
- u8 silence;
- u8 signal;
- u8 rate;
- u8 rxflow;
- u32 reserved;
-} __attribute__ ((packed));
-
#define HERMES_RXSTAT_ERR (0x0003)
#define HERMES_RXSTAT_BADCRC (0x0001)
#define HERMES_RXSTAT_UNDECRYPTABLE (0x0002)
@@ -201,7 +215,11 @@ struct hermes_tx_descriptor {
#define HERMES_INQ_TALLIES (0xF100)
#define HERMES_INQ_SCAN (0xF101)
+#define HERMES_INQ_CHANNELINFO (0xF102)
+#define HERMES_INQ_HOSTSCAN (0xF103)
+#define HERMES_INQ_HOSTSCAN_SYMBOL (0xF104)
#define HERMES_INQ_LINKSTATUS (0xF200)
+#define HERMES_INQ_SEC_STAT_AGERE (0xF202)
struct hermes_tallies_frame {
u16 TxUnicastFrames;
@@ -233,23 +251,58 @@ struct hermes_tallies_frame {
/* Grabbed from wlan-ng - Thanks Mark... - Jean II
* This is the result of a scan inquiry command */
/* Structure describing info about an Access Point */
-struct hermes_scan_apinfo {
+struct prism2_scan_apinfo {
u16 channel; /* Channel where the AP sits */
u16 noise; /* Noise level */
u16 level; /* Signal level */
u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
- u16 beacon_interv; /* Beacon interval ? */
- u16 capabilities; /* Capabilities ? */
+ u16 beacon_interv; /* Beacon interval */
+ u16 capabilities; /* Capabilities */
+ u16 essid_len; /* ESSID length */
u8 essid[32]; /* ESSID of the network */
u8 rates[10]; /* Bit rate supported */
- u16 proberesp_rate; /* ???? */
+ u16 proberesp_rate; /* Data rate of the response frame */
+ u16 atim; /* ATIM window time, Kus (hostscan only) */
} __attribute__ ((packed));
-/* Container */
-struct hermes_scan_frame {
- u16 rsvd; /* ??? */
- u16 scanreason; /* ??? */
- struct hermes_scan_apinfo aps[35]; /* Scan result */
+
+/* Same stuff for the Lucent/Agere card.
+ * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */
+struct agere_scan_apinfo {
+ u16 channel; /* Channel where the AP sits */
+ u16 noise; /* Noise level */
+ u16 level; /* Signal level */
+ u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
+ u16 beacon_interv; /* Beacon interval */
+ u16 capabilities; /* Capabilities */
+ /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
+ u16 essid_len; /* ESSID length */
+ u8 essid[32]; /* ESSID of the network */
} __attribute__ ((packed));
+
+/* Moustafa: Scan structure for Symbol cards */
+struct symbol_scan_apinfo {
+ u8 channel; /* Channel where the AP sits */
+ u8 unknown1; /* 8 in 2.9x and 3.9x f/w, 0 otherwise */
+ u16 noise; /* Noise level */
+ u16 level; /* Signal level */
+ u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
+ u16 beacon_interv; /* Beacon interval */
+ u16 capabilities; /* Capabilities */
+ /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
+ u16 essid_len; /* ESSID length */
+ u8 essid[32]; /* ESSID of the network */
+ u16 rates[5]; /* Bit rate supported */
+ u16 basic_rates; /* Basic rates bitmask */
+ u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */
+ u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */
+} __attribute__ ((packed));
+
+union hermes_scan_info {
+ struct agere_scan_apinfo a;
+ struct prism2_scan_apinfo p;
+ struct symbol_scan_apinfo s;
+};
+
#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
#define HERMES_LINKSTATUS_CONNECTED (0x0001)
#define HERMES_LINKSTATUS_DISCONNECTED (0x0002)
@@ -262,6 +315,20 @@ struct hermes_linkstatus {
u16 linkstatus; /* Link status */
} __attribute__ ((packed));
+struct hermes_response {
+ u16 status, resp0, resp1, resp2;
+};
+
+/* "ID" structure - used for ESSID and station nickname */
+struct hermes_idstring {
+ u16 len;
+ u16 val[16];
+} __attribute__ ((packed));
+
+struct hermes_multicast {
+ u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
+} __attribute__ ((packed));
+
// #define HERMES_DEBUG_BUFFER 1
#define HERMES_DEBUG_BUFSIZE 4096
struct hermes_debug_entry {
@@ -294,10 +361,6 @@ typedef struct hermes {
#endif
} hermes_t;
-typedef struct hermes_response {
- u16 status, resp0, resp1, resp2;
-} hermes_response_t;
-
/* Register access convenience macros */
#define hermes_read_reg(hw, off) ((hw)->io_space ? \
inw((hw)->iobase + ( (off) << (hw)->reg_spacing )) : \
@@ -312,9 +375,11 @@ typedef struct hermes_response {
#define hermes_write_regn(hw, name, val) hermes_write_reg((hw), HERMES_##name, (val))
/* Function prototypes */
-void hermes_struct_init(hermes_t *hw, ulong address, int io_space, int reg_spacing);
+void hermes_struct_init(hermes_t *hw, ulong address, int io_space,
+ int reg_spacing);
int hermes_init(hermes_t *hw);
-int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0, hermes_response_t *resp);
+int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp);
int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
int hermes_bap_pread(hermes_t *hw, int bap, void *buf, unsigned len,
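The scan results are now described per firmware flavour: prism2_scan_apinfo, agere_scan_apinfo and symbol_scan_apinfo overlay each other in union hermes_scan_info, and the caller picks the member matching the firmware. A minimal sketch of such a dispatch, assuming the FIRMWARE_TYPE_* values used later in orinoco.c (the helper itself is hypothetical):

	static int scan_get_channel(struct orinoco_private *priv,
				    union hermes_scan_info *bss)
	{
		switch (priv->firmware_type) {
		case FIRMWARE_TYPE_AGERE:
			return le16_to_cpu(bss->a.channel);
		case FIRMWARE_TYPE_SYMBOL:
			return bss->s.channel;	/* u8 field, no byte swap */
		case FIRMWARE_TYPE_INTERSIL:
		default:
			return le16_to_cpu(bss->p.channel);
		}
	}

Note that only the Symbol layout uses a u8 channel; the Agere and Prism2 layouts keep the little-endian u16 the firmware returns.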
diff --git a/drivers/net/wireless/hermes_rid.h b/drivers/net/wireless/hermes_rid.h
index 761c5422ed4dec..4f46b4809e554d 100644
--- a/drivers/net/wireless/hermes_rid.h
+++ b/drivers/net/wireless/hermes_rid.h
@@ -4,21 +4,21 @@
/*
* Configuration RIDs
*/
-#define HERMES_RID_CNFPORTTYPE 0xFC00 /* used */
-#define HERMES_RID_CNFOWNMACADDR 0xFC01 /* used */
-#define HERMES_RID_CNFDESIREDSSID 0xFC02 /* used */
-#define HERMES_RID_CNFOWNCHANNEL 0xFC03 /* used */
-#define HERMES_RID_CNFOWNSSID 0xFC04 /* used */
+#define HERMES_RID_CNFPORTTYPE 0xFC00
+#define HERMES_RID_CNFOWNMACADDR 0xFC01
+#define HERMES_RID_CNFDESIREDSSID 0xFC02
+#define HERMES_RID_CNFOWNCHANNEL 0xFC03
+#define HERMES_RID_CNFOWNSSID 0xFC04
#define HERMES_RID_CNFOWNATIMWINDOW 0xFC05
-#define HERMES_RID_CNFSYSTEMSCALE 0xFC06 /* used */
+#define HERMES_RID_CNFSYSTEMSCALE 0xFC06
#define HERMES_RID_CNFMAXDATALEN 0xFC07
#define HERMES_RID_CNFWDSADDRESS 0xFC08
-#define HERMES_RID_CNFPMENABLED 0xFC09 /* used */
+#define HERMES_RID_CNFPMENABLED 0xFC09
#define HERMES_RID_CNFPMEPS 0xFC0A
-#define HERMES_RID_CNFMULTICASTRECEIVE 0xFC0B /* used */
-#define HERMES_RID_CNFMAXSLEEPDURATION 0xFC0C /* used */
-#define HERMES_RID_CNFPMHOLDOVERDURATION 0xFC0D /* used */
-#define HERMES_RID_CNFOWNNAME 0xFC0E /* used */
+#define HERMES_RID_CNFMULTICASTRECEIVE 0xFC0B
+#define HERMES_RID_CNFMAXSLEEPDURATION 0xFC0C
+#define HERMES_RID_CNFPMHOLDOVERDURATION 0xFC0D
+#define HERMES_RID_CNFOWNNAME 0xFC0E
#define HERMES_RID_CNFOWNDTIMPERIOD 0xFC10
#define HERMES_RID_CNFWDSADDRESS1 0xFC11
#define HERMES_RID_CNFWDSADDRESS2 0xFC12
@@ -27,17 +27,18 @@
#define HERMES_RID_CNFWDSADDRESS5 0xFC15
#define HERMES_RID_CNFWDSADDRESS6 0xFC16
#define HERMES_RID_CNFMULTICASTPMBUFFERING 0xFC17
-#define HERMES_RID_CNFWEPENABLED_AGERE 0xFC20 /* used */
+#define HERMES_RID_CNFWEPENABLED_AGERE 0xFC20
+#define HERMES_RID_CNFAUTHENTICATION_AGERE 0xFC21
#define HERMES_RID_CNFMANDATORYBSSID_SYMBOL 0xFC21
-#define HERMES_RID_CNFWEPDEFAULTKEYID 0xFC23 /* used */
-#define HERMES_RID_CNFDEFAULTKEY0 0xFC24 /* used */
-#define HERMES_RID_CNFDEFAULTKEY1 0xFC25 /* used */
-#define HERMES_RID_CNFMWOROBUST_AGERE 0xFC25 /* used */
-#define HERMES_RID_CNFDEFAULTKEY2 0xFC26 /* used */
-#define HERMES_RID_CNFDEFAULTKEY3 0xFC27 /* used */
-#define HERMES_RID_CNFWEPFLAGS_INTERSIL 0xFC28 /* used */
+#define HERMES_RID_CNFWEPDEFAULTKEYID 0xFC23
+#define HERMES_RID_CNFDEFAULTKEY0 0xFC24
+#define HERMES_RID_CNFDEFAULTKEY1 0xFC25
+#define HERMES_RID_CNFMWOROBUST_AGERE 0xFC25
+#define HERMES_RID_CNFDEFAULTKEY2 0xFC26
+#define HERMES_RID_CNFDEFAULTKEY3 0xFC27
+#define HERMES_RID_CNFWEPFLAGS_INTERSIL 0xFC28
#define HERMES_RID_CNFWEPKEYMAPPINGTABLE 0xFC29
-#define HERMES_RID_CNFAUTHENTICATION 0xFC2A /* used */
+#define HERMES_RID_CNFAUTHENTICATION 0xFC2A
#define HERMES_RID_CNFMAXASSOCSTA 0xFC2B
#define HERMES_RID_CNFKEYLENGTH_SYMBOL 0xFC2B
#define HERMES_RID_CNFTXCONTROL 0xFC2C
@@ -53,14 +54,14 @@
#define HERMES_RID_CNFTIMCTRL 0xFC40
#define HERMES_RID_CNFTHIRTY2TALLY 0xFC42
#define HERMES_RID_CNFENHSECURITY 0xFC43
-#define HERMES_RID_CNFGROUPADDRESSES 0xFC80 /* used */
-#define HERMES_RID_CNFCREATEIBSS 0xFC81 /* used */
-#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD 0xFC82 /* used */
-#define HERMES_RID_CNFRTSTHRESHOLD 0xFC83 /* used */
-#define HERMES_RID_CNFTXRATECONTROL 0xFC84 /* used */
-#define HERMES_RID_CNFPROMISCUOUSMODE 0xFC85 /* used */
+#define HERMES_RID_CNFGROUPADDRESSES 0xFC80
+#define HERMES_RID_CNFCREATEIBSS 0xFC81
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD 0xFC82
+#define HERMES_RID_CNFRTSTHRESHOLD 0xFC83
+#define HERMES_RID_CNFTXRATECONTROL 0xFC84
+#define HERMES_RID_CNFPROMISCUOUSMODE 0xFC85
#define HERMES_RID_CNFBASICRATES_SYMBOL 0xFC8A
-#define HERMES_RID_CNFPREAMBLE_SYMBOL 0xFC8C /* used */
+#define HERMES_RID_CNFPREAMBLE_SYMBOL 0xFC8C
#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD0 0xFC90
#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD1 0xFC91
#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD2 0xFC92
@@ -75,18 +76,21 @@
#define HERMES_RID_CNFRTSTHRESHOLD4 0xFC9B
#define HERMES_RID_CNFRTSTHRESHOLD5 0xFC9C
#define HERMES_RID_CNFRTSTHRESHOLD6 0xFC9D
+#define HERMES_RID_CNFHOSTSCAN_SYMBOL 0xFCAB
#define HERMES_RID_CNFSHORTPREAMBLE 0xFCB0
-#define HERMES_RID_CNFWEPKEYS_AGERE 0xFCB0 /* used */
+#define HERMES_RID_CNFWEPKEYS_AGERE 0xFCB0
#define HERMES_RID_CNFEXCLUDELONGPREAMBLE 0xFCB1
-#define HERMES_RID_CNFTXKEY_AGERE 0xFCB1 /* used */
+#define HERMES_RID_CNFTXKEY_AGERE 0xFCB1
#define HERMES_RID_CNFAUTHENTICATIONRSPTO 0xFCB2
+#define HERMES_RID_CNFSCANSSID_AGERE 0xFCB2
#define HERMES_RID_CNFBASICRATES 0xFCB3
#define HERMES_RID_CNFSUPPORTEDRATES 0xFCB4
-#define HERMES_RID_CNFTICKTIME 0xFCE0 /* used */
+#define HERMES_RID_CNFTICKTIME 0xFCE0
#define HERMES_RID_CNFSCANREQUEST 0xFCE1
#define HERMES_RID_CNFJOINREQUEST 0xFCE2
#define HERMES_RID_CNFAUTHENTICATESTATION 0xFCE3
#define HERMES_RID_CNFCHANNELINFOREQUEST 0xFCE4
+#define HERMES_RID_CNFHOSTSCAN 0xFCE5
/*
* Information RIDs
@@ -100,30 +104,31 @@
#define HERMES_RID_NICID 0xFD0B
#define HERMES_RID_MFISUPRANGE 0xFD0C
#define HERMES_RID_CFISUPRANGE 0xFD0D
-#define HERMES_RID_CHANNELLIST 0xFD10 /* used */
+#define HERMES_RID_CHANNELLIST 0xFD10
#define HERMES_RID_REGULATORYDOMAINS 0xFD11
#define HERMES_RID_TEMPTYPE 0xFD12
#define HERMES_RID_CIS 0xFD13
-#define HERMES_RID_STAID 0xFD20 /* used */
+#define HERMES_RID_STAID 0xFD20
#define HERMES_RID_STASUPRANGE 0xFD21
#define HERMES_RID_MFIACTRANGES 0xFD22
#define HERMES_RID_CFIACTRANGES2 0xFD23
-#define HERMES_RID_SECONDARYVERSION_SYMBOL 0xFD24 /* used */
+#define HERMES_RID_SECONDARYVERSION_SYMBOL 0xFD24
#define HERMES_RID_PORTSTATUS 0xFD40
-#define HERMES_RID_CURRENTSSID 0xFD41 /* used */
-#define HERMES_RID_CURRENTBSSID 0xFD42 /* used */
-#define HERMES_RID_COMMSQUALITY 0xFD43 /* used */
-#define HERMES_RID_CURRENTTXRATE 0xFD44 /* used */
+#define HERMES_RID_CURRENTSSID 0xFD41
+#define HERMES_RID_CURRENTBSSID 0xFD42
+#define HERMES_RID_COMMSQUALITY 0xFD43
+#define HERMES_RID_CURRENTTXRATE 0xFD44
#define HERMES_RID_CURRENTBEACONINTERVAL 0xFD45
#define HERMES_RID_CURRENTSCALETHRESHOLDS 0xFD46
#define HERMES_RID_PROTOCOLRSPTIME 0xFD47
-#define HERMES_RID_SHORTRETRYLIMIT 0xFD48 /* used */
-#define HERMES_RID_LONGRETRYLIMIT 0xFD49 /* used */
-#define HERMES_RID_MAXTRANSMITLIFETIME 0xFD4A /* used */
+#define HERMES_RID_SHORTRETRYLIMIT 0xFD48
+#define HERMES_RID_LONGRETRYLIMIT 0xFD49
+#define HERMES_RID_MAXTRANSMITLIFETIME 0xFD4A
#define HERMES_RID_MAXRECEIVELIFETIME 0xFD4B
#define HERMES_RID_CFPOLLABLE 0xFD4C
#define HERMES_RID_AUTHENTICATIONALGORITHMS 0xFD4D
#define HERMES_RID_PRIVACYOPTIONIMPLEMENTED 0xFD4F
+#define HERMES_RID_DBMCOMMSQUALITY_INTERSIL 0xFD51
#define HERMES_RID_CURRENTTXRATE1 0xFD80
#define HERMES_RID_CURRENTTXRATE2 0xFD81
#define HERMES_RID_CURRENTTXRATE3 0xFD82
@@ -133,21 +138,11 @@
#define HERMES_RID_OWNMACADDR 0xFD86
#define HERMES_RID_SCANRESULTSTABLE 0xFD88
#define HERMES_RID_PHYTYPE 0xFDC0
-#define HERMES_RID_CURRENTCHANNEL 0xFDC1 /* used */
+#define HERMES_RID_CURRENTCHANNEL 0xFDC1
#define HERMES_RID_CURRENTPOWERSTATE 0xFDC2
#define HERMES_RID_CCAMODE 0xFDC3
-#define HERMES_RID_SUPPORTEDDATARATES 0xFDC6 /* used */
+#define HERMES_RID_SUPPORTEDDATARATES 0xFDC6
#define HERMES_RID_BUILDSEQ 0xFFFE
#define HERMES_RID_FWID 0xFFFF
-/* "ID" structure - used for ESSID and station nickname */
-struct hermes_idstring {
- u16 len;
- u16 val[16];
-} __attribute__ ((packed));
-
-typedef struct hermes_multicast {
- u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
-} __attribute__ ((packed)) hermes_multicast_t;
-
#endif
diff --git a/drivers/net/wireless/ieee802_11.h b/drivers/net/wireless/ieee802_11.h
index 07d626ea1ba3cd..53dd5248f9f1ce 100644
--- a/drivers/net/wireless/ieee802_11.h
+++ b/drivers/net/wireless/ieee802_11.h
@@ -76,4 +76,3 @@ struct ieee802_11_hdr {
#define IEEE802_11_SCTL_SEQ 0xFFF0
#endif /* _IEEE802_11_H */
-
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 8161a1c0094933..284aabeffa2835 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -1,17 +1,23 @@
-/* orinoco.c 0.13e - (formerly known as dldwd_cs.c and orinoco_cs.c)
+/* orinoco.c - (formerly known as dldwd_cs.c and orinoco_cs.c)
*
* A driver for Hermes or Prism 2 chipset based PCMCIA wireless
* adaptors, with Lucent/Agere, Intersil or Symbol firmware.
*
- * Copyright (C) 2000 David Gibson, Linuxcare Australia <hermes@gibson.dropbear.id.au>
+ * Current maintainers (as of 29 September 2003) are:
+ * Pavel Roskin <proski AT gnu.org>
+ * and David Gibson <hermes AT gibson.dropbear.id.au>
+ *
+ * (C) Copyright David Gibson, IBM Corporation 2001-2003.
+ * Copyright (C) 2000 David Gibson, Linuxcare Australia.
* With some help from :
- * Copyright (C) 2001 Jean Tourrilhes, HP Labs <jt@hpl.hp.com>
- * Copyright (C) 2001 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * Copyright (C) 2001 Jean Tourrilhes, HP Labs
+ * Copyright (C) 2001 Benjamin Herrenschmidt
*
* Based on dummy_cs.c 1.27 2000/06/12 21:27:25
*
- * Portions based on wvlan_cs.c 1.0.6, Copyright Andreas Neuhaus <andy@fasta.fh-dortmund.de>
- * http://www.fasta.fh-dortmund.de/users/andy/wvlan/
+ * Portions based on wvlan_cs.c 1.0.6, Copyright Andreas Neuhaus <andy
+ * AT fasta.fh-dortmund.de>
+ * http://www.stud.fh-dortmund.de/~andy/wvlan/
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
@@ -24,7 +30,7 @@
* limitations under the License.
*
* The initial developer of the original code is David A. Hinds
- * <dahinds@users.sourceforge.net>. Portions created by David
+ * <dahinds AT users.sourceforge.net>. Portions created by David
* A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights
* Reserved.
*
@@ -58,7 +64,7 @@
* o Add PM timeout (holdover duration)
* o Enable "iwconfig eth0 key off" and friends (toggle flags)
* o Enable "iwconfig eth0 power unicast/all" (toggle flags)
- * o Try with an intel card. It report firmware 1.01, behave like
+ * o Try with an Intel card. It reports firmware 1.01, behaves like
* an antiquated firmware, however on windows it says 2.00. Yuck !
* o Workaround firmware bug in allocate buffer (Intel 1.01)
* o Finish external renaming to orinoco...
@@ -68,8 +74,8 @@
* o Update to Wireless 11 -> add retry limit/lifetime support
* o Tested with a D-Link DWL 650 card, fill in firmware support
* o Warning on Vcc mismatch (D-Link 3.3v card in Lucent 5v only slot)
- * o Fixed the Prims2 WEP bugs that I introduced in v0.03 :-(
- * It work on D-Link *only* after a tcpdump. Weird...
+ * o Fixed the Prism2 WEP bugs that I introduced in v0.03 :-(
+ * It works on D-Link *only* after a tcpdump. Weird...
* And still doesn't work on Intel card. Grrrr...
* o Update the mode after a setport3
* o Add preamble setting for Symbol cards (not yet enabled)
@@ -82,7 +88,7 @@
* o Clean up RID definitions in hermes.h, other cleanups
*
* v0.04b -> v0.04c - 24/4/2001 - Jean II
- * o Tim Hurley <timster@seiki.bliztech.com> reported a D-Link card
+ * o Tim Hurley <timster AT seiki.bliztech.com> reported a D-Link card
* with vendor 02 and firmware 0.08. Added in the capabilities...
* o Tested Lucent firmware 7.28, everything works...
*
@@ -105,15 +111,15 @@
* o Remove deferred power enabling code
*
* v0.05c -> v0.05d - 5/5/2001 - Jean II
- * o Workaround to SNAP decapsulate frame from LinkSys AP
- * original patch from : Dong Liu <dliu@research.bell-labs.com>
+ * o Workaround to SNAP decapsulate frame from Linksys AP
+ * original patch from : Dong Liu <dliu AT research.bell-labs.com>
* (note : the memcmp bug was mine - fixed)
* o Remove set_retry stuff, no firmware support it (bloat--).
*
* v0.05d -> v0.06 - 25/5/2001 - Jean II
- * Original patch from "Hong Lin" <alin@redhat.com>,
- * "Ian Kinner" <ikinner@redhat.com>
- * and "David Smith" <dsmith@redhat.com>
+ * Original patch from "Hong Lin" <alin AT redhat.com>,
+ * "Ian Kinner" <ikinner AT redhat.com>
+ * and "David Smith" <dsmith AT redhat.com>
* o Init of priv->tx_rate_ctrl in firmware specific section.
* o Prism2/Symbol rate, upto should be 0xF and not 0x15. Doh !
* o Spectrum card always need cor_reset (for every reset)
@@ -134,15 +140,15 @@
*
* v0.06c -> v0.06d - 6/7/2001 - David Gibson
* o Change a bunch of KERN_INFO messages to KERN_DEBUG, as per Linus'
- * wishes to reduce the number of unecessary messages.
+ * wishes to reduce the number of unnecessary messages.
* o Removed bogus message on CRC error.
- * o Merged fixeds for v0.08 Prism 2 firmware from William Waghorn
- * <willwaghorn@yahoo.co.uk>
+ * o Merged fixes for v0.08 Prism 2 firmware from William Waghorn
+ * <willwaghorn AT yahoo.co.uk>
* o Slight cleanup/re-arrangement of firmware detection code.
*
* v0.06d -> v0.06e - 1/8/2001 - David Gibson
* o Removed some redundant global initializers (orinoco_cs.c).
- * o Added some module metadataa
+ * o Added some module metadata
*
* v0.06e -> v0.06f - 14/8/2001 - David Gibson
* o Wording fix to license
@@ -159,7 +165,7 @@
 * v0.07 -> v0.07a - 1/10/2001 - Jean II
* o Add code to read Symbol firmware revision, inspired by latest code
* in Spectrum24 by Lee John Keyser-Allen - Thanks Lee !
- * o Thanks to Jared Valentine <hidden@xmission.com> for "providing" me
+ * o Thanks to Jared Valentine <hidden AT xmission.com> for "providing" me
* a 3Com card with a recent firmware, fill out Symbol firmware
* capabilities of latest rev (2.20), as well as older Symbol cards.
* o Disable Power Management in newer Symbol firmware, the API
@@ -172,7 +178,7 @@
* o Turned has_big_wep on for Intersil cards. That's not true for all of
* them but we should at least let the capable ones try.
* o Wait for BUSY to clear at the beginning of hermes_bap_seek(). I
- * realised that my assumption that the driver's serialization
+ * realized that my assumption that the driver's serialization
* would prevent the BAP being busy on entry was possibly false, because
* things other than seeks may make the BAP busy.
* o Use "alternate" (oui 00:00:00) encapsulation by default.
@@ -181,12 +187,12 @@
* o Don't try to make __initdata const (the version string). This can't
* work because of the way the __initdata sectioning works.
* o Added MODULE_LICENSE tags.
- * o Support for PLX (transparent PCMCIA->PCI brdge) cards.
- * o Changed to using the new type-facist min/max.
+ * o Support for PLX (transparent PCMCIA->PCI bridge) cards.
+ * o Changed to using the new type-fascist min/max.
*
* v0.08 -> v0.08a - 9/10/2001 - David Gibson
* o Inserted some missing acknowledgements/info into the Changelog.
- * o Fixed some bugs in the normalisation of signel level reporting.
+ * o Fixed some bugs in the normalization of signal level reporting.
* o Fixed bad bug in WEP key handling on Intersil and Symbol firmware,
* which led to an instant crash on big-endian machines.
*
@@ -342,7 +348,7 @@
* o Bugfix in orinoco_stop() - it used to fail if hw_unavailable
* was set, which was usually true on PCMCIA hot removes.
* o Track LINKSTATUS messages, silently drop Tx packets before
- * we are connected (avoids cofusing the firmware), and only
+ * we are connected (avoids confusing the firmware), and only
* give LINKSTATUS printk()s if the status has changed.
*
* v0.13b -> v0.13c - 11 Mar 2003 - David Gibson
@@ -397,7 +403,8 @@
* o Disconnect wireless extensions from fundamental configuration.
* o (maybe) Software WEP support (patch from Stano Meduna).
* o (maybe) Use multiple Tx buffers - driver handling queue
- * rather than firmware. */
+ * rather than firmware.
+ */
/* Locking and synchronization:
*
@@ -414,7 +421,10 @@
* flag after taking the lock, and if it is set, give up on whatever
* they are doing and drop the lock again. The orinoco_lock()
* function handles this (it unlocks and returns -EBUSY if
- * hw_unavailable is non-zero). */
+ * hw_unavailable is non-zero).
+ */
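The orinoco_lock()/orinoco_unlock() helpers referred to above implement exactly that check. A minimal sketch of how they can look, assuming the priv->lock spinlock and the hw_unavailable counter used elsewhere in this file (the real helpers live in orinoco.h, outside this diff):

	static inline int orinoco_lock(struct orinoco_private *priv,
				       unsigned long *flags)
	{
		spin_lock_irqsave(&priv->lock, *flags);
		if (priv->hw_unavailable) {
			/* Card is gone or resetting: caller must back off */
			spin_unlock_irqrestore(&priv->lock, *flags);
			return -EBUSY;
		}
		return 0;
	}

	static inline void orinoco_unlock(struct orinoco_private *priv,
					  unsigned long *flags)
	{
		spin_unlock_irqrestore(&priv->lock, *flags);
	}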
+
+#define DRIVER_NAME "orinoco"
#include <linux/config.h>
@@ -444,11 +454,9 @@
/* Module information */
/********************************************************************/
-MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
+MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> & David Gibson <hermes@gibson.dropbear.id.au>");
MODULE_DESCRIPTION("Driver for Lucent Orinoco, Prism II based and similar wireless cards");
-#ifdef MODULE_LICENSE
MODULE_LICENSE("Dual MPL/GPL");
-#endif
/* Level of debugging. Used in the macros in orinoco.h */
#ifdef ORINOCO_DEBUG
@@ -464,11 +472,6 @@ MODULE_PARM(suppress_linkstatus, "i");
/* Compile time configuration and compatibility stuff */
/********************************************************************/
-/* Wireless extensions backwards compatibility */
-#ifndef SIOCIWFIRSTPRIV
-#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
-#endif /* SIOCIWFIRSTPRIV */
-
/* We do this this way to avoid ifdefs in the actual code */
#ifdef WIRELESS_SPY
#define SPY_NUMBER(priv) (priv->spy_number)
@@ -497,25 +500,29 @@ MODULE_PARM(suppress_linkstatus, "i");
#define DUMMY_FID 0xFFFF
-#define RUP_EVEN(a) (((a) + 1) & (~1))
-
/*#define MAX_MULTICAST(priv) (priv->firmware_type == FIRMWARE_TYPE_AGERE ? \
HERMES_MAX_MULTICAST : 0)*/
#define MAX_MULTICAST(priv) (HERMES_MAX_MULTICAST)
+#define ORINOCO_INTEN (HERMES_EV_RX | HERMES_EV_ALLOC \
+ | HERMES_EV_TX | HERMES_EV_TXEXC \
+ | HERMES_EV_WTERR | HERMES_EV_INFO \
+ | HERMES_EV_INFDROP )
+
/********************************************************************/
/* Data tables */
/********************************************************************/
/* The frequency of each channel in MHz */
-const long channel_frequency[] = {
+static const long channel_frequency[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484
};
-#define NUM_CHANNELS ( sizeof(channel_frequency) / sizeof(channel_frequency[0]) )
+#define NUM_CHANNELS ARRAY_SIZE(channel_frequency)
-/* This tables gives the actual meanings of the bitrate IDs returned by the firmware. */
-struct {
+/* This table gives the actual meanings of the bitrate IDs returned
+ * by the firmware. */
+static struct {
int bitrate; /* in 100s of kilobits */
int automatic;
u16 agere_txratectrl;
@@ -530,7 +537,7 @@ struct {
{55, 1, 7, 7},
{110, 0, 5, 8},
};
-#define BITRATE_TABLE_SIZE (sizeof(bitrate_table) / sizeof(bitrate_table[0]))
+#define BITRATE_TABLE_SIZE ARRAY_SIZE(bitrate_table)
/********************************************************************/
/* Data types */
@@ -555,46 +562,758 @@ u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
+struct hermes_rx_descriptor {
+ u16 status;
+ u32 time;
+ u8 silence;
+ u8 signal;
+ u8 rate;
+ u8 rxflow;
+ u32 reserved;
+} __attribute__ ((packed));
+
/********************************************************************/
/* Function prototypes */
/********************************************************************/
+static int orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int __orinoco_program_rids(struct net_device *dev);
+static void __orinoco_set_multicast_list(struct net_device *dev);
+static int orinoco_debug_dump_recs(struct net_device *dev);
+
+/********************************************************************/
+/* Internal helper functions */
+/********************************************************************/
+
+static inline void set_port_type(struct orinoco_private *priv)
+{
+ switch (priv->iw_mode) {
+ case IW_MODE_INFRA:
+ priv->port_type = 1;
+ priv->createibss = 0;
+ break;
+ case IW_MODE_ADHOC:
+ if (priv->prefer_port3) {
+ priv->port_type = 3;
+ priv->createibss = 0;
+ } else {
+ priv->port_type = priv->ibss_port;
+ priv->createibss = 1;
+ }
+ break;
+ default:
+ printk(KERN_ERR "%s: Invalid priv->iw_mode in set_port_type()\n",
+ priv->ndev->name);
+ }
+}
+
+/********************************************************************/
+/* Device methods */
+/********************************************************************/
+
+static int orinoco_open(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = __orinoco_up(dev);
+
+ if (! err)
+ priv->open = 1;
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+int orinoco_stop(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int err = 0;
+
+ /* We mustn't use orinoco_lock() here, because we need to be
+ able to close the interface even if hw_unavailable is set
+ (e.g. as we're released after a PC Card removal) */
+ spin_lock_irq(&priv->lock);
+
+ priv->open = 0;
+
+ err = __orinoco_down(dev);
+
+ spin_unlock_irq(&priv->lock);
+
+ return err;
+}
+
+static struct net_device_stats *orinoco_get_stats(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+
+ return &priv->stats;
+}
+
+static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ struct iw_statistics *wstats = &priv->wstats;
+ int err = 0;
+ unsigned long flags;
+
+ if (! netif_device_present(dev)) {
+ printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n",
+ dev->name);
+ return NULL; /* FIXME: Can we do better than this? */
+ }
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return NULL; /* FIXME: Erg, we've been signalled, how
+ * do we propagate this back up? */
+
+ if (priv->iw_mode == IW_MODE_ADHOC) {
+ memset(&wstats->qual, 0, sizeof(wstats->qual));
+ /* If a spy address is defined, we report stats of the
+ * first spy address - Jean II */
+ if (SPY_NUMBER(priv)) {
+ wstats->qual.qual = priv->spy_stat[0].qual;
+ wstats->qual.level = priv->spy_stat[0].level;
+ wstats->qual.noise = priv->spy_stat[0].noise;
+ wstats->qual.updated = priv->spy_stat[0].updated;
+ }
+ } else {
+ struct {
+ u16 qual, signal, noise;
+ } __attribute__ ((packed)) cq;
+
+ err = HERMES_READ_RECORD(hw, USER_BAP,
+ HERMES_RID_COMMSQUALITY, &cq);
+
+ wstats->qual.qual = (int)le16_to_cpu(cq.qual);
+ wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
+ wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
+ wstats->qual.updated = 7;
+ }
+
+ /* We can't really wait for the tallies inquiry command to
+ * complete, so we just use the previous results and trigger
+ * a new tallies inquiry command for next time - Jean II */
+ /* FIXME: We're in user context (I think?), so we should just
+ wait for the tallies to come through */
+ err = hermes_inquire(hw, HERMES_INQ_TALLIES);
+
+ orinoco_unlock(priv, &flags);
+
+ if (err)
+ return NULL;
+
+ return wstats;
+}
+
+static void orinoco_set_multicast_list(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ printk(KERN_DEBUG "%s: orinoco_set_multicast_list() "
+ "called when hw_unavailable\n", dev->name);
+ return;
+ }
+
+ __orinoco_set_multicast_list(dev);
+ orinoco_unlock(priv, &flags);
+}
+
+static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+
+ if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) )
+ return -EINVAL;
+
+ if ( (new_mtu + ENCAPS_OVERHEAD + IEEE802_11_HLEN) >
+ (priv->nicbuf_size - ETH_HLEN) )
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+/********************************************************************/
+/* Tx path */
+/********************************************************************/
+
+static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 txfid = priv->txfid;
+ char *p;
+ struct ethhdr *eh;
+ int len, data_len, data_off;
+ struct hermes_tx_descriptor desc;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+ if (! netif_running(dev)) {
+ printk(KERN_ERR "%s: Tx on stopped device!\n",
+ dev->name);
+ TRACE_EXIT(dev->name);
+ return 1;
+ }
+
+ if (netif_queue_stopped(dev)) {
+ printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
+ dev->name);
+ TRACE_EXIT(dev->name);
+ return 1;
+ }
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
+ dev->name);
+ TRACE_EXIT(dev->name);
+ return 1;
+ }
+
+ if (! priv->connected) {
+ /* Oops, the firmware hasn't established a connection,
+ silently drop the packet (this seems to be the
+ safest approach). */
+ stats->tx_errors++;
+ orinoco_unlock(priv, &flags);
+ dev_kfree_skb(skb);
+ TRACE_EXIT(dev->name);
+ return 0;
+ }
+
+ /* Length of the packet body */
+ /* FIXME: what if the skb is smaller than this? */
+ len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN);
+
+ eh = (struct ethhdr *)skb->data;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.tx_control = cpu_to_le16(HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX);
+ err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), txfid, 0);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d writing Tx descriptor to BAP\n",
+ dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+
+ /* Clear the 802.11 header and data length fields - some
+ * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
+ * if this isn't done. */
+ hermes_clear_words(hw, HERMES_DATA0,
+ HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
+
+ /* Encapsulate Ethernet-II frames */
+ if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
+ struct header_struct hdr;
+ data_len = len;
+ data_off = HERMES_802_3_OFFSET + sizeof(hdr);
+ p = skb->data + ETH_HLEN;
+
+ /* 802.3 header */
+ memcpy(hdr.dest, eh->h_dest, ETH_ALEN);
+ memcpy(hdr.src, eh->h_source, ETH_ALEN);
+ hdr.len = htons(data_len + ENCAPS_OVERHEAD);
+
+ /* 802.2 header */
+ memcpy(&hdr.dsap, &encaps_hdr, sizeof(encaps_hdr));
+
+ hdr.ethertype = eh->h_proto;
+ err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
+ txfid, HERMES_802_3_OFFSET);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d writing packet header to BAP\n",
+ dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+ } else { /* IEEE 802.3 frame */
+ data_len = len + ETH_HLEN;
+ data_off = HERMES_802_3_OFFSET;
+ p = skb->data;
+ }
+
+ /* Round up for odd length packets */
+ err = hermes_bap_pwrite(hw, USER_BAP, p, ALIGN(data_len, 2),
+ txfid, data_off);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
+ dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+
+ /* Finally, we actually initiate the send */
+ netif_stop_queue(dev);
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
+ txfid, NULL);
+ if (err) {
+ netif_start_queue(dev);
+ printk(KERN_ERR "%s: Error %d transmitting packet\n",
+ dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+
+ dev->trans_start = jiffies;
+ stats->tx_bytes += data_off + data_len;
+
+ orinoco_unlock(priv, &flags);
+
+ dev_kfree_skb(skb);
+
+ TRACE_EXIT(dev->name);
+
+ return 0;
+ fail:
+ TRACE_EXIT(dev->name);
+
+ orinoco_unlock(priv, &flags);
+ return err;
+}
+
+static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ u16 fid = hermes_read_regn(hw, ALLOCFID);
+
+ if (fid != priv->txfid) {
+ if (fid != DUMMY_FID)
+ printk(KERN_WARNING "%s: Allocate event on unexpected fid (%04X)\n",
+ dev->name, fid);
+ return;
+ } else {
+ netif_wake_queue(dev);
+ }
+
+ hermes_write_regn(hw, ALLOCFID, DUMMY_FID);
+}
+
+static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+
+ stats->tx_packets++;
+
+ hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
+}
+
+static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ u16 fid = hermes_read_regn(hw, TXCOMPLFID);
+ struct hermes_tx_descriptor desc;
+ int err = 0;
+
+ if (fid == DUMMY_FID)
+ return; /* Nothing's really happened */
+
+ err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc), fid, 0);
+ if (err) {
+ printk(KERN_WARNING "%s: Unable to read descriptor on Tx error "
+ "(FID=%04X error %d)\n",
+ dev->name, fid, err);
+ } else {
+ DEBUG(1, "%s: Tx error, status %d\n",
+ dev->name, le16_to_cpu(desc.status));
+ }
+
+ stats->tx_errors++;
+
+ hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
+}
+
+static void orinoco_tx_timeout(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ struct hermes *hw = &priv->hw;
+
+ printk(KERN_WARNING "%s: Tx timeout! "
+ "ALLOCFID=%04x, TXCOMPLFID=%04x, EVSTAT=%04x\n",
+ dev->name, hermes_read_regn(hw, ALLOCFID),
+ hermes_read_regn(hw, TXCOMPLFID), hermes_read_regn(hw, EVSTAT));
+
+ stats->tx_errors++;
+
+ schedule_work(&priv->reset_work);
+}
+
+/********************************************************************/
+/* Rx path (data frames) */
+/********************************************************************/
+
+/* Does the frame have a SNAP header indicating it should be
+ * de-encapsulated to Ethernet-II? */
+static inline int is_ethersnap(struct header_struct *hdr)
+{
+ /* We de-encapsulate all packets which a) have SNAP headers
+ * (i.e. SSAP=DSAP=0xaa and CTRL=0x3 in the 802.2 LLC header)
+ * and b) have an OUI of 00:00:00 or 00:00:f8 in the SNAP
+ * header - we need both because different APs appear to use
+ * different OUIs for some reason */
+ return (memcmp(&hdr->dsap, &encaps_hdr, 5) == 0)
+ && ( (hdr->oui[2] == 0x00) || (hdr->oui[2] == 0xf8) );
+}
+
+static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
+ int level, int noise)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int i;
+
+ /* Gather wireless spy statistics: for each packet, compare the
+ * source address with our list, and if it matches, get the stats... */
+ for (i = 0; i < priv->spy_number; i++)
+ if (!memcmp(mac, priv->spy_address[i], ETH_ALEN)) {
+ priv->spy_stat[i].level = level - 0x95;
+ priv->spy_stat[i].noise = noise - 0x95;
+ priv->spy_stat[i].qual = (level > noise) ? (level - noise) : 0;
+ priv->spy_stat[i].updated = 7;
+ }
+}
+
static void orinoco_stat_gather(struct net_device *dev,
struct sk_buff *skb,
- struct hermes_rx_descriptor *desc);
+ struct hermes_rx_descriptor *desc)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
-static struct net_device_stats *orinoco_get_stats(struct net_device *dev);
-static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev);
+ /* Using spy support with lots of Rx packets, like in an
+ * infrastructure (AP), will really slow down everything, because
+ * the MAC address must be compared to each entry of the spy list.
+ * If the user really asks for it (set some address in the
+ * spy list), we do it, but he will pay the price.
+ * Note that to get here, you need both WIRELESS_SPY
+ * compiled in AND some addresses in the list !!!
+ */
+ /* Note : gcc will optimise the whole section away if
+ * WIRELESS_SPY is not defined... - Jean II */
+ if (SPY_NUMBER(priv)) {
+ orinoco_spy_gather(dev, skb->mac.raw + ETH_ALEN,
+ desc->signal, desc->silence);
+ }
+}
-/* Hardware control routines */
+static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ struct iw_statistics *wstats = &priv->wstats;
+ struct sk_buff *skb = NULL;
+ u16 rxfid, status;
+ int length, data_len, data_off;
+ char *p;
+ struct hermes_rx_descriptor desc;
+ struct header_struct hdr;
+ struct ethhdr *eh;
+ int err;
-static int __orinoco_program_rids(struct net_device *dev);
+ rxfid = hermes_read_regn(hw, RXFID);
-static int __orinoco_hw_set_bitrate(struct orinoco_private *priv);
-static int __orinoco_hw_setup_wep(struct orinoco_private *priv);
-static int orinoco_hw_get_bssid(struct orinoco_private *priv, char buf[ETH_ALEN]);
-static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
- char buf[IW_ESSID_MAX_SIZE+1]);
-static long orinoco_hw_get_freq(struct orinoco_private *priv);
-static int orinoco_hw_get_bitratelist(struct orinoco_private *priv, int *numrates,
- s32 *rates, int max);
-static void __orinoco_set_multicast_list(struct net_device *dev);
+ err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc),
+ rxfid, 0);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading Rx descriptor. "
+ "Frame dropped.\n", dev->name, err);
+ stats->rx_errors++;
+ goto drop;
+ }
-/* Interrupt handling routines */
-static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw);
-static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw);
-static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw);
-static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw);
-static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw);
-static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw);
-static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw);
-static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw);
-
-/* ioctl() routines */
-static int orinoco_debug_dump_recs(struct net_device *dev);
+ status = le16_to_cpu(desc.status);
+
+ if (status & HERMES_RXSTAT_ERR) {
+ if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
+ wstats->discard.code++;
+ DEBUG(1, "%s: Undecryptable frame on Rx. Frame dropped.\n",
+ dev->name);
+ } else {
+ stats->rx_crc_errors++;
+ DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n", dev->name);
+ }
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ /* For now we ignore the 802.11 header completely, assuming
+ that the card's firmware has handled anything vital */
+
+ err = hermes_bap_pread(hw, IRQ_BAP, &hdr, sizeof(hdr),
+ rxfid, HERMES_802_3_OFFSET);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading frame header. "
+ "Frame dropped.\n", dev->name, err);
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ length = ntohs(hdr.len);
+
+ /* Sanity checks */
+ if (length < 3) { /* Not enough for even an 802.2 LLC header */
+ /* At least on Symbol firmware with PCF we get quite a
+ lot of these legitimately - Poll frames with no
+ data. */
+ stats->rx_dropped++;
+ goto drop;
+ }
+ if (length > IEEE802_11_DATA_LEN) {
+ printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
+ dev->name, length);
+ stats->rx_length_errors++;
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ /* We need space for the packet data itself, plus an ethernet
+ header, plus 2 bytes so we can align the IP header on a
+ 32bit boundary, plus 1 byte so we can read in odd length
+ packets from the card, which has an IO granularity of 16
+ bits */
+ skb = dev_alloc_skb(length+ETH_HLEN+2+1);
+ if (!skb) {
+ printk(KERN_WARNING "%s: Can't allocate skb for Rx\n",
+ dev->name);
+ goto drop;
+ }
+
+ skb_reserve(skb, 2); /* This way the IP header is aligned */
+
+ /* Handle decapsulation
+ * In most cases, the firmware tells us about SNAP frames.
+ * For some reason, the SNAP frames sent by LinkSys APs
+ * are not properly recognised by most firmwares.
+ * So, check ourselves */
+ if (((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) ||
+ ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) ||
+ is_ethersnap(&hdr)) {
+ /* These indicate a SNAP within 802.2 LLC within
+ 802.11 frame which we'll need to de-encapsulate to
+ the original EthernetII frame. */
+
+ if (length < ENCAPS_OVERHEAD) { /* No room for full LLC+SNAP */
+ stats->rx_length_errors++;
+ goto drop;
+ }
+
+ /* Remove SNAP header, reconstruct EthernetII frame */
+ data_len = length - ENCAPS_OVERHEAD;
+ data_off = HERMES_802_3_OFFSET + sizeof(hdr);
+
+ eh = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+
+ memcpy(eh, &hdr, 2 * ETH_ALEN);
+ eh->h_proto = hdr.ethertype;
+ } else {
+ /* All other cases indicate a genuine 802.3 frame. No
+ decapsulation needed. We just throw the whole
+ thing in, and hope the protocol layer can deal with
+ it as 802.3 */
+ data_len = length;
+ data_off = HERMES_802_3_OFFSET;
+ /* FIXME: we re-read from the card data we already read here */
+ }
+
+ p = skb_put(skb, data_len);
+ err = hermes_bap_pread(hw, IRQ_BAP, p, ALIGN(data_len, 2),
+ rxfid, data_off);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading frame. "
+ "Frame dropped.\n", dev->name, err);
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Process the wireless stats if needed */
+ orinoco_stat_gather(dev, skb, &desc);
+
+ /* Pass the packet to the networking stack */
+ netif_rx(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += length;
+
+ return;
+
+ drop:
+ stats->rx_dropped++;
+
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ return;
+}
/********************************************************************/
-/* Function prototypes */
+/* Rx path (info frames) */
+/********************************************************************/
+
+static void print_linkstatus(struct net_device *dev, u16 status)
+{
+ char * s;
+
+ if (suppress_linkstatus)
+ return;
+
+ switch (status) {
+ case HERMES_LINKSTATUS_NOT_CONNECTED:
+ s = "Not Connected";
+ break;
+ case HERMES_LINKSTATUS_CONNECTED:
+ s = "Connected";
+ break;
+ case HERMES_LINKSTATUS_DISCONNECTED:
+ s = "Disconnected";
+ break;
+ case HERMES_LINKSTATUS_AP_CHANGE:
+ s = "AP Changed";
+ break;
+ case HERMES_LINKSTATUS_AP_OUT_OF_RANGE:
+ s = "AP Out of Range";
+ break;
+ case HERMES_LINKSTATUS_AP_IN_RANGE:
+ s = "AP In Range";
+ break;
+ case HERMES_LINKSTATUS_ASSOC_FAILED:
+ s = "Association Failed";
+ break;
+ default:
+ s = "UNKNOWN";
+ }
+
+ printk(KERN_INFO "%s: New link status: %s (%04x)\n",
+ dev->name, s, status);
+}
+
+static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ u16 infofid;
+ struct {
+ u16 len;
+ u16 type;
+ } __attribute__ ((packed)) info;
+ int len, type;
+ int err;
+
+ /* This is an answer to an INQUIRE command that we did earlier,
+ * or an information "event" generated by the card
+ * The controller return to us a pseudo frame containing
+ * the information in question - Jean II */
+ infofid = hermes_read_regn(hw, INFOFID);
+
+ /* Read the info frame header - don't try too hard */
+ err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info),
+ infofid, 0);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading info frame. "
+ "Frame dropped.\n", dev->name, err);
+ return;
+ }
+
+ len = HERMES_RECLEN_TO_BYTES(le16_to_cpu(info.len));
+ type = le16_to_cpu(info.type);
+
+ switch (type) {
+ case HERMES_INQ_TALLIES: {
+ struct hermes_tallies_frame tallies;
+ struct iw_statistics *wstats = &priv->wstats;
+
+ if (len > sizeof(tallies)) {
+ printk(KERN_WARNING "%s: Tallies frame too long (%d bytes)\n",
+ dev->name, len);
+ len = sizeof(tallies);
+ }
+
+ /* Read directly the data (no seek) */
+ hermes_read_words(hw, HERMES_DATA1, (void *) &tallies,
+ len / 2); /* FIXME: blech! */
+
+ /* Increment our various counters */
+ /* wstats->discard.nwid - no wrong BSSID stuff */
+ wstats->discard.code +=
+ le16_to_cpu(tallies.RxWEPUndecryptable);
+ if (len == sizeof(tallies))
+ wstats->discard.code +=
+ le16_to_cpu(tallies.RxDiscards_WEPICVError) +
+ le16_to_cpu(tallies.RxDiscards_WEPExcluded);
+ wstats->discard.misc +=
+ le16_to_cpu(tallies.TxDiscardsWrongSA);
+ wstats->discard.fragment +=
+ le16_to_cpu(tallies.RxMsgInBadMsgFragments);
+ wstats->discard.retries +=
+ le16_to_cpu(tallies.TxRetryLimitExceeded);
+ /* wstats->miss.beacon - no match */
+ }
+ break;
+ case HERMES_INQ_LINKSTATUS: {
+ struct hermes_linkstatus linkstatus;
+ u16 newstatus;
+
+ if (len != sizeof(linkstatus)) {
+ printk(KERN_WARNING "%s: Unexpected size for linkstatus frame (%d bytes)\n",
+ dev->name, len);
+ break;
+ }
+
+ hermes_read_words(hw, HERMES_DATA1, (void *) &linkstatus,
+ len / 2);
+ newstatus = le16_to_cpu(linkstatus.linkstatus);
+
+ if ( (newstatus == HERMES_LINKSTATUS_CONNECTED)
+ || (newstatus == HERMES_LINKSTATUS_AP_CHANGE)
+ || (newstatus == HERMES_LINKSTATUS_AP_IN_RANGE) )
+ priv->connected = 1;
+ else if ( (newstatus == HERMES_LINKSTATUS_NOT_CONNECTED)
+ || (newstatus == HERMES_LINKSTATUS_DISCONNECTED)
+ || (newstatus == HERMES_LINKSTATUS_AP_OUT_OF_RANGE)
+ || (newstatus == HERMES_LINKSTATUS_ASSOC_FAILED) )
+ priv->connected = 0;
+
+ if (newstatus != priv->last_linkstatus)
+ print_linkstatus(dev, newstatus);
+
+ priv->last_linkstatus = newstatus;
+ }
+ break;
+ default:
+ printk(KERN_DEBUG "%s: Unknown information frame received "
+ "(type %04x).\n", dev->name, type);
+ /* We don't actually do anything about it */
+ break;
+ }
+}
+
+static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
+{
+ if (net_ratelimit())
+ printk(KERN_WARNING "%s: Information frame lost.\n", dev->name);
+}
+
+/********************************************************************/
+/* Internal hardware control routines */
/********************************************************************/
int __orinoco_up(struct net_device *dev)
@@ -683,43 +1402,139 @@ int orinoco_reinit_firmware(struct net_device *dev)
return err;
}
-static int orinoco_open(struct net_device *dev)
+static int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
{
- struct orinoco_private *priv = netdev_priv(dev);
- unsigned long flags;
- int err;
-
- err = orinoco_lock(priv, &flags);
- if (err)
- return err;
-
- err = __orinoco_up(dev);
+ hermes_t *hw = &priv->hw;
+ int err = 0;
- if (! err)
- priv->open = 1;
+ if (priv->bitratemode >= BITRATE_TABLE_SIZE) {
+ printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n",
+ priv->ndev->name, priv->bitratemode);
+ return -EINVAL;
+ }
- orinoco_unlock(priv, &flags);
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE:
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFTXRATECONTROL,
+ bitrate_table[priv->bitratemode].agere_txratectrl);
+ break;
+ case FIRMWARE_TYPE_INTERSIL:
+ case FIRMWARE_TYPE_SYMBOL:
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFTXRATECONTROL,
+ bitrate_table[priv->bitratemode].intersil_txratectrl);
+ break;
+ default:
+ BUG();
+ }
return err;
}
-int orinoco_stop(struct net_device *dev)
+static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
{
- struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
int err = 0;
+ int master_wep_flag;
+ int auth_flag;
- /* We mustn't use orinoco_lock() here, because we need to be
- able to close the interface even if hw_unavailable is set
- (e.g. as we're released after a PC Card removal) */
- spin_lock_irq(&priv->lock);
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
+ if (priv->wep_on) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFTXKEY_AGERE,
+ priv->tx_key);
+ if (err)
+ return err;
+
+ err = HERMES_WRITE_RECORD(hw, USER_BAP,
+ HERMES_RID_CNFWEPKEYS_AGERE,
+ &priv->keys);
+ if (err)
+ return err;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFWEPENABLED_AGERE,
+ priv->wep_on);
+ if (err)
+ return err;
+ break;
- priv->open = 0;
+ case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
+ case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
+ master_wep_flag = 0; /* Off */
+ if (priv->wep_on) {
+ int keylen;
+ int i;
- err = __orinoco_down(dev);
+ /* Fudge around firmware weirdness */
+ keylen = le16_to_cpu(priv->keys[priv->tx_key].len);
+
+ /* Write all 4 keys */
+ for(i = 0; i < ORINOCO_MAX_KEYS; i++) {
+/* int keylen = le16_to_cpu(priv->keys[i].len); */
+
+ if (keylen > LARGE_KEY_SIZE) {
+ printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n",
+ priv->ndev->name, i, keylen);
+ return -E2BIG;
+ }
- spin_unlock_irq(&priv->lock);
+ err = hermes_write_ltv(hw, USER_BAP,
+ HERMES_RID_CNFDEFAULTKEY0 + i,
+ HERMES_BYTES_TO_RECLEN(keylen),
+ priv->keys[i].data);
+ if (err)
+ return err;
+ }
- return err;
+ /* Write the index of the key used in transmission */
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFWEPDEFAULTKEYID,
+ priv->tx_key);
+ if (err)
+ return err;
+
+ if (priv->wep_restrict) {
+ auth_flag = 2;
+ master_wep_flag = 3;
+ } else {
+ /* Authentication is where Intersil and Symbol
+ * firmware differ... */
+ auth_flag = 1;
+ if (priv->firmware_type == FIRMWARE_TYPE_SYMBOL)
+ master_wep_flag = 3; /* Symbol */
+ else
+ master_wep_flag = 1; /* Intersil */
+ }
+
+
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFAUTHENTICATION,
+ auth_flag);
+ if (err)
+ return err;
+ }
+
+ /* Master WEP setting : on/off */
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFWEPFLAGS_INTERSIL,
+ master_wep_flag);
+ if (err)
+ return err;
+
+ break;
+
+ default:
+ if (priv->wep_on) {
+ printk(KERN_ERR "%s: WEP enabled, although not supported!\n",
+ priv->ndev->name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
static int __orinoco_program_rids(struct net_device *dev)
@@ -733,14 +1548,17 @@ static int __orinoco_program_rids(struct net_device *dev)
err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr);
if (err) {
- printk(KERN_ERR "%s: Error %d setting MAC address\n", dev->name, err);
+ printk(KERN_ERR "%s: Error %d setting MAC address\n",
+ dev->name, err);
return err;
}
/* Set up the link mode */
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE, priv->port_type);
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE,
+ priv->port_type);
if (err) {
- printk(KERN_ERR "%s: Error %d setting port type\n", dev->name, err);
+ printk(KERN_ERR "%s: Error %d setting port type\n",
+ dev->name, err);
return err;
}
/* Set the channel/frequency */
@@ -749,14 +1567,17 @@ static int __orinoco_program_rids(struct net_device *dev)
if (priv->createibss)
priv->channel = 10;
}
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFOWNCHANNEL, priv->channel);
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFOWNCHANNEL,
+ priv->channel);
if (err) {
- printk(KERN_ERR "%s: Error %d setting channel\n", dev->name, err);
+ printk(KERN_ERR "%s: Error %d setting channel\n",
+ dev->name, err);
return err;
}
if (priv->has_ibss) {
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFCREATEIBSS,
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFCREATEIBSS,
priv->createibss);
if (err) {
printk(KERN_ERR "%s: Error %d setting CREATEIBSS\n", dev->name, err);
@@ -765,8 +1586,8 @@ static int __orinoco_program_rids(struct net_device *dev)
if ((strlen(priv->desired_essid) == 0) && (priv->createibss)
&& (!priv->has_ibss_any)) {
- printk(KERN_WARNING "%s: This firmware requires an \
-ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
+ printk(KERN_WARNING "%s: This firmware requires an "
+ "ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
/* With wvlan_cs, in this case, we would crash.
* hopefully, this driver will behave better...
* Jean II */
@@ -781,14 +1602,16 @@ ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
&idbuf);
if (err) {
- printk(KERN_ERR "%s: Error %d setting OWNSSID\n", dev->name, err);
+ printk(KERN_ERR "%s: Error %d setting OWNSSID\n",
+ dev->name, err);
return err;
}
err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
&idbuf);
if (err) {
- printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n", dev->name, err);
+ printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n",
+ dev->name, err);
return err;
}
@@ -799,26 +1622,31 @@ ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
&idbuf);
if (err) {
- printk(KERN_ERR "%s: Error %d setting nickname\n", dev->name, err);
+ printk(KERN_ERR "%s: Error %d setting nickname\n",
+ dev->name, err);
return err;
}
/* Set AP density */
if (priv->has_sensitivity) {
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFSYSTEMSCALE,
priv->ap_density);
if (err) {
printk(KERN_WARNING "%s: Error %d setting SYSTEMSCALE. "
- "Disabling sensitivity control\n", dev->name, err);
+ "Disabling sensitivity control\n",
+ dev->name, err);
priv->has_sensitivity = 0;
}
}
/* Set RTS threshold */
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD, priv->rts_thresh);
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
+ priv->rts_thresh);
if (err) {
- printk(KERN_ERR "%s: Error %d setting RTS threshold\n", dev->name, err);
+ printk(KERN_ERR "%s: Error %d setting RTS threshold\n",
+ dev->name, err);
return err;
}
@@ -832,20 +1660,23 @@ ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
priv->frag_thresh);
if (err) {
- printk(KERN_ERR "%s: Error %d setting framentation\n", dev->name, err);
+ printk(KERN_ERR "%s: Error %d setting fragmentation\n",
+ dev->name, err);
return err;
}
/* Set bitrate */
err = __orinoco_hw_set_bitrate(priv);
if (err) {
- printk(KERN_ERR "%s: Error %d setting bitrate\n", dev->name, err);
+ printk(KERN_ERR "%s: Error %d setting bitrate\n",
+ dev->name, err);
return err;
}
/* Set power management */
if (priv->has_pm) {
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED,
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPMENABLED,
priv->pm_on);
if (err) {
printk(KERN_ERR "%s: Error %d setting up PM\n",
@@ -909,7 +1740,73 @@ ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
return 0;
}
-/* xyzzy */
+/* FIXME: return int? */
+static void
+__orinoco_set_multicast_list(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ int promisc, mc_count;
+
+ /* The Hermes doesn't seem to have an allmulti mode, so we go
+ * into promiscuous mode and let the upper levels deal. */
+ if ( (dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > MAX_MULTICAST(priv)) ) {
+ promisc = 1;
+ mc_count = 0;
+ } else {
+ promisc = 0;
+ mc_count = dev->mc_count;
+ }
+
+ if (promisc != priv->promiscuous) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPROMISCUOUSMODE,
+ promisc);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE to 1.\n",
+ dev->name, err);
+ } else
+ priv->promiscuous = promisc;
+ }
+
+ if (! promisc && (mc_count || priv->mc_count) ) {
+ struct dev_mc_list *p = dev->mc_list;
+ struct hermes_multicast mclist;
+ int i;
+
+ for (i = 0; i < mc_count; i++) {
+ /* paranoia: is list shorter than mc_count? */
+ BUG_ON(! p);
+ /* paranoia: bad address size in list? */
+ BUG_ON(p->dmi_addrlen != ETH_ALEN);
+
+ memcpy(mclist.addr[i], p->dmi_addr, ETH_ALEN);
+ p = p->next;
+ }
+
+ if (p)
+ printk(KERN_WARNING "Multicast list is longer than mc_count\n");
+
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFGROUPADDRESSES,
+ HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
+ &mclist);
+ if (err)
+ printk(KERN_ERR "%s: Error %d setting multicast list.\n",
+ dev->name, err);
+ else
+ priv->mc_count = mc_count;
+ }
+
+ /* Since we can set the promiscuous flag when it wasn't asked
+ for, make sure the net_device knows about it. */
+ if (priv->promiscuous)
+ dev->flags |= IFF_PROMISC;
+ else
+ dev->flags &= ~IFF_PROMISC;
+}
+
static int orinoco_reconfigure(struct net_device *dev)
{
struct orinoco_private *priv = netdev_priv(dev);
@@ -974,7 +1871,7 @@ static void orinoco_reset(struct net_device *dev)
if (err)
/* When the hardware becomes available again, whatever
* detects that is responsible for re-initializing
- * it. So no need for anything further*/
+ * it. So no need for anything further */
return;
netif_stop_queue(dev);
@@ -993,8 +1890,8 @@ static void orinoco_reset(struct net_device *dev)
if (priv->hard_reset)
err = (*priv->hard_reset)(priv);
if (err) {
- printk(KERN_ERR "%s: orinoco_reset: Error %d performing hard reset\n",
- dev->name, err);
+ printk(KERN_ERR "%s: orinoco_reset: Error %d "
+ "performing hard reset\n", dev->name, err);
/* FIXME: shutdown of some sort */
return;
}
@@ -1027,409 +1924,22 @@ static void orinoco_reset(struct net_device *dev)
}
/********************************************************************/
-/* Internal helper functions */
+/* Interrupt handler */
/********************************************************************/
-static inline void
-set_port_type(struct orinoco_private *priv)
-{
- switch (priv->iw_mode) {
- case IW_MODE_INFRA:
- priv->port_type = 1;
- priv->createibss = 0;
- break;
- case IW_MODE_ADHOC:
- if (priv->prefer_port3) {
- priv->port_type = 3;
- priv->createibss = 0;
- } else {
- priv->port_type = priv->ibss_port;
- priv->createibss = 1;
- }
- break;
- default:
- printk(KERN_ERR "%s: Invalid priv->iw_mode in set_port_type()\n",
- priv->ndev->name);
- }
-}
-
-/* Does the frame have a SNAP header indicating it should be
- * de-encapsulated to Ethernet-II? */
-static inline int
-is_ethersnap(struct header_struct *hdr)
-{
- /* We de-encapsulate all packets which, a) have SNAP headers
- * (i.e. SSAP=DSAP=0xaa and CTRL=0x3 in the 802.2 LLC header
- * and where b) the OUI of the SNAP header is 00:00:00 or
- * 00:00:f8 - we need both because different APs appear to use
- * different OUIs for some reason */
- return (memcmp(&hdr->dsap, &encaps_hdr, 5) == 0)
- && ( (hdr->oui[2] == 0x00) || (hdr->oui[2] == 0xf8) );
-}
-
-static void
-orinoco_set_multicast_list(struct net_device *dev)
-{
- struct orinoco_private *priv = netdev_priv(dev);
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0) {
- printk(KERN_DEBUG "%s: orinoco_set_multicast_list() "
- "called when hw_unavailable\n", dev->name);
- return;
- }
-
- __orinoco_set_multicast_list(dev);
- orinoco_unlock(priv, &flags);
-}
-
-/********************************************************************/
-/* Hardware control functions */
-/********************************************************************/
-
-
-static int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
-{
- hermes_t *hw = &priv->hw;
- int err = 0;
-
- if (priv->bitratemode >= BITRATE_TABLE_SIZE) {
- printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n",
- priv->ndev->name, priv->bitratemode);
- return -EINVAL;
- }
-
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_AGERE:
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFTXRATECONTROL,
- bitrate_table[priv->bitratemode].agere_txratectrl);
- break;
- case FIRMWARE_TYPE_INTERSIL:
- case FIRMWARE_TYPE_SYMBOL:
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFTXRATECONTROL,
- bitrate_table[priv->bitratemode].intersil_txratectrl);
- break;
- default:
- BUG();
- }
-
- return err;
-}
-
-
-static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
-{
- hermes_t *hw = &priv->hw;
- int err = 0;
- int master_wep_flag;
- int auth_flag;
-
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
- if (priv->wep_on) {
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFTXKEY_AGERE,
- priv->tx_key);
- if (err)
- return err;
-
- err = HERMES_WRITE_RECORD(hw, USER_BAP,
- HERMES_RID_CNFWEPKEYS_AGERE,
- &priv->keys);
- if (err)
- return err;
- }
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFWEPENABLED_AGERE,
- priv->wep_on);
- if (err)
- return err;
- break;
-
- case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
- case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
- master_wep_flag = 0; /* Off */
- if (priv->wep_on) {
- int keylen;
- int i;
-
- /* Fudge around firmware weirdness */
- keylen = le16_to_cpu(priv->keys[priv->tx_key].len);
-
- /* Write all 4 keys */
- for(i = 0; i < ORINOCO_MAX_KEYS; i++) {
-/* int keylen = le16_to_cpu(priv->keys[i].len); */
-
- if (keylen > LARGE_KEY_SIZE) {
- printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n",
- priv->ndev->name, i, keylen);
- return -E2BIG;
- }
-
- err = hermes_write_ltv(hw, USER_BAP,
- HERMES_RID_CNFDEFAULTKEY0 + i,
- HERMES_BYTES_TO_RECLEN(keylen),
- priv->keys[i].data);
- if (err)
- return err;
- }
-
- /* Write the index of the key used in transmission */
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPDEFAULTKEYID,
- priv->tx_key);
- if (err)
- return err;
-
- if (priv->wep_restrict) {
- auth_flag = 2;
- master_wep_flag = 3;
- } else {
- /* Authentication is where Intersil and Symbol
- * firmware differ... */
- auth_flag = 1;
- if (priv->firmware_type == FIRMWARE_TYPE_SYMBOL)
- master_wep_flag = 3; /* Symbol */
- else
- master_wep_flag = 1; /* Intersil */
- }
-
-
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFAUTHENTICATION, auth_flag);
- if (err)
- return err;
- }
-
- /* Master WEP setting : on/off */
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFWEPFLAGS_INTERSIL,
- master_wep_flag);
- if (err)
- return err;
-
- break;
-
- default:
- if (priv->wep_on) {
- printk(KERN_ERR "%s: WEP enabled, although not supported!\n",
- priv->ndev->name);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int orinoco_hw_get_bssid(struct orinoco_private *priv,
- char buf[ETH_ALEN])
-{
- hermes_t *hw = &priv->hw;
- int err = 0;
- unsigned long flags;
-
- err = orinoco_lock(priv, &flags);
- if (err)
- return err;
-
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
- ETH_ALEN, NULL, buf);
-
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
- char buf[IW_ESSID_MAX_SIZE+1])
-{
- hermes_t *hw = &priv->hw;
- int err = 0;
- struct hermes_idstring essidbuf;
- char *p = (char *)(&essidbuf.val);
- int len;
- unsigned long flags;
-
- err = orinoco_lock(priv, &flags);
- if (err)
- return err;
-
- if (strlen(priv->desired_essid) > 0) {
- /* We read the desired SSID from the hardware rather
- than from priv->desired_essid, just in case the
- firmware is allowed to change it on us. I'm not
- sure about this */
- /* My guess is that the OWNSSID should always be whatever
- * we set to the card, whereas CURRENT_SSID is the one that
- * may change... - Jean II */
- u16 rid;
-
- *active = 1;
-
- rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
- HERMES_RID_CNFDESIREDSSID;
-
- err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
- NULL, &essidbuf);
- if (err)
- goto fail_unlock;
- } else {
- *active = 0;
-
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
- sizeof(essidbuf), NULL, &essidbuf);
- if (err)
- goto fail_unlock;
- }
-
- len = le16_to_cpu(essidbuf.len);
-
- memset(buf, 0, IW_ESSID_MAX_SIZE+1);
- memcpy(buf, p, len);
- buf[len] = '\0';
-
- fail_unlock:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static long orinoco_hw_get_freq(struct orinoco_private *priv)
-{
-
- hermes_t *hw = &priv->hw;
- int err = 0;
- u16 channel;
- long freq = 0;
- unsigned long flags;
-
- err = orinoco_lock(priv, &flags);
- if (err)
- return err;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL, &channel);
- if (err)
- goto out;
-
- /* Intersil firmware 1.3.5 returns 0 when the interface is down */
- if (channel == 0) {
- err = -EBUSY;
- goto out;
- }
-
- if ( (channel < 1) || (channel > NUM_CHANNELS) ) {
- printk(KERN_WARNING "%s: Channel out of range (%d)!\n",
- priv->ndev->name, channel);
- err = -EBUSY;
- goto out;
-
- }
- freq = channel_frequency[channel-1] * 100000;
-
- out:
- orinoco_unlock(priv, &flags);
-
- if (err > 0)
- err = -EBUSY;
- return err ? err : freq;
-}
-
-static int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
- int *numrates, s32 *rates, int max)
+static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw)
{
- hermes_t *hw = &priv->hw;
- struct hermes_idstring list;
- unsigned char *p = (unsigned char *)&list.val;
- int err = 0;
- int num;
- int i;
- unsigned long flags;
-
- err = orinoco_lock(priv, &flags);
- if (err)
- return err;
-
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
- sizeof(list), NULL, &list);
- orinoco_unlock(priv, &flags);
-
- if (err)
- return err;
-
- num = le16_to_cpu(list.len);
- *numrates = num;
- num = min(num, max);
-
- for (i = 0; i < num; i++) {
- rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */
- }
-
- return 0;
+ printk(KERN_DEBUG "%s: TICK\n", dev->name);
}
-#if 0
-static void show_rx_frame(struct orinoco_rxframe_hdr *frame)
+static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw)
{
- printk(KERN_DEBUG "RX descriptor:\n");
- printk(KERN_DEBUG " status = 0x%04x\n", frame->desc.status);
- printk(KERN_DEBUG " time = 0x%08x\n", frame->desc.time);
- printk(KERN_DEBUG " silence = 0x%02x\n", frame->desc.silence);
- printk(KERN_DEBUG " signal = 0x%02x\n", frame->desc.signal);
- printk(KERN_DEBUG " rate = 0x%02x\n", frame->desc.rate);
- printk(KERN_DEBUG " rxflow = 0x%02x\n", frame->desc.rxflow);
- printk(KERN_DEBUG " reserved = 0x%08x\n", frame->desc.reserved);
-
- printk(KERN_DEBUG "IEEE 802.11 header:\n");
- printk(KERN_DEBUG " frame_ctl = 0x%04x\n",
- frame->p80211.frame_ctl);
- printk(KERN_DEBUG " duration_id = 0x%04x\n",
- frame->p80211.duration_id);
- printk(KERN_DEBUG " addr1 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr1[0], frame->p80211.addr1[1],
- frame->p80211.addr1[2], frame->p80211.addr1[3],
- frame->p80211.addr1[4], frame->p80211.addr1[5]);
- printk(KERN_DEBUG " addr2 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr2[0], frame->p80211.addr2[1],
- frame->p80211.addr2[2], frame->p80211.addr2[3],
- frame->p80211.addr2[4], frame->p80211.addr2[5]);
- printk(KERN_DEBUG " addr3 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr3[0], frame->p80211.addr3[1],
- frame->p80211.addr3[2], frame->p80211.addr3[3],
- frame->p80211.addr3[4], frame->p80211.addr3[5]);
- printk(KERN_DEBUG " seq_ctl = 0x%04x\n",
- frame->p80211.seq_ctl);
- printk(KERN_DEBUG " addr4 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr4[0], frame->p80211.addr4[1],
- frame->p80211.addr4[2], frame->p80211.addr4[3],
- frame->p80211.addr4[4], frame->p80211.addr4[5]);
- printk(KERN_DEBUG " data_len = 0x%04x\n",
- frame->p80211.data_len);
-
- printk(KERN_DEBUG "IEEE 802.3 header:\n");
- printk(KERN_DEBUG " dest = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p8023.h_dest[0], frame->p8023.h_dest[1],
- frame->p8023.h_dest[2], frame->p8023.h_dest[3],
- frame->p8023.h_dest[4], frame->p8023.h_dest[5]);
- printk(KERN_DEBUG " src = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p8023.h_source[0], frame->p8023.h_source[1],
- frame->p8023.h_source[2], frame->p8023.h_source[3],
- frame->p8023.h_source[4], frame->p8023.h_source[5]);
- printk(KERN_DEBUG " len = 0x%04x\n", frame->p8023.h_proto);
-
- printk(KERN_DEBUG "IEEE 802.2 LLC/SNAP header:\n");
- printk(KERN_DEBUG " DSAP = 0x%02x\n", frame->p8022.dsap);
- printk(KERN_DEBUG " SSAP = 0x%02x\n", frame->p8022.ssap);
- printk(KERN_DEBUG " ctrl = 0x%02x\n", frame->p8022.ctrl);
- printk(KERN_DEBUG " OUI = %02x:%02x:%02x\n",
- frame->p8022.oui[0], frame->p8022.oui[1], frame->p8022.oui[2]);
- printk(KERN_DEBUG " ethertype = 0x%04x\n", frame->ethertype);
+ /* This seems to happen a fair bit under load, but ignoring it
+ seems to work fine...*/
+ printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n",
+ dev->name);
}
-#endif /* 0 */
-/*
- * Interrupt handler
- */
irqreturn_t orinoco_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *)dev_id;
@@ -1440,7 +1950,8 @@ irqreturn_t orinoco_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* These are used to detect a runaway interrupt situation */
/* If we get more than MAX_IRQLOOPS_PER_JIFFY iterations in a jiffy,
* we panic and shut down the hardware */
- static int last_irq_jiffy = 0; /* jiffies value the last time we were called */
+ static int last_irq_jiffy = 0; /* jiffies value the last time
+ * we were called */
static int loops_this_jiffy = 0;
unsigned long flags;
@@ -1504,368 +2015,9 @@ irqreturn_t orinoco_interrupt(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_HANDLED;
}
-static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw)
-{
- printk(KERN_DEBUG "%s: TICK\n", dev->name);
-}
-
-static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw)
-{
- /* This seems to happen a fair bit under load, but ignoring it
- seems to work fine...*/
- printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n",
- dev->name);
-}
-
-static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
-{
- if (net_ratelimit())
- printk(KERN_WARNING "%s: Information frame lost.\n", dev->name);
-}
-
-static void print_linkstatus(struct net_device *dev, u16 status)
-{
- char * s;
-
- if (suppress_linkstatus)
- return;
-
- switch (status) {
- case HERMES_LINKSTATUS_NOT_CONNECTED:
- s = "Not Connected";
- break;
- case HERMES_LINKSTATUS_CONNECTED:
- s = "Connected";
- break;
- case HERMES_LINKSTATUS_DISCONNECTED:
- s = "Disconnected";
- break;
- case HERMES_LINKSTATUS_AP_CHANGE:
- s = "AP Changed";
- break;
- case HERMES_LINKSTATUS_AP_OUT_OF_RANGE:
- s = "AP Out of Range";
- break;
- case HERMES_LINKSTATUS_AP_IN_RANGE:
- s = "AP In Range";
- break;
- case HERMES_LINKSTATUS_ASSOC_FAILED:
- s = "Association Failed";
- break;
- default:
- s = "UNKNOWN";
- }
-
- printk(KERN_INFO "%s: New link status: %s (%04x)\n",
- dev->name, s, status);
-}
-
-static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
-{
- struct orinoco_private *priv = netdev_priv(dev);
- u16 infofid;
- struct {
- u16 len;
- u16 type;
- } __attribute__ ((packed)) info;
- int len, type;
- int err;
-
- /* This is an answer to an INQUIRE command that we did earlier,
- * or an information "event" generated by the card
- * The controller return to us a pseudo frame containing
- * the information in question - Jean II */
- infofid = hermes_read_regn(hw, INFOFID);
-
- /* Read the info frame header - don't try too hard */
- err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info),
- infofid, 0);
- if (err) {
- printk(KERN_ERR "%s: error %d reading info frame. "
- "Frame dropped.\n", dev->name, err);
- return;
- }
-
- len = HERMES_RECLEN_TO_BYTES(le16_to_cpu(info.len));
- type = le16_to_cpu(info.type);
-
- switch (type) {
- case HERMES_INQ_TALLIES: {
- struct hermes_tallies_frame tallies;
- struct iw_statistics *wstats = &priv->wstats;
-
- if (len > sizeof(tallies)) {
- printk(KERN_WARNING "%s: Tallies frame too long (%d bytes)\n",
- dev->name, len);
- len = sizeof(tallies);
- }
-
- /* Read directly the data (no seek) */
- hermes_read_words(hw, HERMES_DATA1, (void *) &tallies,
- len / 2); /* FIXME: blech! */
-
- /* Increment our various counters */
- /* wstats->discard.nwid - no wrong BSSID stuff */
- wstats->discard.code +=
- le16_to_cpu(tallies.RxWEPUndecryptable);
- if (len == sizeof(tallies))
- wstats->discard.code +=
- le16_to_cpu(tallies.RxDiscards_WEPICVError) +
- le16_to_cpu(tallies.RxDiscards_WEPExcluded);
- wstats->discard.misc +=
- le16_to_cpu(tallies.TxDiscardsWrongSA);
-#if WIRELESS_EXT > 11
- wstats->discard.fragment +=
- le16_to_cpu(tallies.RxMsgInBadMsgFragments);
- wstats->discard.retries +=
- le16_to_cpu(tallies.TxRetryLimitExceeded);
- /* wstats->miss.beacon - no match */
-#endif /* WIRELESS_EXT > 11 */
- }
- break;
- case HERMES_INQ_LINKSTATUS: {
- struct hermes_linkstatus linkstatus;
- u16 newstatus;
-
- if (len != sizeof(linkstatus)) {
- printk(KERN_WARNING "%s: Unexpected size for linkstatus frame (%d bytes)\n",
- dev->name, len);
- break;
- }
-
- hermes_read_words(hw, HERMES_DATA1, (void *) &linkstatus,
- len / 2);
- newstatus = le16_to_cpu(linkstatus.linkstatus);
-
- if ( (newstatus == HERMES_LINKSTATUS_CONNECTED)
- || (newstatus == HERMES_LINKSTATUS_AP_CHANGE)
- || (newstatus == HERMES_LINKSTATUS_AP_IN_RANGE) )
- priv->connected = 1;
- else if ( (newstatus == HERMES_LINKSTATUS_NOT_CONNECTED)
- || (newstatus == HERMES_LINKSTATUS_DISCONNECTED)
- || (newstatus == HERMES_LINKSTATUS_AP_OUT_OF_RANGE)
- || (newstatus == HERMES_LINKSTATUS_ASSOC_FAILED) )
- priv->connected = 0;
-
- if (newstatus != priv->last_linkstatus)
- print_linkstatus(dev, newstatus);
-
- priv->last_linkstatus = newstatus;
- }
- break;
- default:
- printk(KERN_DEBUG "%s: Unknown information frame received (type %04x).\n",
- dev->name, type);
- /* We don't actually do anything about it */
- break;
- }
-}
-
-static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
-{
- struct orinoco_private *priv = netdev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
- struct iw_statistics *wstats = &priv->wstats;
- struct sk_buff *skb = NULL;
- u16 rxfid, status;
- int length, data_len, data_off;
- char *p;
- struct hermes_rx_descriptor desc;
- struct header_struct hdr;
- struct ethhdr *eh;
- int err;
-
- rxfid = hermes_read_regn(hw, RXFID);
-
- err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc),
- rxfid, 0);
- if (err) {
- printk(KERN_ERR "%s: error %d reading Rx descriptor. "
- "Frame dropped.\n", dev->name, err);
- stats->rx_errors++;
- goto drop;
- }
-
- status = le16_to_cpu(desc.status);
-
- if (status & HERMES_RXSTAT_ERR) {
- if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
- wstats->discard.code++;
- DEBUG(1, "%s: Undecryptable frame on Rx. Frame dropped.\n",
- dev->name);
- } else {
- stats->rx_crc_errors++;
- DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n", dev->name);
- }
- stats->rx_errors++;
- goto drop;
- }
-
- /* For now we ignore the 802.11 header completely, assuming
- that the card's firmware has handled anything vital */
-
- err = hermes_bap_pread(hw, IRQ_BAP, &hdr, sizeof(hdr),
- rxfid, HERMES_802_3_OFFSET);
- if (err) {
- printk(KERN_ERR "%s: error %d reading frame header. "
- "Frame dropped.\n", dev->name, err);
- stats->rx_errors++;
- goto drop;
- }
-
- length = ntohs(hdr.len);
-
- /* Sanity checks */
- if (length < 3) { /* No for even an 802.2 LLC header */
- /* At least on Symbol firmware with PCF we get quite a
- lot of these legitimately - Poll frames with no
- data. */
- stats->rx_dropped++;
- goto drop;
- }
- if (length > IEEE802_11_DATA_LEN) {
- printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
- dev->name, length);
- stats->rx_length_errors++;
- stats->rx_errors++;
- goto drop;
- }
-
- /* We need space for the packet data itself, plus an ethernet
- header, plus 2 bytes so we can align the IP header on a
- 32bit boundary, plus 1 byte so we can read in odd length
- packets from the card, which has an IO granularity of 16
- bits */
- skb = dev_alloc_skb(length+ETH_HLEN+2+1);
- if (!skb) {
- printk(KERN_WARNING "%s: Can't allocate skb for Rx\n",
- dev->name);
- goto drop;
- }
-
- skb_reserve(skb, 2); /* This way the IP header is aligned */
-
- /* Handle decapsulation
- * In most cases, the firmware tell us about SNAP frames.
- * For some reason, the SNAP frames sent by LinkSys APs
- * are not properly recognised by most firmwares.
- * So, check ourselves */
- if(((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) ||
- ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) ||
- is_ethersnap(&hdr)) {
- /* These indicate a SNAP within 802.2 LLC within
- 802.11 frame which we'll need to de-encapsulate to
- the original EthernetII frame. */
-
- if (length < ENCAPS_OVERHEAD) { /* No room for full LLC+SNAP */
- stats->rx_length_errors++;
- goto drop;
- }
-
- /* Remove SNAP header, reconstruct EthernetII frame */
- data_len = length - ENCAPS_OVERHEAD;
- data_off = HERMES_802_3_OFFSET + sizeof(hdr);
-
- eh = (struct ethhdr *)skb_put(skb, ETH_HLEN);
-
- memcpy(eh, &hdr, 2 * ETH_ALEN);
- eh->h_proto = hdr.ethertype;
- } else {
- /* All other cases indicate a genuine 802.3 frame. No
- decapsulation needed. We just throw the whole
- thing in, and hope the protocol layer can deal with
- it as 802.3 */
- data_len = length;
- data_off = HERMES_802_3_OFFSET;
- /* FIXME: we re-read from the card data we already read here */
- }
-
- p = skb_put(skb, data_len);
- err = hermes_bap_pread(hw, IRQ_BAP, p, RUP_EVEN(data_len),
- rxfid, data_off);
- if (err) {
- printk(KERN_ERR "%s: error %d reading frame. "
- "Frame dropped.\n", dev->name, err);
- stats->rx_errors++;
- goto drop;
- }
-
- dev->last_rx = jiffies;
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
-
- /* Process the wireless stats if needed */
- orinoco_stat_gather(dev, skb, &desc);
-
- /* Pass the packet to the networking stack */
- netif_rx(skb);
- stats->rx_packets++;
- stats->rx_bytes += length;
-
- return;
-
- drop:
- stats->rx_dropped++;
-
- if (skb)
- dev_kfree_skb_irq(skb);
- return;
-}
-
-static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
-{
- struct orinoco_private *priv = netdev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
- u16 fid = hermes_read_regn(hw, TXCOMPLFID);
- struct hermes_tx_descriptor desc;
- int err = 0;
-
- if (fid == DUMMY_FID)
- return; /* Nothing's really happened */
-
- err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc), fid, 0);
- if (err) {
- printk(KERN_WARNING "%s: Unable to read descriptor on Tx error "
- "(FID=%04X error %d)\n",
- dev->name, fid, err);
- } else {
- DEBUG(1, "%s: Tx error, status %d\n",
- dev->name, le16_to_cpu(desc.status));
- }
-
- stats->tx_errors++;
-
- hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
-}
-
-static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw)
-{
- struct orinoco_private *priv = netdev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
-
- stats->tx_packets++;
-
- hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
-}
-
-static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
-{
- struct orinoco_private *priv = netdev_priv(dev);
-
- u16 fid = hermes_read_regn(hw, ALLOCFID);
-
- if (fid != priv->txfid) {
- if (fid != DUMMY_FID)
- printk(KERN_WARNING "%s: Allocate event on unexpected fid (%04X)\n",
- dev->name, fid);
- return;
- } else {
- netif_wake_queue(dev);
- }
-
- hermes_write_regn(hw, ALLOCFID, DUMMY_FID);
-}
+/********************************************************************/
+/* Initialization */
+/********************************************************************/
struct sta_id {
u16 id, variant, major, minor;
@@ -1901,11 +2053,11 @@ static void determine_firmware(struct net_device *dev)
dev->name, err);
memset(&sta_id, 0, sizeof(sta_id));
}
+
le16_to_cpus(&sta_id.id);
le16_to_cpus(&sta_id.variant);
le16_to_cpus(&sta_id.major);
le16_to_cpus(&sta_id.minor);
-
printk(KERN_DEBUG "%s: Station identity %04x:%04x:%04x:%04x\n",
dev->name, sta_id.id, sta_id.variant,
sta_id.major, sta_id.minor);
@@ -2018,12 +2170,7 @@ static void determine_firmware(struct net_device *dev)
}
}
-/*
- * struct net_device methods
- */
-
-static int
-orinoco_init(struct net_device *dev)
+static int orinoco_init(struct net_device *dev)
{
struct orinoco_private *priv = netdev_priv(dev);
hermes_t *hw = &priv->hw;
@@ -2112,7 +2259,8 @@ orinoco_init(struct net_device *dev)
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
&priv->rts_thresh);
if (err) {
- printk(KERN_ERR "%s: failed to read RTS threshold!\n", dev->name);
+ printk(KERN_ERR "%s: failed to read RTS threshold!\n",
+ dev->name);
goto out;
}
@@ -2125,7 +2273,8 @@ orinoco_init(struct net_device *dev)
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
&priv->frag_thresh);
if (err) {
- printk(KERN_ERR "%s: failed to read fragmentation settings!\n", dev->name);
+ printk(KERN_ERR "%s: failed to read fragmentation settings!\n",
+ dev->name);
goto out;
}
@@ -2153,7 +2302,8 @@ orinoco_init(struct net_device *dev)
/* Preamble setup */
if (priv->has_preamble) {
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPREAMBLE_SYMBOL,
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPREAMBLE_SYMBOL,
&priv->preamble);
if (err)
goto out;
@@ -2202,357 +2352,204 @@ orinoco_init(struct net_device *dev)
return err;
}
-struct net_device_stats *
-orinoco_get_stats(struct net_device *dev)
+struct net_device *alloc_orinocodev(int sizeof_card,
+ int (*hard_reset)(struct orinoco_private *))
{
- struct orinoco_private *priv = netdev_priv(dev);
-
- return &priv->stats;
-}
+ struct net_device *dev;
+ struct orinoco_private *priv;
-struct iw_statistics *
-orinoco_get_wireless_stats(struct net_device *dev)
-{
- struct orinoco_private *priv = netdev_priv(dev);
- hermes_t *hw = &priv->hw;
- struct iw_statistics *wstats = &priv->wstats;
- int err = 0;
- unsigned long flags;
+ dev = alloc_etherdev(sizeof(struct orinoco_private) + sizeof_card);
+ if (! dev)
+ return NULL;
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+ if (sizeof_card)
+ priv->card = (void *)((unsigned long)netdev_priv(dev)
+ + sizeof(struct orinoco_private));
+ else
+ priv->card = NULL;
- if (! netif_device_present(dev)) {
- printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n",
- dev->name);
- return NULL; /* FIXME: Can we do better than this? */
- }
+ /* Setup / override net_device fields */
+ dev->init = orinoco_init;
+ dev->hard_start_xmit = orinoco_xmit;
+ dev->tx_timeout = orinoco_tx_timeout;
+ dev->watchdog_timeo = HZ; /* 1 second timeout */
+ dev->get_stats = orinoco_get_stats;
+ dev->get_wireless_stats = orinoco_get_wireless_stats;
+ dev->do_ioctl = orinoco_ioctl;
+ dev->change_mtu = orinoco_change_mtu;
+ dev->set_multicast_list = orinoco_set_multicast_list;
+ /* we use the default eth_mac_addr for setting the MAC addr */
- err = orinoco_lock(priv, &flags);
- if (err)
- return NULL; /* FIXME: Erg, we've been signalled, how
- * do we propagate this back up? */
+ /* Set up default callbacks */
+ dev->open = orinoco_open;
+ dev->stop = orinoco_stop;
+ priv->hard_reset = hard_reset;
- if (priv->iw_mode == IW_MODE_ADHOC) {
- memset(&wstats->qual, 0, sizeof(wstats->qual));
- /* If a spy address is defined, we report stats of the
- * first spy address - Jean II */
- if (SPY_NUMBER(priv)) {
- wstats->qual.qual = priv->spy_stat[0].qual;
- wstats->qual.level = priv->spy_stat[0].level;
- wstats->qual.noise = priv->spy_stat[0].noise;
- wstats->qual.updated = priv->spy_stat[0].updated;
- }
- } else {
- struct {
- u16 qual, signal, noise;
- } __attribute__ ((packed)) cq;
+ spin_lock_init(&priv->lock);
+ priv->open = 0;
+ priv->hw_unavailable = 1; /* orinoco_init() must clear this
+ * before anything else touches the
+ * hardware */
+ INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
- err = HERMES_READ_RECORD(hw, USER_BAP,
- HERMES_RID_COMMSQUALITY, &cq);
-
- wstats->qual.qual = (int)le16_to_cpu(cq.qual);
- wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
- wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
- wstats->qual.updated = 7;
- }
+ priv->last_linkstatus = 0xffff;
+ priv->connected = 0;
- /* We can't really wait for the tallies inquiry command to
- * complete, so we just use the previous results and trigger
- * a new tallies inquiry command for next time - Jean II */
- /* FIXME: We're in user context (I think?), so we should just
- wait for the tallies to come through */
- err = hermes_inquire(hw, HERMES_INQ_TALLIES);
-
- orinoco_unlock(priv, &flags);
+ return dev;
- if (err)
- return NULL;
-
- return wstats;
}
-static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
- int level, int noise)
-{
- struct orinoco_private *priv = netdev_priv(dev);
- int i;
-
- /* Gather wireless spy statistics: for each packet, compare the
- * source address with out list, and if match, get the stats... */
- for (i = 0; i < priv->spy_number; i++)
- if (!memcmp(mac, priv->spy_address[i], ETH_ALEN)) {
- priv->spy_stat[i].level = level - 0x95;
- priv->spy_stat[i].noise = noise - 0x95;
- priv->spy_stat[i].qual = (level > noise) ? (level - noise) : 0;
- priv->spy_stat[i].updated = 7;
- }
-}
-
-void
-orinoco_stat_gather(struct net_device *dev,
- struct sk_buff *skb,
- struct hermes_rx_descriptor *desc)
-{
- struct orinoco_private *priv = netdev_priv(dev);
-
- /* Using spy support with lots of Rx packets, like in an
- * infrastructure (AP), will really slow down everything, because
- * the MAC address must be compared to each entry of the spy list.
- * If the user really asks for it (set some address in the
- * spy list), we do it, but he will pay the price.
- * Note that to get here, you need both WIRELESS_SPY
- * compiled in AND some addresses in the list !!!
- */
- /* Note : gcc will optimise the whole section away if
- * WIRELESS_SPY is not defined... - Jean II */
- if (SPY_NUMBER(priv)) {
- orinoco_spy_gather(dev, skb->mac.raw + ETH_ALEN,
- desc->signal, desc->silence);
- }
-}
+/********************************************************************/
+/* Wireless extensions */
+/********************************************************************/
-static int
-orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
+static int orinoco_hw_get_bssid(struct orinoco_private *priv,
+ char buf[ETH_ALEN])
{
- struct orinoco_private *priv = netdev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
hermes_t *hw = &priv->hw;
int err = 0;
- u16 txfid = priv->txfid;
- char *p;
- struct ethhdr *eh;
- int len, data_len, data_off;
- struct hermes_tx_descriptor desc;
unsigned long flags;
- TRACE_ENTER(dev->name);
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
- if (! netif_running(dev)) {
- printk(KERN_ERR "%s: Tx on stopped device!\n",
- dev->name);
- TRACE_EXIT(dev->name);
- return 1;
- }
-
- if (netif_queue_stopped(dev)) {
- printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
- dev->name);
- TRACE_EXIT(dev->name);
- return 1;
- }
-
- if (orinoco_lock(priv, &flags) != 0) {
- printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
- dev->name);
- TRACE_EXIT(dev->name);
-/* BUG(); */
- return 1;
- }
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
+ ETH_ALEN, NULL, buf);
- if (! priv->connected) {
- /* Oops, the firmware hasn't established a connection,
- silently drop the packet (this seems to be the
- safest approach). */
- stats->tx_errors++;
- orinoco_unlock(priv, &flags);
- dev_kfree_skb(skb);
- TRACE_EXIT(dev->name);
- return 0;
- }
+ orinoco_unlock(priv, &flags);
- /* Length of the packet body */
- /* FIXME: what if the skb is smaller than this? */
- len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN);
+ return err;
+}
- eh = (struct ethhdr *)skb->data;
+static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
+ char buf[IW_ESSID_MAX_SIZE+1])
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ struct hermes_idstring essidbuf;
+ char *p = (char *)(&essidbuf.val);
+ int len;
+ unsigned long flags;
- memset(&desc, 0, sizeof(desc));
- desc.tx_control = cpu_to_le16(HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX);
- err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), txfid, 0);
- if (err) {
- printk(KERN_ERR "%s: Error %d writing Tx descriptor to BAP\n",
- dev->name, err);
- stats->tx_errors++;
- goto fail;
- }
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
- /* Clear the 802.11 header and data length fields - some
- * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
- * if this isn't done. */
- hermes_clear_words(hw, HERMES_DATA0,
- HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
+ if (strlen(priv->desired_essid) > 0) {
+ /* We read the desired SSID from the hardware rather
+ than from priv->desired_essid, just in case the
+ firmware is allowed to change it on us. I'm not
+ sure about this */
+ /* My guess is that the OWNSSID should always be whatever
+ * we set to the card, whereas CURRENT_SSID is the one that
+ * may change... - Jean II */
+ u16 rid;
- /* Encapsulate Ethernet-II frames */
- if (ntohs(eh->h_proto) > 1500) { /* Ethernet-II frame */
- struct header_struct hdr;
- data_len = len;
- data_off = HERMES_802_3_OFFSET + sizeof(hdr);
- p = skb->data + ETH_HLEN;
+ *active = 1;
- /* 802.3 header */
- memcpy(hdr.dest, eh->h_dest, ETH_ALEN);
- memcpy(hdr.src, eh->h_source, ETH_ALEN);
- hdr.len = htons(data_len + ENCAPS_OVERHEAD);
+ rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
+ HERMES_RID_CNFDESIREDSSID;
- /* 802.2 header */
- memcpy(&hdr.dsap, &encaps_hdr, sizeof(encaps_hdr));
-
- hdr.ethertype = eh->h_proto;
- err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
- txfid, HERMES_802_3_OFFSET);
- if (err) {
- printk(KERN_ERR "%s: Error %d writing packet header to BAP\n",
- dev->name, err);
- stats->tx_errors++;
- goto fail;
- }
- } else { /* IEEE 802.3 frame */
- data_len = len + ETH_HLEN;
- data_off = HERMES_802_3_OFFSET;
- p = skb->data;
- }
+ err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
+ NULL, &essidbuf);
+ if (err)
+ goto fail_unlock;
+ } else {
+ *active = 0;
- /* Round up for odd length packets */
- err = hermes_bap_pwrite(hw, USER_BAP, p, RUP_EVEN(data_len), txfid, data_off);
- if (err) {
- printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
- dev->name, err);
- stats->tx_errors++;
- goto fail;
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
+ sizeof(essidbuf), NULL, &essidbuf);
+ if (err)
+ goto fail_unlock;
}
- /* Finally, we actually initiate the send */
- netif_stop_queue(dev);
-
- err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL, txfid, NULL);
- if (err) {
- netif_start_queue(dev);
- printk(KERN_ERR "%s: Error %d transmitting packet\n", dev->name, err);
- stats->tx_errors++;
- goto fail;
- }
+ len = le16_to_cpu(essidbuf.len);
- dev->trans_start = jiffies;
- stats->tx_bytes += data_off + data_len;
+ memset(buf, 0, IW_ESSID_MAX_SIZE+1);
+ memcpy(buf, p, len);
+ buf[len] = '\0';
+ fail_unlock:
orinoco_unlock(priv, &flags);
- dev_kfree_skb(skb);
-
- TRACE_EXIT(dev->name);
-
- return 0;
- fail:
- TRACE_EXIT(dev->name);
-
- orinoco_unlock(priv, &flags);
- return err;
+ return err;
}
-static void
-orinoco_tx_timeout(struct net_device *dev)
+static long orinoco_hw_get_freq(struct orinoco_private *priv)
{
- struct orinoco_private *priv = netdev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
- struct hermes *hw = &priv->hw;
-
- printk(KERN_WARNING "%s: Tx timeout! "
- "ALLOCFID=%04x, TXCOMPLFID=%04x, EVSTAT=%04x\n",
- dev->name, hermes_read_regn(hw, ALLOCFID),
- hermes_read_regn(hw, TXCOMPLFID), hermes_read_regn(hw, EVSTAT));
-
- stats->tx_errors++;
+
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 channel;
+ long freq = 0;
+ unsigned long flags;
- schedule_work(&priv->reset_work);
-}
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL, &channel);
+ if (err)
+ goto out;
-static int
-orinoco_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct orinoco_private *priv = netdev_priv(dev);
+ /* Intersil firmware 1.3.5 returns 0 when the interface is down */
+ if (channel == 0) {
+ err = -EBUSY;
+ goto out;
+ }
- if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) )
- return -EINVAL;
+ if ( (channel < 1) || (channel > NUM_CHANNELS) ) {
+ printk(KERN_WARNING "%s: Channel out of range (%d)!\n",
+ priv->ndev->name, channel);
+ err = -EBUSY;
+ goto out;
- if ( (new_mtu + ENCAPS_OVERHEAD + IEEE802_11_HLEN) >
- (priv->nicbuf_size - ETH_HLEN) )
- return -EINVAL;
+ }
+ freq = channel_frequency[channel-1] * 100000;
- dev->mtu = new_mtu;
+ out:
+ orinoco_unlock(priv, &flags);
- return 0;
+ if (err > 0)
+ err = -EBUSY;
+ return err ? err : freq;
}
-/* FIXME: return int? */
-static void
-__orinoco_set_multicast_list(struct net_device *dev)
+static int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
+ int *numrates, s32 *rates, int max)
{
- struct orinoco_private *priv = netdev_priv(dev);
hermes_t *hw = &priv->hw;
+ struct hermes_idstring list;
+ unsigned char *p = (unsigned char *)&list.val;
int err = 0;
- int promisc, mc_count;
-
- /* The Hermes doesn't seem to have an allmulti mode, so we go
- * into promiscuous mode and let the upper levels deal. */
- if ( (dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > MAX_MULTICAST(priv)) ) {
- promisc = 1;
- mc_count = 0;
- } else {
- promisc = 0;
- mc_count = dev->mc_count;
- }
+ int num;
+ int i;
+ unsigned long flags;
- if (promisc != priv->promiscuous) {
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFPROMISCUOUSMODE,
- promisc);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE to 1.\n",
- dev->name, err);
- } else
- priv->promiscuous = promisc;
- }
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
- if (! promisc && (mc_count || priv->mc_count) ) {
- struct dev_mc_list *p = dev->mc_list;
- hermes_multicast_t mclist;
- int i;
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
+ sizeof(list), NULL, &list);
+ orinoco_unlock(priv, &flags);
- for (i = 0; i < mc_count; i++) {
- /* Paranoia: */
- if (! p)
- BUG(); /* Multicast list shorter than mc_count */
- if (p->dmi_addrlen != ETH_ALEN)
- BUG(); /* Bad address size in multicast list */
-
- memcpy(mclist.addr[i], p->dmi_addr, ETH_ALEN);
- p = p->next;
- }
-
- if (p)
- printk(KERN_WARNING "Multicast list is longer than mc_count\n");
+ if (err)
+ return err;
+
+ num = le16_to_cpu(list.len);
+ *numrates = num;
+ num = min(num, max);
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFGROUPADDRESSES,
- HERMES_BYTES_TO_RECLEN(priv->mc_count * ETH_ALEN),
- &mclist);
- if (err)
- printk(KERN_ERR "%s: Error %d setting multicast list.\n",
- dev->name, err);
- else
- priv->mc_count = mc_count;
+ for (i = 0; i < num; i++) {
+ rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */
}
- /* Since we can set the promiscuous flag when it wasn't asked
- for, make sure the net_device knows about it. */
- if (priv->promiscuous)
- dev->flags |= IFF_PROMISC;
- else
- dev->flags &= ~IFF_PROMISC;
+ return 0;
}
-/********************************************************************/
-/* Wireless extensions support */
-/********************************************************************/
-
static int orinoco_ioctl_getiwrange(struct net_device *dev, struct iw_point *rrq)
{
struct orinoco_private *priv = netdev_priv(dev);
@@ -2582,10 +2579,8 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev, struct iw_point *rrq
/* Much of this shamelessly taken from wvlan_cs.c. No idea
* what it all means -dgibson */
-#if WIRELESS_EXT > 10
range.we_version_compiled = WIRELESS_EXT;
range.we_version_source = 11;
-#endif /* WIRELESS_EXT > 10 */
range.min_nwid = range.max_nwid = 0; /* We don't use nwids */
@@ -2612,22 +2607,17 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev, struct iw_point *rrq
range.max_qual.qual = 0;
range.max_qual.level = 0;
range.max_qual.noise = 0;
-#if WIRELESS_EXT > 11
range.avg_qual.qual = 0;
range.avg_qual.level = 0;
range.avg_qual.noise = 0;
-#endif /* WIRELESS_EXT > 11 */
-
} else {
range.max_qual.qual = 0x8b - 0x2f;
range.max_qual.level = 0x2f - 0x95 - 1;
range.max_qual.noise = 0x2f - 0x95 - 1;
-#if WIRELESS_EXT > 11
/* Need to get better values */
range.avg_qual.qual = 0x24;
range.avg_qual.level = 0xC2;
range.avg_qual.noise = 0x9E;
-#endif /* WIRELESS_EXT > 11 */
}
err = orinoco_hw_get_bitratelist(priv, &numrates,
@@ -2680,7 +2670,6 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev, struct iw_point *rrq
range.txpower[0] = 15; /* 15dBm */
range.txpower_capa = IW_TXPOW_DBM;
-#if WIRELESS_EXT > 10
range.retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
range.retry_flags = IW_RETRY_LIMIT;
range.r_time_flags = IW_RETRY_LIFETIME;
@@ -2688,7 +2677,6 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev, struct iw_point *rrq
range.max_retry = 65535; /* ??? */
range.min_r_time = 0;
range.max_r_time = 65535 * 1000; /* ??? */
-#endif /* WIRELESS_EXT > 10 */
if (copy_to_user(rrq->pointer, &range, sizeof(range)))
return -EFAULT;
@@ -2737,7 +2725,8 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, struct iw_point *er
if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
index = priv->tx_key;
-
+
+ /* Adjust key length to a supported value */
if (erq->length > SMALL_KEY_SIZE) {
xlen = LARGE_KEY_SIZE;
} else if (erq->length > 0) {
@@ -2779,14 +2768,14 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, struct iw_point *er
if (erq->pointer) {
priv->keys[index].len = cpu_to_le16(xlen);
- memset(priv->keys[index].data, 0, sizeof(priv->keys[index].data));
+ memset(priv->keys[index].data, 0,
+ sizeof(priv->keys[index].data));
memcpy(priv->keys[index].data, keybuf, erq->length);
}
priv->tx_key = setindex;
priv->wep_on = enable;
priv->wep_restrict = restricted;
-
out:
orinoco_unlock(priv, &flags);
@@ -3012,7 +3001,8 @@ static int orinoco_ioctl_getsens(struct net_device *dev, struct iw_param *srq)
err = orinoco_lock(priv, &flags);
if (err)
return err;
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, &val);
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFSYSTEMSCALE, &val);
orinoco_unlock(priv, &flags);
if (err)
@@ -3201,8 +3191,7 @@ static int orinoco_ioctl_getrate(struct net_device *dev, struct iw_param *rrq)
ratemode = priv->bitratemode;
- if ( (ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE) )
- BUG();
+ BUG_ON((ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE));
rrq->value = bitrate_table[ratemode].bitrate * 100000;
rrq->fixed = ! bitrate_table[ratemode].automatic;
@@ -3354,7 +3343,6 @@ static int orinoco_ioctl_getpower(struct net_device *dev, struct iw_param *prq)
return err;
}
-#if WIRELESS_EXT > 10
static int orinoco_ioctl_getretry(struct net_device *dev, struct iw_param *rrq)
{
struct orinoco_private *priv = netdev_priv(dev);
@@ -3406,7 +3394,6 @@ static int orinoco_ioctl_getretry(struct net_device *dev, struct iw_param *rrq)
return err;
}
-#endif /* WIRELESS_EXT > 10 */
static int orinoco_ioctl_setibssport(struct net_device *dev, struct iwreq *wrq)
{
@@ -3500,7 +3487,6 @@ static int orinoco_ioctl_getport3(struct net_device *dev, struct iwreq *wrq)
*val = priv->prefer_port3;
orinoco_unlock(priv, &flags);
-
return 0;
}
@@ -3790,7 +3776,6 @@ orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
wrq->u.txpower.flags = IW_TXPOW_DBM;
break;
-#if WIRELESS_EXT > 10
case SIOCSIWRETRY:
err = -EOPNOTSUPP;
break;
@@ -3798,7 +3783,6 @@ orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGIWRETRY:
err = orinoco_ioctl_getretry(dev, &wrq->u.retry);
break;
-#endif /* WIRELESS_EXT > 10 */
case SIOCSIWSPY:
err = orinoco_ioctl_setspy(dev, &wrq->u.data);
@@ -4124,51 +4108,68 @@ static int orinoco_debug_dump_recs(struct net_device *dev)
return 0;
}
-struct net_device *alloc_orinocodev(int sizeof_card, int (*hard_reset)(struct orinoco_private *))
-{
- struct net_device *dev;
- struct orinoco_private *priv;
-
- dev = alloc_etherdev(sizeof(struct orinoco_private) + sizeof_card);
- if (!dev)
- return NULL;
- priv = netdev_priv(dev);
- priv->ndev = dev;
- if (sizeof_card)
- priv->card = (void *)((unsigned long)dev->priv + sizeof(struct orinoco_private));
- else
- priv->card = NULL;
-
- /* Setup / override net_device fields */
- dev->init = orinoco_init;
- dev->hard_start_xmit = orinoco_xmit;
- dev->tx_timeout = orinoco_tx_timeout;
- dev->watchdog_timeo = HZ; /* 1 second timeout */
- dev->get_stats = orinoco_get_stats;
- dev->get_wireless_stats = orinoco_get_wireless_stats;
- dev->do_ioctl = orinoco_ioctl;
- dev->change_mtu = orinoco_change_mtu;
- dev->set_multicast_list = orinoco_set_multicast_list;
- /* we use the default eth_mac_addr for setting the MAC addr */
-
- /* Set up default callbacks */
- dev->open = orinoco_open;
- dev->stop = orinoco_stop;
- priv->hard_reset = hard_reset;
+/********************************************************************/
+/* Debugging */
+/********************************************************************/
- spin_lock_init(&priv->lock);
- priv->open = 0;
- priv->hw_unavailable = 1; /* orinoco_init() must clear this
- * before anything else touches the
- * hardware */
- INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
+#if 0
+static void show_rx_frame(struct orinoco_rxframe_hdr *frame)
+{
+ printk(KERN_DEBUG "RX descriptor:\n");
+ printk(KERN_DEBUG " status = 0x%04x\n", frame->desc.status);
+ printk(KERN_DEBUG " time = 0x%08x\n", frame->desc.time);
+ printk(KERN_DEBUG " silence = 0x%02x\n", frame->desc.silence);
+ printk(KERN_DEBUG " signal = 0x%02x\n", frame->desc.signal);
+ printk(KERN_DEBUG " rate = 0x%02x\n", frame->desc.rate);
+ printk(KERN_DEBUG " rxflow = 0x%02x\n", frame->desc.rxflow);
+ printk(KERN_DEBUG " reserved = 0x%08x\n", frame->desc.reserved);
- priv->last_linkstatus = 0xffff;
- priv->connected = 0;
+ printk(KERN_DEBUG "IEEE 802.11 header:\n");
+ printk(KERN_DEBUG " frame_ctl = 0x%04x\n",
+ frame->p80211.frame_ctl);
+ printk(KERN_DEBUG " duration_id = 0x%04x\n",
+ frame->p80211.duration_id);
+ printk(KERN_DEBUG " addr1 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr1[0], frame->p80211.addr1[1],
+ frame->p80211.addr1[2], frame->p80211.addr1[3],
+ frame->p80211.addr1[4], frame->p80211.addr1[5]);
+ printk(KERN_DEBUG " addr2 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr2[0], frame->p80211.addr2[1],
+ frame->p80211.addr2[2], frame->p80211.addr2[3],
+ frame->p80211.addr2[4], frame->p80211.addr2[5]);
+ printk(KERN_DEBUG " addr3 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr3[0], frame->p80211.addr3[1],
+ frame->p80211.addr3[2], frame->p80211.addr3[3],
+ frame->p80211.addr3[4], frame->p80211.addr3[5]);
+ printk(KERN_DEBUG " seq_ctl = 0x%04x\n",
+ frame->p80211.seq_ctl);
+ printk(KERN_DEBUG " addr4 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr4[0], frame->p80211.addr4[1],
+ frame->p80211.addr4[2], frame->p80211.addr4[3],
+ frame->p80211.addr4[4], frame->p80211.addr4[5]);
+ printk(KERN_DEBUG " data_len = 0x%04x\n",
+ frame->p80211.data_len);
- return dev;
+ printk(KERN_DEBUG "IEEE 802.3 header:\n");
+ printk(KERN_DEBUG " dest = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p8023.h_dest[0], frame->p8023.h_dest[1],
+ frame->p8023.h_dest[2], frame->p8023.h_dest[3],
+ frame->p8023.h_dest[4], frame->p8023.h_dest[5]);
+ printk(KERN_DEBUG " src = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p8023.h_source[0], frame->p8023.h_source[1],
+ frame->p8023.h_source[2], frame->p8023.h_source[3],
+ frame->p8023.h_source[4], frame->p8023.h_source[5]);
+ printk(KERN_DEBUG " len = 0x%04x\n", frame->p8023.h_proto);
+ printk(KERN_DEBUG "IEEE 802.2 LLC/SNAP header:\n");
+ printk(KERN_DEBUG " DSAP = 0x%02x\n", frame->p8022.dsap);
+ printk(KERN_DEBUG " SSAP = 0x%02x\n", frame->p8022.ssap);
+ printk(KERN_DEBUG " ctrl = 0x%02x\n", frame->p8022.ctrl);
+ printk(KERN_DEBUG " OUI = %02x:%02x:%02x\n",
+ frame->p8022.oui[0], frame->p8022.oui[1], frame->p8022.oui[2]);
+ printk(KERN_DEBUG " ethertype = 0x%04x\n", frame->ethertype);
}
+#endif /* 0 */
/********************************************************************/
/* Module initialization */
@@ -4185,7 +4186,9 @@ EXPORT_SYMBOL(orinoco_interrupt);
/* Can't be declared "const" or the whole __initdata section will
* become const */
-static char version[] __initdata = "orinoco.c 0.13e (David Gibson <hermes@gibson.dropbear.id.au> and others)";
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (David Gibson <hermes@gibson.dropbear.id.au>, "
+ "Pavel Roskin <proski@gnu.org>, et al)";
static int __init init_orinoco(void)
{
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
index 972a8510f60fbc..c2f5f78f512f8b 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco.h
@@ -7,39 +7,19 @@
#ifndef _ORINOCO_H
#define _ORINOCO_H
+#define DRIVER_VERSION "0.13e"
+
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/version.h>
-#include "hermes.h"
-
-/* Workqueue / task queue backwards compatibility stuff */
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
-#include <linux/workqueue.h>
-#else
-#include <linux/tqueue.h>
-#define work_struct tq_struct
-#define INIT_WORK INIT_TQUEUE
-#define schedule_work schedule_task
-#endif
-
-/* Interrupt handler backwards compatibility stuff */
-#ifndef IRQ_NONE
-
-#define IRQ_NONE
-#define IRQ_HANDLED
-typedef void irqreturn_t;
-#endif
+#include "hermes.h"
/* To enable debug messages */
//#define ORINOCO_DEBUG 3
-#if (! defined (WIRELESS_EXT)) || (WIRELESS_EXT < 10)
-#error "orinoco driver requires Wireless extensions v10 or later."
-#endif /* (! defined (WIRELESS_EXT)) || (WIRELESS_EXT < 10) */
#define WIRELESS_SPY // enable iwspy support
#define ORINOCO_MAX_KEY_SIZE 14
@@ -50,11 +30,6 @@ struct orinoco_key {
char data[ORINOCO_MAX_KEY_SIZE];
} __attribute__ ((packed));
-#define ORINOCO_INTEN ( HERMES_EV_RX | HERMES_EV_ALLOC | HERMES_EV_TX | \
- HERMES_EV_TXEXC | HERMES_EV_WTERR | HERMES_EV_INFO | \
- HERMES_EV_INFDROP )
-
-
struct orinoco_private {
void *card; /* Pointer to card dependent structure */
int (*hard_reset)(struct orinoco_private *);
@@ -78,7 +53,6 @@ struct orinoco_private {
hermes_t hw;
u16 txfid;
-
/* Capabilities of the hardware/firmware */
int firmware_type;
#define FIRMWARE_TYPE_AGERE 1
@@ -128,6 +102,10 @@ extern int orinoco_debug;
#define TRACE_ENTER(devname) DEBUG(2, "%s: -> %s()\n", devname, __FUNCTION__);
#define TRACE_EXIT(devname) DEBUG(2, "%s: <- %s()\n", devname, __FUNCTION__);
+/********************************************************************/
+/* Exported prototypes */
+/********************************************************************/
+
extern struct net_device *alloc_orinocodev(int sizeof_card,
int (*hard_reset)(struct orinoco_private *));
extern int __orinoco_up(struct net_device *dev);
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index 292eeffb65601f..4eb92ac6a5c6b7 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -1,4 +1,4 @@
-/* orinoco_cs.c 0.13e - (formerly known as dldwd_cs.c)
+/* orinoco_cs.c (formerly known as dldwd_cs.c)
*
* A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
* as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
@@ -10,6 +10,9 @@
* Copyright notice & release notes in file orinoco.c
*/
+#define DRIVER_NAME "orinoco_cs"
+#define PFX DRIVER_NAME ": "
+
#include <linux/config.h>
#ifdef __IN_PCMCIA_PACKAGE__
#include <pcmcia/k_compat.h>
@@ -47,9 +50,7 @@
MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
MODULE_DESCRIPTION("Driver for PCMCIA Lucent Orinoco, Prism II based and similar wireless cards");
-#ifdef MODULE_LICENSE
MODULE_LICENSE("Dual MPL/GPL");
-#endif
/* Module parameters */
@@ -76,7 +77,7 @@ MODULE_PARM(ignore_cis_vcc, "i");
* device driver with appropriate cards, through the card
* configuration database.
*/
-static dev_info_t dev_info = "orinoco_cs";
+static dev_info_t dev_info = DRIVER_NAME;
/********************************************************************/
/* Data structures */
@@ -144,15 +145,6 @@ orinoco_cs_hard_reset(struct orinoco_private *priv)
/* PCMCIA stuff */
/********************************************************************/
-/* In 2.5 (as of 2.5.69 at least) there is a cs_error exported which
- * does this, but it's not in 2.4 so we do our own for now. */
-static void
-orinoco_cs_error(client_handle_t handle, int func, int ret)
-{
- error_info_t err = { func, ret };
- pcmcia_report_error(handle, &err);
-}
-
/*
* This creates an "instance" of the driver, allocating local data
* structures for one device. The device is registered with Card
@@ -174,7 +166,7 @@ orinoco_cs_attach(void)
dev = alloc_orinocodev(sizeof(*card), orinoco_cs_hard_reset);
if (! dev)
return NULL;
- priv = dev->priv;
+ priv = netdev_priv(dev);
card = priv->card;
/* Link both structures together */
@@ -216,7 +208,7 @@ orinoco_cs_attach(void)
ret = pcmcia_register_client(&link->handle, &client_reg);
if (ret != CS_SUCCESS) {
- orinoco_cs_error(link->handle, RegisterClient, ret);
+ cs_error(link->handle, RegisterClient, ret);
orinoco_cs_detach(link);
return NULL;
}
@@ -230,8 +222,7 @@ orinoco_cs_attach(void)
* are freed. Otherwise, the structures will be freed when the device
* is released.
*/
-static void
-orinoco_cs_detach(dev_link_t * link)
+static void orinoco_cs_detach(dev_link_t *link)
{
dev_link_t **linkp;
struct net_device *dev = link->priv;
@@ -240,10 +231,8 @@ orinoco_cs_detach(dev_link_t * link)
for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
if (*linkp == link)
break;
- if (*linkp == NULL) {
- BUG();
- return;
- }
+
+ BUG_ON(*linkp == NULL);
if (link->state & DEV_CONFIG)
orinoco_cs_release(link);
@@ -254,9 +243,9 @@ orinoco_cs_detach(dev_link_t * link)
/* Unlink device structure, and free it */
*linkp = link->next;
- DEBUG(0, "orinoco_cs: detach: link=%p link->dev=%p\n", link, link->dev);
+ DEBUG(0, PFX "detach: link=%p link->dev=%p\n", link, link->dev);
if (link->dev) {
- DEBUG(0, "orinoco_cs: About to unregister net device %p\n",
+ DEBUG(0, PFX "About to unregister net device %p\n",
dev);
unregister_netdev(dev);
}
@@ -269,15 +258,16 @@ orinoco_cs_detach(dev_link_t * link)
* device available to the system.
*/
-#define CS_CHECK(fn, ret) \
-do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+#define CS_CHECK(fn, ret) do { \
+ last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; \
+ } while (0)
static void
orinoco_cs_config(dev_link_t *link)
{
struct net_device *dev = link->priv;
client_handle_t handle = link->handle;
- struct orinoco_private *priv = dev->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
struct orinoco_pccard *card = priv->card;
hermes_t *hw = &priv->hw;
int last_fn, last_ret;
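The CS_CHECK() macro reformatted above exists so every Card Services call records which function failed (last_fn) and its return code (last_ret) before jumping to the shared cs_failed label, where cs_error() can report both. Below is a minimal, self-contained illustration of that error-check idiom; step_a()/step_b() and the numeric "function codes" are hypothetical stand-ins, not the PCMCIA API.

/* Hedged sketch of the CS_CHECK() idiom: stash which call failed and
 * its return code, then jump to one shared error label. */
#include <stdio.h>

#define CHECK(fn, ret) do { \
		last_fn = (fn); if ((last_ret = (ret)) != 0) goto failed; \
	} while (0)

enum { STEP_A = 1, STEP_B = 2 };

static int step_a(void) { return 0; }	/* succeeds */
static int step_b(void) { return -5; }	/* fails    */

int main(void)
{
	int last_fn = 0, last_ret = 0;

	CHECK(STEP_A, step_a());
	CHECK(STEP_B, step_b());
	printf("all steps succeeded\n");
	return 0;

failed:
	fprintf(stderr, "step %d failed with %d\n", last_fn, last_ret);
	return 1;
}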
@@ -308,7 +298,8 @@ orinoco_cs_config(dev_link_t *link)
link->state |= DEV_CONFIG;
/* Look up the current Vcc */
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
+ CS_CHECK(GetConfigurationInfo,
+ pcmcia_get_configuration_info(handle, &conf));
link->conf.Vcc = conf.Vcc;
/*
@@ -412,8 +403,9 @@ orinoco_cs_config(dev_link_t *link)
pcmcia_release_io(link->handle, &link->io);
last_ret = pcmcia_get_next_tuple(handle, &tuple);
if (last_ret == CS_NO_MORE_ITEMS) {
- printk(KERN_ERR "GetNextTuple(). No matching CIS configuration, "
- "maybe you need the ignore_cis_vcc=1 parameter.\n");
+ printk(KERN_ERR PFX "GetNextTuple(): No matching "
+ "CIS configuration, maybe you need the "
+ "ignore_cis_vcc=1 parameter.\n");
goto cs_failed;
}
}
@@ -451,7 +443,8 @@ orinoco_cs_config(dev_link_t *link)
* the I/O windows and the interrupt mapping, and putting the
* card and host interface into "Memory and IO" mode.
*/
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestConfiguration,
+ pcmcia_request_configuration(link->handle, &link->conf));
/* Ok, we have the configuration, prepare to register the netdev */
dev->base_addr = link->io.BasePort1;
@@ -463,7 +456,7 @@ orinoco_cs_config(dev_link_t *link)
dev->name[0] = '\0';
/* Tell the stack we exist */
if (register_netdev(dev) != 0) {
- printk(KERN_ERR "orinoco_cs: register_netdev() failed\n");
+ printk(KERN_ERR PFX "register_netdev() failed\n");
goto failed;
}
@@ -495,7 +488,7 @@ orinoco_cs_config(dev_link_t *link)
return;
cs_failed:
- orinoco_cs_error(link->handle, last_fn, last_ret);
+ cs_error(link->handle, last_fn, last_ret);
failed:
orinoco_cs_release(link);
@@ -510,7 +503,7 @@ static void
orinoco_cs_release(dev_link_t *link)
{
struct net_device *dev = link->priv;
- struct orinoco_private *priv = dev->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
unsigned long flags;
/* We're committed to taking the device away now, so mark the
@@ -538,7 +531,7 @@ orinoco_cs_event(event_t event, int priority,
{
dev_link_t *link = args->client_data;
struct net_device *dev = link->priv;
- struct orinoco_private *priv = dev->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
struct orinoco_pccard *card = priv->card;
int err = 0;
unsigned long flags;
@@ -635,12 +628,14 @@ orinoco_cs_event(event_t event, int priority,
/* Can't be declared "const" or the whole __initdata section will
* become const */
-static char version[] __initdata = "orinoco_cs.c 0.13e (David Gibson <hermes@gibson.dropbear.id.au> and others)";
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (David Gibson <hermes@gibson.dropbear.id.au>, "
+ "Pavel Roskin <proski@gnu.org>, et al)";
static struct pcmcia_driver orinoco_driver = {
.owner = THIS_MODULE,
.drv = {
- .name = "orinoco_cs",
+ .name = DRIVER_NAME,
},
.attach = orinoco_cs_attach,
.detach = orinoco_cs_detach,
@@ -660,7 +655,7 @@ exit_orinoco_cs(void)
pcmcia_unregister_driver(&orinoco_driver);
if (dev_list)
- DEBUG(0, "orinoco_cs: Removing leftover devices.\n");
+ DEBUG(0, PFX "Removing leftover devices.\n");
while (dev_list != NULL) {
if (dev_list->state & DEV_CONFIG)
orinoco_cs_release(dev_list);
@@ -670,4 +665,3 @@ exit_orinoco_cs(void)
module_init(init_orinoco_cs);
module_exit(exit_orinoco_cs);
-
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 23222b951d5d1b..3e58b75837097e 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -1,19 +1,23 @@
-/* orinoco_pci.c 0.13e
+/* orinoco_pci.c
*
* Driver for Prism II devices that have a direct PCI interface
* (i.e., not in a Pcmcia or PLX bridge)
*
* Specifically here we're talking about the Linksys WMP11
*
+ * Current maintainers (as of 29 September 2003) are:
+ * Pavel Roskin <proski AT gnu.org>
+ * and David Gibson <hermes AT gibson.dropbear.id.au>
+ *
* Some of this code is borrowed from orinoco_plx.c
- * Copyright (C) 2001 Daniel Barlow <dan@telent.net>
+ * Copyright (C) 2001 Daniel Barlow <dan AT telent.net>
* Some of this code is "inspired" by linux-wlan-ng-0.1.10, but nothing
* has been copied from it. linux-wlan-ng-0.1.10 is originally :
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* This file originally written by:
- * Copyright (C) 2001 Jean Tourrilhes <jt@hpl.hp.com>
+ * Copyright (C) 2001 Jean Tourrilhes <jt AT hpl.hp.com>
* And is now maintained by:
- * Copyright (C) 2002 David Gibson, IBM Corporation <herme@gibson.dropbear.id.au>
+ * (C) Copyright David Gibson, IBM Corp. 2002-2003.
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
@@ -85,6 +89,9 @@
* Jean II
*/
+#define DRIVER_NAME "orinoco_pci"
+#define PFX DRIVER_NAME ": "
+
#include <linux/config.h>
#include <linux/module.h>
@@ -99,7 +106,6 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
-#include <linux/wireless.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/fcntl.h>
@@ -174,7 +180,7 @@ orinoco_pci_cor_reset(struct orinoco_private *priv)
}
/* Did we timeout ? */
if(time_after_eq(jiffies, timeout)) {
- printk(KERN_ERR "orinoco_pci: Busy timeout\n");
+ printk(KERN_ERR PFX "Busy timeout\n");
return -ETIMEDOUT;
}
printk(KERN_NOTICE "pci_cor : reg = 0x%X - %lX - %lX\n", reg, timeout, jiffies);
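The hunk above sits inside orinoco_pci_cor_reset(), which polls the card after toggling the COR and gives up with -ETIMEDOUT once a jiffies deadline passes. A minimal user-space sketch of that bounded-poll pattern follows; fake_busy() and the millisecond bookkeeping are stand-ins for the Hermes busy bit and the jiffies/time_after_eq() arithmetic, not the driver's real code.

/* Hedged sketch: poll a busy flag against a deadline, fail with -ETIMEDOUT. */
#include <stdio.h>
#include <errno.h>

#define TIMEOUT_MS 500

/* Stand-in for the hardware busy bit: "hardware" stays busy for 20 ms. */
static int fake_busy(long elapsed_ms)
{
	return elapsed_ms < 20;
}

static int wait_not_busy(void)
{
	long elapsed_ms = 0;

	while (fake_busy(elapsed_ms)) {
		if (elapsed_ms >= TIMEOUT_MS)	/* the time_after_eq() check */
			return -ETIMEDOUT;
		elapsed_ms += 1;		/* stand-in for mdelay(1) + jiffies ticking */
	}
	return 0;
}

int main(void)
{
	int err = wait_not_busy();

	if (err)
		printf("Busy timeout (%d)\n", err);
	else
		printf("card ready\n");
	return err ? 1 : 0;
}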
@@ -206,22 +212,21 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
if (! pci_iorange)
goto fail;
- /* Usual setup of structures */
+ /* Allocate network device */
dev = alloc_orinocodev(0, NULL);
if (! dev) {
err = -ENOMEM;
goto fail;
}
- priv = dev->priv;
+ priv = netdev_priv(dev);
dev->base_addr = (unsigned long) pci_ioaddr;
dev->mem_start = pci_iorange;
dev->mem_end = pci_iorange + pci_iolen - 1;
-
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- printk(KERN_DEBUG
+ printk(KERN_DEBUG PFX
"Detected Orinoco/Prism2 PCI device at %s, mem:0x%lX to 0x%lX -> 0x%p, irq:%d\n",
pci_name(pdev), dev->mem_start, dev->mem_end, pci_ioaddr, pdev->irq);
@@ -232,12 +237,13 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
dev->name, dev);
if (err) {
- printk(KERN_ERR "orinoco_pci: Error allocating IRQ %d.\n",
+ printk(KERN_ERR PFX "Error allocating IRQ %d.\n",
pdev->irq);
err = -EBUSY;
goto fail;
}
dev->irq = pdev->irq;
+
/* Perform a COR reset to start the card */
if(orinoco_pci_cor_reset(priv) != 0) {
printk(KERN_ERR "%s: Failed to start the card\n", dev->name);
@@ -255,7 +261,8 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
goto fail;
}
- return 0; /* succeeded */
+ return 0;
+
fail:
if (dev) {
if (dev->irq)
@@ -275,11 +282,11 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct orinoco_private *priv = dev->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
unregister_netdev(dev);
- if (dev->irq)
+ if (dev->irq)
free_irq(dev->irq, dev);
if (priv->hw.iobase)
@@ -294,7 +301,7 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
static int orinoco_pci_suspend(struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct orinoco_private *priv = dev->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
unsigned long flags;
int err;
@@ -325,7 +332,7 @@ static int orinoco_pci_suspend(struct pci_dev *pdev, u32 state)
static int orinoco_pci_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct orinoco_private *priv = dev->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
unsigned long flags;
int err;
@@ -357,7 +364,9 @@ static int orinoco_pci_resume(struct pci_dev *pdev)
}
static struct pci_device_id orinoco_pci_pci_id_table[] = {
+ /* Intersil Prism 3 */
{0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
+ /* Intersil Prism 2.5 */
{0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID,},
{0,},
};
@@ -365,7 +374,7 @@ static struct pci_device_id orinoco_pci_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, orinoco_pci_pci_id_table);
static struct pci_driver orinoco_pci_driver = {
- .name = "orinoco_pci",
+ .name = DRIVER_NAME,
.id_table = orinoco_pci_pci_id_table,
.probe = orinoco_pci_init_one,
.remove = __devexit_p(orinoco_pci_remove_one),
@@ -373,8 +382,11 @@ static struct pci_driver orinoco_pci_driver = {
.resume = orinoco_pci_resume,
};
-static char version[] __initdata = "orinoco_pci.c 0.13e (David Gibson <hermes@gibson.dropbear.id.au> & Jean Tourrilhes <jt@hpl.hp.com>)";
-MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Pavel Roskin <proski@gnu.org>,"
+ " David Gibson <hermes@gibson.dropbear.id.au> &"
+ " Jean Tourrilhes <jt@hpl.hp.com>)";
+MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> & David Gibson <hermes@gibson.dropbear.id.au>");
MODULE_DESCRIPTION("Driver for wireless LAN cards using direct PCI interface");
MODULE_LICENSE("Dual MPL/GPL");
@@ -384,7 +396,7 @@ static int __init orinoco_pci_init(void)
return pci_module_init(&orinoco_pci_driver);
}
-void __exit orinoco_pci_exit(void)
+static void __exit orinoco_pci_exit(void)
{
pci_unregister_driver(&orinoco_pci_driver);
}
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
index b5660defd85568..d21ca78f026f71 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -1,9 +1,14 @@
-/* orinoco_plx.c 0.13e
- *
+/* orinoco_plx.c
+ *
* Driver for Prism II devices which would usually be driven by orinoco_cs,
- * but are connected to the PCI bus by a PLX9052.
+ * but are connected to the PCI bus by a PLX9052.
+ *
+ * Current maintainers (as of 29 September 2003) are:
+ * Pavel Roskin <proski AT gnu.org>
+ * and David Gibson <hermes AT gibson.dropbear.id.au>
*
- * Copyright (C) 2001 Daniel Barlow <dan@telent.net>
+ * (C) Copyright David Gibson, IBM Corp. 2001-2003.
+ * Copyright (C) 2001 Daniel Barlow
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
@@ -33,77 +38,83 @@
* drop me mail with the id and "it works"/"it doesn't work".
*
* Note: if everything gets detected fine but it doesn't actually send
- * or receive packets, your first port of call should probably be to
+ * or receive packets, your first port of call should probably be to
* try newer firmware in the card. Especially if you're doing Ad-Hoc
- * modes
+ * modes.
*
* The actual driving is done by orinoco.c, this is just resource
* allocation stuff. The explanation below is courtesy of Ryan Niemi
* on the linux-wlan-ng list at
* http://archives.neohapsis.com/archives/dev/linux-wlan/2001-q1/0026.html
+ *
+ * The PLX9052-based cards (WL11000 and several others) are a
+ * different beast than the usual PCMCIA-based PRISM2 configuration
+ * expected by wlan-ng. Here's the general details on how the WL11000
+ * PCI adapter works:
+ *
+ * - Two PCI I/O address spaces, one 0x80 long which contains the
+ * PLX9052 registers, and one that's 0x40 long mapped to the PCMCIA
+ * slot I/O address space.
+ *
+ * - One PCI memory address space, mapped to the PCMCIA memory space
+ * (containing the CIS).
+ *
+ * After identifying the I/O and memory space, you can read through
+ * the memory space to confirm the CIS's device ID or manufacturer ID
+ * to make sure it's the expected card. Keep in mind that the PCMCIA
+ * spec specifies the CIS as the lower 8 bits of each word read from
+ * the CIS, so to read the bytes of the CIS, read every other byte
+ * (0,2,4,...). Passing that test, you need to enable the I/O address
+ * space on the PCMCIA card via the PCMCIA COR register. This is the
+ * first byte following the CIS. In my case (which may not have any
+ * relation to what's on the PRISM2 cards), COR was at offset 0x800
+ * within the PCI memory space. Write 0x41 to the COR register to
+ * enable I/O mode and to select level triggered interrupts. To
+ * confirm you actually succeeded, read the COR register back and make
+ * sure it actually got set to 0x41, in case you have an unexpected
+ * card inserted.
+ *
+ * Following that, you can treat the second PCI I/O address space (the
+ * one that's not 0x80 in length) as the PCMCIA I/O space.
+ *
+ * Note that in the Eumitcom's source for their drivers, they register
+ * the interrupt as edge triggered when registering it with the
+ * Windows kernel. I don't recall how to register edge triggered on
+ * Linux (if it can be done at all). But in some experimentation, I
+ * don't see much operational difference between using either
+ * interrupt mode. Don't mess with the interrupt mode in the COR
+ * register though, as the PLX9052 wants level triggers with the way
+ * the serial EEPROM configures it on the WL11000.
+ *
+ * There's some other little quirks related to timing that I bumped
+ * into, but I don't recall right now. Also, there's two variants of
+ * the WL11000 I've seen, revision A1 and T2. These seem to differ
+ * slightly in the timings configured in the wait-state generator in
+ * the PLX9052. There have also been some comments from Eumitcom that
+ * cards shouldn't be hot swapped, apparently due to risk of cooking
+ * the PLX9052. I'm unsure why they believe this, as I can't see
+ * anything in the design that would really cause a problem, except
+ * for crashing drivers not written to expect it. And having developed
+ * drivers for the WL11000, I'd say it's quite tricky to write code
+ * that will successfully deal with a hot unplug. Very odd things
+ * happen on the I/O side of things. But anyway, be warned. Despite
+ * that, I've hot-swapped a number of times during debugging and
+ * driver development for various reasons (stuck WAIT# line after the
+ * radio card's firmware locks up).
+ *
+ * Hope this is enough info for someone to add PLX9052 support to the
+ * wlan-ng card. In the case of the WL11000, the PCI ID's are
+ * 0x1639/0x0200, with matching subsystem ID's. Other PLX9052-based
+ * manufacturers other than Eumitcom (or on cards other than the
+ * WL11000) may have different PCI ID's.
+ *
+ * If anyone needs any more specific info, let me know. I haven't had
+ * time to implement support myself yet, and with the way things are
+ * going, might not have time for a while..
+ */
-The PLX9052-based cards (WL11000 and several others) are a different
-beast than the usual PCMCIA-based PRISM2 configuration expected by
-wlan-ng. Here's the general details on how the WL11000 PCI adapter
-works:
-
- - Two PCI I/O address spaces, one 0x80 long which contains the PLX9052
- registers, and one that's 0x40 long mapped to the PCMCIA slot I/O
- address space.
-
- - One PCI memory address space, mapped to the PCMCIA memory space
- (containing the CIS).
-
-After identifying the I/O and memory space, you can read through the
-memory space to confirm the CIS's device ID or manufacturer ID to make
-sure it's the expected card. Keep in mind that the PCMCIA spec specifies
-the CIS as the lower 8 bits of each word read from the CIS, so to read the
-bytes of the CIS, read every other byte (0,2,4,...). Passing that test,
-you need to enable the I/O address space on the PCMCIA card via the PCMCIA
-COR register. This is the first byte following the CIS. In my case
-(which may not have any relation to what's on the PRISM2 cards), COR was
-at offset 0x800 within the PCI memory space. Write 0x41 to the COR
-register to enable I/O mode and to select level triggered interrupts. To
-confirm you actually succeeded, read the COR register back and make sure
-it actually got set to 0x41, incase you have an unexpected card inserted.
-
-Following that, you can treat the second PCI I/O address space (the one
-that's not 0x80 in length) as the PCMCIA I/O space.
-
-Note that in the Eumitcom's source for their drivers, they register the
-interrupt as edge triggered when registering it with the Windows kernel. I
-don't recall how to register edge triggered on Linux (if it can be done at
-all). But in some experimentation, I don't see much operational
-difference between using either interrupt mode. Don't mess with the
-interrupt mode in the COR register though, as the PLX9052 wants level
-triggers with the way the serial EEPROM configures it on the WL11000.
-
-There's some other little quirks related to timing that I bumped into, but
-I don't recall right now. Also, there's two variants of the WL11000 I've
-seen, revision A1 and T2. These seem to differ slightly in the timings
-configured in the wait-state generator in the PLX9052. There have also
-been some comments from Eumitcom that cards shouldn't be hot swapped,
-apparently due to risk of cooking the PLX9052. I'm unsure why they
-believe this, as I can't see anything in the design that would really
-cause a problem, except for crashing drivers not written to expect it. And
-having developed drivers for the WL11000, I'd say it's quite tricky to
-write code that will successfully deal with a hot unplug. Very odd things
-happen on the I/O side of things. But anyway, be warned. Despite that,
-I've hot-swapped a number of times during debugging and driver development
-for various reasons (stuck WAIT# line after the radio card's firmware
-locks up).
-
-Hope this is enough info for someone to add PLX9052 support to the wlan-ng
-card. In the case of the WL11000, the PCI ID's are 0x1639/0x0200, with
-matching subsystem ID's. Other PLX9052-based manufacturers other than
-Eumitcom (or on cards other than the WL11000) may have different PCI ID's.
-
-If anyone needs any more specific info, let me know. I haven't had time
-to implement support myself yet, and with the way things are going, might
-not have time for a while..
-
----end of mail---
-*/
+#define DRIVER_NAME "orinoco_plx"
+#define PFX DRIVER_NAME ": "
#include <linux/config.h>
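The comment block reflowed above describes the same probe sequence the driver itself performs: read the CIS through the attribute-memory window (the lower 8 bits of each 16-bit word, i.e. every other byte), compare it against an expected pattern, then write 0x41 (COR_LEVEL_REQ | COR_FUNC_ENA) to the COR and read it back to confirm the card switched to I/O mode. The stand-alone sketch below only walks through that sequence on an in-memory buffer; attr_mem[], FAKE_COR_OFFSET and the magic bytes are illustrative stand-ins, not the card's or the driver's actual layout.

/* Hedged walk-through of the CIS check and COR enable described above,
 * simulated on a plain byte array instead of the ioremapped window. */
#include <stdio.h>
#include <stddef.h>

#define FAKE_ATTR_LEN   0x1000
#define FAKE_COR_OFFSET 0x800		/* "first byte following the CIS" in the mail */
#define COR_ENABLE      0x41		/* level-triggered IRQ + function enable */

static unsigned char attr_mem[FAKE_ATTR_LEN];	/* stand-in for the attribute window */
static const unsigned char cis_magic[] = { 0x01, 0x03, 0x00, 0x00 };

int main(void)
{
	size_t i;

	/* Pretend the card exposes the expected CIS in the low byte of each word. */
	for (i = 0; i < sizeof(cis_magic); i++)
		attr_mem[2 * i] = cis_magic[i];

	/* 1. Confirm the CIS before touching anything else (read bytes 0,2,4,...). */
	for (i = 0; i < sizeof(cis_magic); i++) {
		if (attr_mem[2 * i] != cis_magic[i]) {
			fprintf(stderr, "unexpected CIS, bailing out\n");
			return 1;
		}
	}

	/* 2. Enable I/O mode via the COR, then read it back to verify. */
	attr_mem[FAKE_COR_OFFSET] = COR_ENABLE;
	if (attr_mem[FAKE_COR_OFFSET] != COR_ENABLE) {
		fprintf(stderr, "COR did not stick, unexpected card?\n");
		return 1;
	}

	printf("CIS matched, COR set to 0x%02x, card is in I/O mode\n",
	       attr_mem[FAKE_COR_OFFSET]);
	return 0;
}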
@@ -122,7 +133,6 @@ not have time for a while..
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
-#include <linux/wireless.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/fcntl.h>
@@ -132,13 +142,11 @@ not have time for a while..
#include "hermes.h"
#include "orinoco.h"
-static char dev_info[] = "orinoco_plx";
-
-#define COR_OFFSET (0x3e0 / 2) /* COR attribute offset of Prism2 PC card */
-#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
+#define COR_OFFSET (0x3e0/2) /* COR attribute offset of Prism2 PC card */
+#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
-#define PLX_INTCSR 0x4c /* Interrupt Control and Status Register */
-#define PLX_INTCSR_INTEN (1<<6) /* Interrupt Enable bit */
+#define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */
+#define PLX_INTCSR_INTEN (1<<6) /* Interrupt Enable bit */
static const u16 cis_magic[] = {
0x0001, 0x0003, 0x0000, 0x0000, 0x00ff, 0x0017, 0x0004, 0x0067
@@ -215,7 +223,7 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
/* and 3 to the PCMCIA slot I/O address space */
pccard_ioaddr = pci_resource_start(pdev, 3);
pccard_iolen = pci_resource_len(pdev, 3);
- if (! request_region(pccard_ioaddr, pccard_iolen, dev_info)) {
+ if (! request_region(pccard_ioaddr, pccard_iolen, DRIVER_NAME)) {
printk(KERN_ERR "orinoco_plx: I/O resource 0x%lx @ 0x%lx busy\n",
pccard_iolen, pccard_ioaddr);
pccard_ioaddr = 0;
@@ -223,28 +231,30 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
goto fail;
}
+ /* Allocate network device */
dev = alloc_orinocodev(0, NULL);
if (! dev) {
err = -ENOMEM;
goto fail;
}
- priv = dev->priv;
+ priv = netdev_priv(dev);
dev->base_addr = pccard_ioaddr;
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- printk(KERN_DEBUG
- "Detected Orinoco/Prism2 PLX device at %s irq:%d, io addr:0x%lx\n",
- pci_name(pdev), pdev->irq, pccard_ioaddr);
+ printk(KERN_DEBUG PFX "Detected Orinoco/Prism2 PLX device "
+ "at %s irq:%d, io addr:0x%lx\n", pci_name(pdev), pdev->irq,
+ pccard_ioaddr);
- hermes_struct_init(&(priv->hw), dev->base_addr,
- HERMES_IO, HERMES_16BIT_REGSPACING);
+ hermes_struct_init(&(priv->hw), dev->base_addr, HERMES_IO,
+ HERMES_16BIT_REGSPACING);
pci_set_drvdata(pdev, dev);
- err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name, dev);
+ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
+ dev->name, dev);
if (err) {
- printk(KERN_ERR "orinoco_plx: Error allocating IRQ %d.\n", pdev->irq);
+ printk(KERN_ERR PFX "Error allocating IRQ %d.\n", pdev->irq);
err = -EBUSY;
goto fail;
}
@@ -254,10 +264,10 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
if (err)
goto fail;
- return 0; /* succeeded */
+ return 0;
- fail:
- printk(KERN_DEBUG "orinoco_plx: init_one(), FAIL!\n");
+ fail:
+ printk(KERN_DEBUG PFX "init_one(), FAIL!\n");
if (dev) {
if (dev->irq)
@@ -281,8 +291,7 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- if (! dev)
- BUG();
+ BUG_ON(! dev);
unregister_netdev(dev);
@@ -305,33 +314,34 @@ static struct pci_device_id orinoco_plx_pci_id_table[] = {
{0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
{0x1638, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* SMC EZConnect SMC2602W,
Eumitcom PCI WL11000,
- Addtron AWA-100*/
+ Addtron AWA-100 */
{0x16ab, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* Global Sun Tech GL24110P */
{0x16ab, 0x1101, PCI_ANY_ID, PCI_ANY_ID,}, /* Reported working, but unknown */
{0x16ab, 0x1102, PCI_ANY_ID, PCI_ANY_ID,}, /* Linksys WDT11 */
{0x16ec, 0x3685, PCI_ANY_ID, PCI_ANY_ID,}, /* USR 2415 */
{0xec80, 0xec00, PCI_ANY_ID, PCI_ANY_ID,}, /* Belkin F5D6000 tested by
- Brendan W. McAdams <rit@jacked-in.org> */
+ Brendan W. McAdams <rit AT jacked-in.org> */
{0x10b7, 0x7770, PCI_ANY_ID, PCI_ANY_ID,}, /* 3Com AirConnect PCI tested by
- Damien Persohn <damien@persohn.net> */
+ Damien Persohn <damien AT persohn.net> */
{0,},
};
MODULE_DEVICE_TABLE(pci, orinoco_plx_pci_id_table);
static struct pci_driver orinoco_plx_driver = {
- .name = "orinoco_plx",
+ .name = DRIVER_NAME,
.id_table = orinoco_plx_pci_id_table,
.probe = orinoco_plx_init_one,
.remove = __devexit_p(orinoco_plx_remove_one),
};
-static char version[] __initdata = "orinoco_plx.c 0.13e (Daniel Barlow <dan@telent.net>, David Gibson <hermes@gibson.dropbear.id.au>)";
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Pavel Roskin <proski@gnu.org>,"
+ " David Gibson <hermes@gibson.dropbear.id.au>,"
+ " Daniel Barlow <dan@telent.net>)";
MODULE_AUTHOR("Daniel Barlow <dan@telent.net>");
MODULE_DESCRIPTION("Driver for wireless LAN cards using the PLX9052 PCI bridge");
-#ifdef MODULE_LICENSE
MODULE_LICENSE("Dual MPL/GPL");
-#endif
static int __init orinoco_plx_init(void)
{
@@ -339,7 +349,7 @@ static int __init orinoco_plx_init(void)
return pci_module_init(&orinoco_plx_driver);
}
-void __exit orinoco_plx_exit(void)
+static void __exit orinoco_plx_exit(void)
{
pci_unregister_driver(&orinoco_plx_driver);
current->state = TASK_UNINTERRUPTIBLE;
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
index 56ab51cf553d7e..b619f6deab84ad 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -1,10 +1,10 @@
-/* orinoco_tmd.c 0.01
+/* orinoco_tmd.c
*
* Driver for Prism II devices which would usually be driven by orinoco_cs,
* but are connected to the PCI bus by a TMD7160.
*
- * Copyright (C) 2003 Joerg Dorchain <joerg@dorchain.net>
- * based heavily upon orinoco_plx.c Copyright (C) 2001 Daniel Barlow <dan@telent.net>
+ * Copyright (C) 2003 Joerg Dorchain <joerg AT dorchain.net>
+ * based heavily upon orinoco_plx.c Copyright (C) 2001 Daniel Barlow
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
@@ -49,6 +49,9 @@
* Pheecom sells cards with the TMD chip as "ASIC version"
*/
+#define DRIVER_NAME "orinoco_tmd"
+#define PFX DRIVER_NAME ": "
+
#include <linux/config.h>
#include <linux/module.h>
@@ -66,7 +69,6 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
-#include <linux/wireless.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/fcntl.h>
@@ -76,10 +78,7 @@
#include "hermes.h"
#include "orinoco.h"
-static char dev_info[] = "orinoco_tmd";
-
-#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA | COR_FUNC_ENA) /* Enable PC card with level triggered irqs and irq requests */
-
+#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
static int orinoco_tmd_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -95,11 +94,11 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
if (err)
return -EIO;
- printk(KERN_DEBUG "TMD setup\n");
+ printk(KERN_DEBUG PFX "TMD setup\n");
pccard_ioaddr = pci_resource_start(pdev, 2);
pccard_iolen = pci_resource_len(pdev, 2);
- if (! request_region(pccard_ioaddr, pccard_iolen, dev_info)) {
- printk(KERN_ERR "orinoco_tmd: I/O resource at 0x%lx len 0x%lx busy\n",
+ if (! request_region(pccard_ioaddr, pccard_iolen, DRIVER_NAME)) {
+ printk(KERN_ERR PFX "I/O resource at 0x%lx len 0x%lx busy\n",
pccard_ioaddr, pccard_iolen);
pccard_ioaddr = 0;
err = -EBUSY;
@@ -110,34 +109,35 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
mdelay(1);
reg = inb(addr);
if (reg != COR_VALUE) {
- printk(KERN_ERR "orinoco_tmd: Error setting TMD COR values %x should be %x\n", reg, COR_VALUE);
+ printk(KERN_ERR PFX "Error setting TMD COR values %x should be %x\n", reg, COR_VALUE);
err = -EIO;
goto fail;
}
+ /* Allocate network device */
dev = alloc_orinocodev(0, NULL);
if (! dev) {
err = -ENOMEM;
goto fail;
}
- priv = dev->priv;
+ priv = netdev_priv(dev);
dev->base_addr = pccard_ioaddr;
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- printk(KERN_DEBUG
- "Detected Orinoco/Prism2 TMD device at %s irq:%d, io addr:0x%lx\n",
- pci_name(pdev), pdev->irq, pccard_ioaddr);
+ printk(KERN_DEBUG PFX "Detected Orinoco/Prism2 TMD device "
+ "at %s irq:%d, io addr:0x%lx\n", pci_name(pdev), pdev->irq,
+ pccard_ioaddr);
hermes_struct_init(&(priv->hw), dev->base_addr,
HERMES_IO, HERMES_16BIT_REGSPACING);
pci_set_drvdata(pdev, dev);
- err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name,
- dev);
+ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
+ dev->name, dev);
if (err) {
- printk(KERN_ERR "orinoco_tmd: Error allocating IRQ %d.\n",
+ printk(KERN_ERR PFX "Error allocating IRQ %d.\n",
pdev->irq);
err = -EBUSY;
goto fail;
@@ -148,10 +148,10 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
if (err)
goto fail;
- return 0; /* succeeded */
+ return 0;
- fail:
- printk(KERN_DEBUG "orinoco_tmd: init_one(), FAIL!\n");
+ fail:
+ printk(KERN_DEBUG PFX "init_one(), FAIL!\n");
if (dev) {
if (dev->irq)
@@ -172,8 +172,7 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- if (! dev)
- BUG();
+ BUG_ON(! dev);
unregister_netdev(dev);
@@ -198,18 +197,17 @@ static struct pci_device_id orinoco_tmd_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, orinoco_tmd_pci_id_table);
static struct pci_driver orinoco_tmd_driver = {
- .name = "orinoco_tmd",
+ .name = DRIVER_NAME,
.id_table = orinoco_tmd_pci_id_table,
.probe = orinoco_tmd_init_one,
.remove = __devexit_p(orinoco_tmd_remove_one),
};
-static char version[] __initdata = "orinoco_tmd.c 0.01 (Joerg Dorchain <joerg@dorchain.net>)";
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Joerg Dorchain <joerg@dorchain.net>)";
MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
MODULE_DESCRIPTION("Driver for wireless LAN cards using the TMD7160 PCI bridge");
-#ifdef MODULE_LICENSE
MODULE_LICENSE("Dual MPL/GPL");
-#endif
static int __init orinoco_tmd_init(void)
{
@@ -217,7 +215,7 @@ static int __init orinoco_tmd_init(void)
return pci_module_init(&orinoco_tmd_driver);
}
-void __exit orinoco_tmd_exit(void)
+static void __exit orinoco_tmd_exit(void)
{
pci_unregister_driver(&orinoco_tmd_driver);
current->state = TASK_UNINTERRUPTIBLE;